示例#1
0
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given information. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)
    :param searchResultId: id of the SearchResult whose NZB link is requested
    :param mode: download mode stored with the IndexerNzbDownload entry (e.g. "serve")
    :param log_api_access: whether to create an IndexerApiAccess log entry
    :param internal: whether the download was triggered internally (stored with the entry)
    :return: tuple of (link, IndexerApiAccess or None, IndexerNzbDownload)
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link

    # Log to database. Bug fix: when log_api_access is False, papiaccess is None,
    # but the original code unconditionally set papiaccess.username and called
    # papiaccess.save(), raising an AttributeError. Only touch it when it exists.
    papiaccess = None
    if log_api_access:
        papiaccess = IndexerApiAccess(
            indexer=indexer.indexer,
            type="nzb",
            url=link,
            response_successful=None)
        try:
            # request.authorization is only usable inside a Flask request context;
            # outside of one Flask raises RuntimeError, which we deliberately ignore.
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult,
                                apiAccess=papiaccess,
                                mode=mode,
                                title=searchResult.title,
                                internal=internal)
    pnzbdl.save()

    return link, papiaccess, pnzbdl
示例#2
0
def get_avg_indexer_search_results_share():
    """
    Compute, for every enabled indexer, its percentage share of all results returned
    for the searches it took part in.

    :return: list of dicts with "name" and "avgResultsShare" ("N/A" when no data),
             sorted by share descending, ties broken by name.
    """
    shares = []
    for indexerEntry in Indexer.select().order_by(Indexer.name):
        try:
            configured = getIndexerByName(indexerEntry.name)
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % indexerEntry.name)
            continue
        if not configured.settings.enabled:
            logger.debug("Skipping download stats for %s" % indexerEntry.name)
            continue
        # Percentage of the indexer's summed result counts against the summed result
        # counts of all indexers for the same (successful, query/ID based) searches.
        row = database.db.execute_sql(
            "select (100 * (select cast(sum(ps.resultsCount) as float) from indexersearch ps "
            "where ps.search_id in (select ps.search_id from indexersearch ps, search s where ps.indexer_id == %d and ps.search_id = s.id and ps.successful and (s.episode NOT NULL  or s.season not NULL  or s.identifier_key not null or s.query not null)) and ps.indexer_id == %d)) "
            "/ "
            "(select sum(ps.resultsCount) from indexersearch ps where ps.search_id in (select ps.search_id from indexersearch ps, search s where ps.indexer_id == %d and ps.search_id = s.id and ps.successful and (s.episode NOT NULL  or s.season not NULL  or s.identifier_key not null or s.query not null))) as sumAllResults"
            % (indexerEntry.id, indexerEntry.id, indexerEntry.id)).fetchone()
        share = int(row[0]) if row[0] is not None else "N/A"
        shares.append({"name": indexerEntry.name, "avgResultsShare": share})
    # Sort by the secondary key first, then the primary key; both sorts are stable.
    shares.sort(key=lambda entry: entry["name"])
    shares.sort(key=lambda entry: 0 if entry["avgResultsShare"] == "N/A" else entry["avgResultsShare"], reverse=True)
    return shares
示例#3
0
文件: stats.py 项目: gspu/nzbhydra
def get_avg_indexer_response_times(after, before):
    """
    Calculate the average successful API response time per enabled indexer inside the
    (after, before) time window and each indexer's delta against the overall average.

    :param after: lower time bound (exclusive)
    :param before: upper time bound (exclusive)
    :return: list of dicts with "name", "avgResponseTime" and "delta",
             sorted by avgResponseTime ascending
    """
    result = []
    response_times = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        where = (IndexerApiAccess.response_successful) & (IndexerApiAccess.indexer == p) & (IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)
        avg_response_time = IndexerApiAccess().select(fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
        if avg_response_time:
            response_times.append({"name": p.name, "avgResponseTime": int(avg_response_time)})
    # Bug fix: "IndexerApiAccess.response_time is not None" tested the Python identity
    # of the peewee field object (always True) instead of producing an SQL IS NOT NULL
    # clause, so NULL response times were never filtered out of the overall average.
    # "!= None" is peewee's way to render IS NOT NULL.
    where = (IndexerApiAccess.response_successful) & (IndexerApiAccess.response_time != None) & (IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)  # noqa: E711
    avg_response_time = IndexerApiAccess().select(fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
    for i in response_times:
        delta = i["avgResponseTime"] - avg_response_time
        i["delta"] = delta
        result.append(i)
    result = sorted(result, key=lambda x: x["name"])
    result = sorted(result, key=lambda x: x["avgResponseTime"])

    return result
示例#4
0
def download_nzb_and_log(searchResultId):
    """
    Download the NZB behind the given search result from its indexer and log the access.

    :param searchResultId: id of the SearchResult whose NZB should be downloaded
    :return: IndexerNzbDownloadResult with the NZB content and response headers,
             or None on any error (indexer unknown, result unknown, request failed)
    """
    link, papiaccess, _ = get_indexer_nzb_link(searchResultId, "serve", True)
    indexerName = None
    try:
        indexerName = SearchResult.get(
            SearchResult.id == searchResultId).indexer.name
        indexer = indexers.getIndexerByName(indexerName)
        r = indexer.get(link, timeout=10)
        r.raise_for_status()

        papiaccess.response_successful = True
        papiaccess.response_time = r.elapsed.microseconds / 1000

        return IndexerNzbDownloadResult(content=r.content, headers=r.headers)
    except IndexerNotFoundException:
        if indexerName:
            logger.error("Unable to find indexer with name %s" % indexerName)
        else:
            logger.error("Unable to find indexer for search result id %s" %
                         searchResultId)
        return None
    except SearchResult.DoesNotExist:
        logger.error("Unable to find search result with ID %s" %
                     searchResultId)
        return None
    except RequestException as e:
        logger.error("Error while connecting to URL %s: %s" % (link, str(e)))
        # Bug fix: response_successful was left at None on request errors, so failed
        # downloads never showed up in the failure statistics (the stats SQL filters
        # "NOT response_successful", which excludes NULL). Record the failure explicitly.
        papiaccess.response_successful = False
        papiaccess.error = str(e)
        return None
    finally:
        # Persist the outcome (success flag / response time / error) in every case.
        papiaccess.save()
示例#5
0
文件: api.py 项目: judhat2/nzbhydra
def download_nzb_and_log(searchResultId):
    """
    Download the NZB behind the given search result from its indexer and log the access.

    :param searchResultId: id of the SearchResult whose NZB should be downloaded
    :return: IndexerNzbDownloadResult with the NZB content and response headers,
             or None on any error (indexer unknown, result unknown, request failed)
    """
    # Resolves the link and creates the IndexerApiAccess/IndexerNzbDownload log entries.
    link, papiaccess, _ = get_indexer_nzb_link(searchResultId, "serve", True)
    indexerName = None
    try:
        indexerName = SearchResult.get(SearchResult.id == searchResultId).indexer.name
        indexer = indexers.getIndexerByName(indexerName)
        r = indexer.get(link, timeout=10)
        r.raise_for_status()

        papiaccess.response_successful = True
        # NOTE(review): elapsed.microseconds is only the sub-second component, so
        # responses longer than a second under-report -- total_seconds() * 1000
        # would be safer; confirm intended.
        papiaccess.response_time = r.elapsed.microseconds / 1000

        return IndexerNzbDownloadResult(content=r.content, headers=r.headers)
    except IndexerNotFoundException:
        if indexerName:
            logger.error("Unable to find indexer with name %s" % indexerName)
        else:
            logger.error("Unable to find indexer for search result id %s" % searchResultId)
        return None
    except SearchResult.DoesNotExist:
        logger.error("Unable to find search result with ID %s" % searchResultId)
        return None
    except RequestException as e:
        # NOTE(review): response_successful stays None here, so this failure may not be
        # counted by stats queries that filter "NOT response_successful" -- confirm.
        logger.error("Error while connecting to URL %s: %s" % (link, str(e)))
        papiaccess.error = str(e)
        return None
    finally:
        # Persist the outcome (success flag / response time / error) in every case.
        papiaccess.save()
示例#6
0
def getIndexerDownloadStats():
    """
    Count NZB downloads per enabled indexer and compute each indexer's percentage
    share of all downloads.

    :return: list of dicts with "name", "total" and "share", sorted by share
             descending, ties broken by name
    """
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        dlCount = IndexerNzbDownload().\
            select(Indexer.name, IndexerApiAccess.response_successful). \
            join(IndexerApiAccess, JOIN.LEFT_OUTER). \
            join(Indexer, JOIN.LEFT_OUTER).\
            where(Indexer.id == p).\
            count()
        # Bug fix: "100 / (allDownloadsCount / dlCount)" truncates the intermediate
        # quotient under integer division (Python 2), reporting 0% for any indexer
        # with less than ~1% share. 100.0 * dlCount / allDownloadsCount is the same
        # ratio without the intermediate truncation.
        results.append({
            "name":
            p.name,
            "total":
            dlCount,
            "share":
            100.0 * dlCount / allDownloadsCount
            if allDownloadsCount > 0 and dlCount > 0 else 0
        })
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: x["share"], reverse=True)
    return results
示例#7
0
def get_avg_indexer_response_times(after, before):
    """
    Calculate the average successful API response time per enabled indexer inside the
    (after, before) time window and each indexer's delta against the overall average.

    :param after: lower time bound (exclusive)
    :param before: upper time bound (exclusive)
    :return: list of dicts with "name", "avgResponseTime" and "delta",
             sorted by avgResponseTime ascending
    """
    result = []
    response_times = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        where = (IndexerApiAccess.response_successful) & (
            IndexerApiAccess.indexer == p
        ) & (IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)
        avg_response_time = IndexerApiAccess().select(
            fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
        if avg_response_time:
            response_times.append({
                "name": p.name,
                "avgResponseTime": int(avg_response_time)
            })
    # Bug fix: "IndexerApiAccess.response_time is not None" tested the Python identity
    # of the peewee field object (always True) instead of producing an SQL IS NOT NULL
    # clause, so NULL response times were never filtered out of the overall average.
    # "!= None" is peewee's way to render IS NOT NULL.
    where = (IndexerApiAccess.response_successful) & (
        IndexerApiAccess.response_time != None) & (  # noqa: E711
            IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)
    avg_response_time = IndexerApiAccess().select(
        fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
    for i in response_times:
        delta = i["avgResponseTime"] - avg_response_time
        i["delta"] = delta
        result.append(i)
    result = sorted(result, key=lambda x: x["name"])
    result = sorted(result, key=lambda x: x["avgResponseTime"])

    return result
示例#8
0
def get_avg_indexer_search_results_share():
    """
    Compute, for every enabled indexer, its percentage share of all results returned
    for the (successful, query/ID based) searches it took part in, plus the average
    percentage of unique results it contributed.

    :return: list of dicts with "name", "avgResultsShare" and "avgUniqueResults"
             ("N/A" when no data, "-" for indexers excluded from unique stats),
             sorted by avgResultsShare descending, ties broken by name
    """
    results = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
            if indexer.settings.name == "Womble":
                logger.debug("Skipping download stats for Womble because we exclude update queries without specific query or ID")
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        # This indexer's summed result counts as a percentage of all indexers' summed
        # result counts, restricted to searches this indexer participated in.
        result = database.db.execute_sql(
            """
            SELECT (100 *
            (SELECT cast(sum(ps.resultsCount) AS FLOAT)
             FROM indexersearch ps
             WHERE ps.search_id IN (SELECT ps.search_id
                                    FROM indexersearch ps, search s
                                    WHERE ps.indexer_id == %d AND ps.search_id = s.id AND ps.successful AND (s.episode NOT NULL OR s.season NOT NULL OR s.identifier_key NOT NULL OR s.query NOT NULL)) AND ps.indexer_id == %d))
           /
           (SELECT sum(ps.resultsCount)
            FROM indexersearch ps
            WHERE ps.search_id IN (SELECT ps.search_id
                                   FROM indexersearch ps, search s
                                   WHERE ps.indexer_id == %d AND ps.search_id = s.id AND ps.successful AND (s.episode NOT NULL OR s.season NOT NULL OR s.identifier_key NOT NULL OR s.query NOT NULL))) AS sumAllResults
             """
            % (p.id, p.id, p.id)).fetchone()
        avgResultsShare = int(result[0]) if result is not None and result[0] is not None else "N/A"

        # Average percentage of this indexer's processed results that were unique.
        result = database.db.execute_sql(
            """
            SELECT avg(
                CASE WHEN uniqueResults > 0
                  THEN
                    100 / (processedResults * 1.0 / uniqueResults)
                ELSE 0
                END) as avgUniqueResults
            FROM indexersearch
            WHERE processedResults IS NOT NULL AND uniqueResults IS NOT NULL
                  AND indexer_id == %d
            GROUP BY indexer_id;

            """
            % p.id).fetchone()
        # NOTE(review): these three indexers are hard-excluded from the unique-results
        # statistic -- presumably because their results skew the metric; confirm why.
        if p.name in ["NZBIndex", "Binsearch", "NZBClub"]:
            avgUniqueResults = "-"
        elif result is not None and result[0] is not None:
            avgUniqueResults = int(result[0])
        else:
            avgUniqueResults = "N/A"
        results.append({"name": p.name, "avgResultsShare": avgResultsShare, "avgUniqueResults": avgUniqueResults})
    # Stable sorts: secondary key (name) first, then primary key (share, descending).
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: 0 if x["avgResultsShare"] == "N/A" else x["avgResultsShare"], reverse=True)
    return results
示例#9
0
文件: api.py 项目: judhat2/nzbhydra
def get_nfo(searchresultid):
    """
    Retrieve the NFO for a search result from its indexer.

    :param searchresultid: id of the SearchResult
    :return: dict with "has_nfo", "nfo" and "message" on success, or
             "has_nfo": False and an "error" entry when the indexer is unknown
    """
    try:
        searchResult = SearchResult.get(SearchResult.id == searchresultid)
        indexer = indexers.getIndexerByName(searchResult.indexer.name)
        has_nfo, nfo, message = indexer.get_nfo(searchResult.guid)
        return {"has_nfo": has_nfo, "nfo": nfo, "message": message}
    except IndexerNotFoundException as e:
        # str(e) instead of e.message: BaseException.message was removed in Python 3
        # and has been deprecated since Python 2.6.
        logger.error(str(e))
        return {"has_nfo": False, "error": "Unable to find indexer"}
示例#10
0
def get_nfo(searchresultid):
    """
    Fetch the NFO for the given search result from its indexer.

    :param searchresultid: id of the SearchResult
    :return: dict with "has_nfo", "nfo" and "message", or an error dict when the
             indexer cannot be found
    """
    try:
        found = SearchResult.get(SearchResult.id == searchresultid)
        matchingIndexer = indexers.getIndexerByName(found.indexer.name)
        has_nfo, nfo, message = matchingIndexer.get_nfo(found.guid)
    except IndexerNotFoundException as e:
        logger.error(e.message)
        return {"has_nfo": False, "error": "Unable to find indexer"}
    return {"has_nfo": has_nfo, "nfo": nfo, "message": message}
示例#11
0
def get_avg_indexer_search_results_share(afterSql, beforeSql):
    """
    Compute, for every enabled indexer, its percentage share of all results returned
    for the (successful, query/ID based) searches it took part in, plus the average
    percentage of unique results it contributed, restricted to the given time window.

    :param afterSql: SQL fragment for the lower time bound, spliced verbatim into the query
    :param beforeSql: SQL fragment for the upper time bound, spliced verbatim into the query
    :return: list of dicts with "name", "avgResultsShare" and "avgUniqueResults",
             sorted by avgResultsShare descending, ties broken by name
    """
    results = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
            if indexer.settings.name == "Womble":
                logger.debug("Skipping download stats for Womble because we exclude update queries without specific query or ID")
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        # NOTE(review): the "AND ps.time ..." tail sits outside the subselect's closing
        # paren, so after splicing it constrains the *outer* query's ps row, not the
        # subselect -- confirm that placement is intended.
        innerSelect = """(SELECT ps.search_id
                                    FROM indexersearch ps, search s
                                    WHERE ps.indexer_id == %(id)d AND ps.search_id = s.id AND ps.successful AND (s.episode NOT NULL OR s.season NOT NULL OR s.identifier_key NOT NULL OR s.query NOT NULL)) AND ps.time > %(after)s and ps.time < %(before)s""" % {"id": p.id, "after": afterSql, "before": beforeSql}

        # This indexer's summed result counts as a percentage of all indexers' summed
        # result counts for the same searches.
        result = database.db.execute_sql(
            """
            SELECT (100 *
            (SELECT cast(sum(ps.resultsCount) AS FLOAT)
             FROM indexersearch ps
             WHERE ps.search_id IN %s AND ps.indexer_id == %d))
           /
           (SELECT sum(ps.resultsCount)
            FROM indexersearch ps
            WHERE ps.search_id IN %s) AS sumAllResults
             """
            % (innerSelect, p.id, innerSelect)).fetchone()
        avgResultsShare = int(result[0]) if result is not None and len(result) > 0 and result[0] is not None else "N/A"

        # Average percentage of this indexer's processed results that were unique,
        # inside the same time window.
        result = database.db.execute_sql(
            """
            SELECT avg(
                CASE WHEN uniqueResults > 0
                  THEN
                    100 / (processedResults * 1.0 / uniqueResults)
                ELSE 0
                END) as avgUniqueResults
            FROM indexersearch s
            WHERE processedResults IS NOT NULL AND uniqueResults IS NOT NULL
                  AND s.indexer_id == %(id)d AND s.time > %(after)s and s.time < %(before)s
            GROUP BY indexer_id;

            """
            % {"id": p.id, "after": afterSql, "before": beforeSql}).fetchone()
        # NOTE(review): these three indexers are hard-excluded from the unique-results
        # statistic -- confirm why (presumably their results skew the metric).
        if p.name in ["NZBIndex", "Binsearch", "NZBClub"]:
            avgUniqueResults = "-"
        elif result is not None and len(result) > 0 and result[0] is not None:
            avgUniqueResults = int(result[0])
        else:
            avgUniqueResults = "N/A"
        results.append({"name": p.name, "avgResultsShare": avgResultsShare, "avgUniqueResults": avgUniqueResults})
    # Stable sorts: secondary key (name) first, then primary key (share, descending).
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: 0 if x["avgResultsShare"] == "N/A" else x["avgResultsShare"], reverse=True)
    return results
示例#12
0
def get_avg_indexer_search_results_share(afterSql, beforeSql):
    """
    For every enabled indexer compute its share of all search results and the average
    percentage of unique results within the given time window.

    :param afterSql: SQL fragment for the lower time bound, spliced verbatim into the query
    :param beforeSql: SQL fragment for the upper time bound, spliced verbatim into the query
    :return: list of dicts with "name", "avgResultsShare" and "avgUniqueResults",
             sorted by share descending, ties broken by name
    """
    shareRows = []
    for indexerEntry in Indexer.select().order_by(Indexer.name):
        try:
            configured = getIndexerByName(indexerEntry.name)
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % indexerEntry.name)
            continue
        if not configured.settings.enabled:
            logger.debug("Skipping download stats for %s" % indexerEntry.name)
            continue
        # Subselect restricting to successful, query/ID based searches of this indexer,
        # plus the time-window conditions that apply to the outer query after splicing.
        innerSelect = """(SELECT ps.search_id
                                    FROM indexersearch ps, search s
                                    WHERE ps.indexer_id == %(id)d AND ps.search_id = s.id AND ps.successful AND (s.episode NOT NULL OR s.season NOT NULL OR s.identifier_key NOT NULL OR s.query NOT NULL)) AND ps.time > %(after)s and ps.time < %(before)s""" % {"id": indexerEntry.id, "after": afterSql, "before": beforeSql}

        shareRow = database.db.execute_sql(
            """
            SELECT (100 *
            (SELECT cast(sum(ps.resultsCount) AS FLOAT)
             FROM indexersearch ps
             WHERE ps.search_id IN %s AND ps.indexer_id == %d))
           /
           (SELECT sum(ps.resultsCount)
            FROM indexersearch ps
            WHERE ps.search_id IN %s) AS sumAllResults
             """
            % (innerSelect, indexerEntry.id, innerSelect)).fetchone()
        if shareRow is not None and len(shareRow) > 0 and shareRow[0] is not None:
            avgResultsShare = int(shareRow[0])
        else:
            avgResultsShare = "N/A"

        uniqueRow = database.db.execute_sql(
            """
            SELECT avg(
                CASE WHEN uniqueResults > 0
                  THEN
                    100 / (processedResults * 1.0 / uniqueResults)
                ELSE 0
                END) as avgUniqueResults
            FROM indexersearch s
            WHERE processedResults IS NOT NULL AND uniqueResults IS NOT NULL
                  AND s.indexer_id == %(id)d AND s.time > %(after)s and s.time < %(before)s
            GROUP BY indexer_id;

            """
            % {"id": indexerEntry.id, "after": afterSql, "before": beforeSql}).fetchone()
        if indexerEntry.name in ["NZBIndex", "Binsearch", "NZBClub"]:
            avgUniqueResults = "-"
        elif uniqueRow is not None and len(uniqueRow) > 0 and uniqueRow[0] is not None:
            avgUniqueResults = int(uniqueRow[0])
        else:
            avgUniqueResults = "N/A"
        shareRows.append({"name": indexerEntry.name, "avgResultsShare": avgResultsShare, "avgUniqueResults": avgUniqueResults})
    # Stable sorts: secondary key (name) first, then primary key (share, descending).
    shareRows.sort(key=lambda entry: entry["name"])
    shareRows.sort(key=lambda entry: 0 if entry["avgResultsShare"] == "N/A" else entry["avgResultsShare"], reverse=True)
    return shareRows
示例#13
0
def getIndexerBasedDownloadStats(afterSql, beforeSql):
    """
    Compute per-indexer NZB download totals and each indexer's percentage share of
    all downloads inside the given time window.

    :param afterSql: SQL fragment for the lower time bound, spliced verbatim into the query
    :param beforeSql: SQL fragment for the upper time bound, spliced verbatim into the query
    :return: list of dicts with "name", "total" and "share", sorted by share
             descending, ties broken by name
    """
    enabledIndexerIds = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug(
                    "Skipping download stats for %s because it's disabled" %
                    p.name)
                continue
            enabledIndexerIds.append(str(p.id))
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
    enabledIndexerIds = ", ".join(enabledIndexerIds)
    # Bug fix: the outer WHERE lacked the time-window filter that the "countall"
    # subquery applies, so per-indexer totals spanned all time while the denominator
    # was window-limited, producing shares over 100%. The filter now matches the
    # subquery (and the other version of this function in this codebase).
    query = """
    SELECT
      indexer.name,
      count(*) AS total,
      CASE WHEN count(*) > 0
        THEN
          100 / (1.0 * countall.countall / count(*))
      ELSE 0
      END
               AS share
    FROM
      indexernzbdownload dl,
      (SELECT count(*) AS countall
       FROM
         indexernzbdownload dl
         LEFT OUTER JOIN indexerapiaccess api
           ON dl.apiAccess_id = api.id
       WHERE api.indexer_id IN (%(enabledIndexerIds)s)
       AND api.time > %(afterSql)s AND api.time < %(beforeSql)s
       )
      countall
      LEFT OUTER JOIN indexerapiaccess api
        ON dl.apiAccess_id = api.id
      LEFT OUTER JOIN indexer indexer
        ON api.indexer_id = indexer.id
    WHERE api.indexer_id IN (%(enabledIndexerIds)s)
    AND api.time > %(afterSql)s AND api.time < %(beforeSql)s
    GROUP BY indexer.id
    """ % {
        "enabledIndexerIds": enabledIndexerIds,
        "afterSql": afterSql,
        "beforeSql": beforeSql
    }
    stats = database.db.execute_sql(query).fetchall()
    stats = [{"name": x[0], "total": x[1], "share": x[2]} for x in stats]

    stats = sorted(stats, key=lambda x: x["name"])
    stats = sorted(stats, key=lambda x: x["share"], reverse=True)
    return stats
示例#14
0
文件: stats.py 项目: gspu/nzbhydra
def getIndexerBasedDownloadStats(afterSql, beforeSql):
    """
    Compute per-indexer NZB download totals and each indexer's percentage share of
    all downloads inside the given time window.

    :param afterSql: SQL fragment for the lower time bound, spliced verbatim into the query
    :param beforeSql: SQL fragment for the upper time bound, spliced verbatim into the query
    :return: list of dicts with "name", "total" and "share", sorted by share
             descending, ties broken by name
    """
    # Collect the ids of all configured-and-enabled indexers; only these are counted.
    enabledIndexerIds = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s because it's disabled" % p.name)
                continue
            enabledIndexerIds.append(str(p.id))
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
    enabledIndexerIds = ", ".join(enabledIndexerIds)
    # One query: per-indexer download count, with the window-limited grand total
    # ("countall" subquery) used as the denominator of the share percentage.
    query = """
    SELECT
      indexer.name,
      count(*) AS total,
      CASE WHEN count(*) > 0
        THEN
          100 / (1.0 * countall.countall / count(*))
      ELSE 0
      END
               AS share
    FROM
      indexernzbdownload dl,
      (SELECT count(*) AS countall
       FROM
         indexernzbdownload dl
         LEFT OUTER JOIN indexerapiaccess api
           ON dl.apiAccess_id = api.id
       WHERE api.indexer_id IN (%(enabledIndexerIds)s)
       AND api.time > %(afterSql)s AND api.time < %(beforeSql)s
       )
      countall
      LEFT OUTER JOIN indexerapiaccess api
        ON dl.apiAccess_id = api.id
      LEFT OUTER JOIN indexer indexer
        ON api.indexer_id = indexer.id
    WHERE api.indexer_id IN (%(enabledIndexerIds)s)
    AND api.time > %(afterSql)s AND api.time < %(beforeSql)s
    GROUP BY indexer.id
    """ % {"enabledIndexerIds": enabledIndexerIds, "afterSql": afterSql, "beforeSql": beforeSql}
    stats = database.db.execute_sql(query).fetchall()
    stats = [{"name": x[0], "total": x[1], "share": x[2]} for x in stats]

    # Stable sorts: secondary key (name) first, then primary key (share, descending).
    stats = sorted(stats, key=lambda x: x["name"])
    stats = sorted(stats, key=lambda x: x["share"], reverse=True)
    return stats
示例#15
0
def get_avg_indexer_search_results_share():
    """
    For every enabled indexer, compute its percentage share of all search results
    for the searches it took part in.

    :return: unsorted list of dicts with "name" and "avgResultsShare"
             ("N/A" when no data is available)
    """
    shares = []
    for indexerEntry in Indexer.select().order_by(Indexer.name):
        try:
            configured = getIndexerByName(indexerEntry.name)
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % indexerEntry.name)
            continue
        if not configured.settings.enabled:
            logger.debug("Skipping download stats for %s" % indexerEntry.name)
            continue
        row = database.db.execute_sql(
                "select (100 * (select cast(sum(ps.resultsCount) as float) from indexersearch ps where ps.search_id in (select ps.search_id from indexersearch ps where ps.indexer_id == %d) and ps.indexer_id == %d)) / (select sum(ps.resultsCount) from indexersearch ps where ps.search_id in (select ps.search_id from indexersearch ps where ps.indexer_id == %d)) as sumAllResults" % (
                    indexerEntry.id, indexerEntry.id, indexerEntry.id)).fetchone()
        share = row[0] if row[0] is not None else "N/A"
        shares.append({"name": indexerEntry.name, "avgResultsShare": share})
    return shares
示例#16
0
def get_avg_indexer_access_success():
    """
    Compute per-indexer counts of failed and successful API accesses and the
    corresponding percentages.

    :return: list of dicts with "name", "failed", "success", "failedPercent" and
             "successPercent" ("N/A" for indexers without any accesses), sorted by
             successPercent descending, ties broken by name
    """
    # One row per indexer with its failure and success counts (NULL when none).
    dbResults = database.db.execute_sql(""" 
            SELECT
              p.name,
              failed.failed
              ,success.success
            FROM indexer p left outer join (SELECT
                               count(1)     AS failed,
                               p.indexer_id AS pid1
                             FROM indexerapiaccess p
                             WHERE NOT p.response_successful
                             GROUP BY p.indexer_id) AS failed on p.id == failed.pid1
            left outer join (SELECT
                              count(1)     AS success,
                              p.indexer_id AS pid2
                            FROM indexerapiaccess p
                            WHERE p.response_successful
                            GROUP BY p.indexer_id) AS success
            on success.pid2 = p.id
            """).fetchall()
    results = []
    for i in dbResults:
        name = i[0]
        try:
            indexer = getIndexerByName(name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % name)
            continue
        failed = i[1] if i[1] is not None else 0
        success = i[2] if i[2] is not None else 0
        sumall = failed + success
        failed_percent = (100 * failed) / sumall if sumall > 0 else "N/A"
        success_percent = (100 * success) / sumall if sumall > 0 else "N/A"
        results.append({
            "name": name,
            "failed": failed,
            "success": success,
            "failedPercent": failed_percent,
            "successPercent": success_percent
        })
    results = sorted(results, key=lambda x: x["name"])
    # Bug fix: successPercent mixes ints and the string "N/A"; comparing them in
    # sorted() raises a TypeError on Python 3. Map "N/A" to -1 so indexers without
    # accesses sort last, mirroring the guard in get_avg_indexer_search_results_share.
    results = sorted(results, key=lambda x: -1 if x["successPercent"] == "N/A" else x["successPercent"], reverse=True)
    return results
示例#17
0
def get_avg_indexer_access_success():
    """
    Compute per-indexer counts of failed and successful API accesses and the
    corresponding percentages.

    :return: unsorted list of dicts with "name", "failed", "success",
             "failedPercent" and "successPercent" ("N/A" when an indexer has no
             accesses at all)
    """
    # One row per indexer with its failure and success counts (NULL when none).
    results = database.db.execute_sql(
            """ 
            SELECT
              p.name,
              failed.failed
              ,success.success
            FROM indexer p left outer join (SELECT
                               count(1)     AS failed,
                               p.indexer_id AS pid1
                             FROM indexerapiaccess p
                             WHERE NOT p.response_successful
                             GROUP BY p.indexer_id) AS failed on p.id == failed.pid1
            left outer join (SELECT
                              count(1)     AS success,
                              p.indexer_id AS pid2
                            FROM indexerapiaccess p
                            WHERE p.response_successful
                            GROUP BY p.indexer_id) AS success
            on success.pid2 = p.id
            """).fetchall()
    result = []
    for i in results:
        name = i[0]
        try:
            indexer = getIndexerByName(name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % name)
            continue
        # NULL counts from the outer joins mean "no accesses of that kind".
        failed = i[1] if i[1] is not None else 0
        success = i[2] if i[2] is not None else 0
        sumall = failed + success
        failed_percent = (100 * failed) / sumall if sumall > 0 else "N/A"
        success_percent = (100 * success) / sumall if sumall > 0 else "N/A"
        result.append({"name": name, "failed": failed, "success": success, "failedPercent": failed_percent, "successPercent": success_percent})

    # NOTE(review): unlike the sibling version of this function, the result is
    # returned unsorted -- confirm callers don't rely on an order.
    return result
示例#18
0
文件: api.py 项目: judhat2/nzbhydra
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given information. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)
    :return: tuple of (link, IndexerApiAccess or None, IndexerNzbDownload)
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link

    # Log to database. Bug fix: when log_api_access is False, papiaccess is None, but the
    # original code unconditionally set papiaccess.username and called papiaccess.save(),
    # raising an AttributeError. Only touch it when it exists.
    papiaccess = None
    if log_api_access:
        papiaccess = IndexerApiAccess(indexer=indexer.indexer, type="nzb", url=link, response_successful=None)
        try:
            # Only usable inside a Flask request context; outside it raises RuntimeError.
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult, apiAccess=papiaccess, mode=mode, title=searchResult.title, internal=internal)
    pnzbdl.save()

    return link, papiaccess, pnzbdl
示例#19
0
def getIndexerDownloadStats():
    """
    Count NZB downloads per enabled indexer and compute each indexer's percentage
    share of all downloads.

    :return: unsorted list of dicts with "name", "total" and "share"
    """
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        dlCount = IndexerNzbDownload().\
            select(Indexer.name, IndexerApiAccess.response_successful). \
            join(IndexerApiAccess, JOIN.LEFT_OUTER). \
            join(Indexer, JOIN.LEFT_OUTER).\
            where(Indexer.id == p).\
            count()
        # Bug fix: "100 / (allDownloadsCount / dlCount)" truncates the intermediate
        # quotient under integer division (Python 2), reporting 0% for any indexer
        # with less than ~1% share. 100.0 * dlCount / allDownloadsCount is the same
        # ratio without the intermediate truncation.
        results.append({"name": p.name,
                        "total": dlCount,
                        "share": 100.0 * dlCount / allDownloadsCount if allDownloadsCount > 0 and dlCount > 0 else 0})
    return results
示例#20
0
def get_avg_indexer_access_success(afterSql, beforeSql):
    """
    Compute per-indexer failed/successful API access counts, the corresponding
    percentages, and the average number of accesses per day, restricted to the
    given time window.

    :param afterSql: SQL fragment for the lower time bound, spliced verbatim into the query
    :param beforeSql: SQL fragment for the upper time bound, spliced verbatim into the query
    :return: list of dicts with "name", "failed", "success", "failedPercent",
             "successPercent" and "averagePerDay", sorted by averagePerDay
             descending, ties broken by name
    """
    # query1: per-indexer failure and success counts inside the window.
    # query2: per-indexer average daily access count (accesses grouped by day).
    # Joined on indexer_id so each row carries counts and the daily average.
    dbResults = database.db.execute_sql("""
        SELECT
          query1.name,
          query1.failed,
          query1.success,
          query2.average
        FROM (SELECT
                p.name,
                failed.failed,
                success.success,
                p.id AS indexer_id

              FROM indexer p LEFT OUTER JOIN (SELECT
                                                count(1)     AS failed,
                                                p.indexer_id AS pid1
                                              FROM indexerapiaccess p
                                              WHERE NOT p.response_successful AND p.time > %(after)s AND p.time < %(before)s
                                              GROUP BY p.indexer_id) AS failed ON p.id == failed.pid1
                LEFT OUTER JOIN (SELECT
                                   count(1)     AS success,
                                   p.indexer_id AS pid2
                                 FROM indexerapiaccess p
                                 WHERE p.response_successful AND p.time > %(after)s AND p.time < %(before)s
                                 GROUP BY p.indexer_id) AS success
                  ON success.pid2 = p.id) query1,

          (SELECT
             round(avg(u.sum)) AS average,
             indexer.name,
             indexer.id        AS indexer_id
           FROM
             (SELECT
                t.date,
                t.sum,
                t.indexer_id
              FROM
                (SELECT
                   count(*)     AS sum,
                   date(x.time) AS date,
                   x.indexer_id AS indexer_id
                 FROM
                   indexerapiaccess x
                WHERE
                   x.time > %(after)s AND x.time < %(before)s
                 GROUP BY
                   date(x.time),
                   x.indexer_id
                ) t
              WHERE t.indexer_id != 0) u
             LEFT JOIN indexer ON u.indexer_id = indexer.id
           GROUP BY u.indexer_id) query2

        WHERE query1.indexer_id == query2.indexer_id
        """ % {
        "before": beforeSql,
        "after": afterSql
    }).fetchall()
    results = []
    for i in dbResults:
        name = i[0]
        try:
            indexer = getIndexerByName(name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % name)
            continue
        # NULL counts from the outer joins mean "no accesses of that kind".
        failed = i[1] if i[1] is not None else 0
        success = i[2] if i[2] is not None else 0
        averagePerDay = i[3]
        sumall = failed + success
        failedPercent = (100 * failed) / sumall if sumall > 0 else "N/A"
        successPercent = (100 * success) / sumall if sumall > 0 else "N/A"
        results.append({
            "name": name,
            "failed": failed,
            "success": success,
            "failedPercent": failedPercent,
            "successPercent": successPercent,
            "averagePerDay": averagePerDay
        })
    # Stable sorts: secondary key (name) first, then primary key (averagePerDay, descending).
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: x["averagePerDay"], reverse=True)
    return results
示例#21
0
文件: stats.py 项目: gspu/nzbhydra
def get_avg_indexer_access_success(afterSql, beforeSql):
    """
    Collect per-indexer API access statistics for the time window bounded by the
    two SQL expressions *afterSql* and *beforeSql*.

    For every enabled indexer this returns a dict with its name, the count of
    failed and successful indexerapiaccess rows, the corresponding percentages
    (or "N/A" when there were no accesses at all), and the average number of
    accesses per day. Indexers that are disabled or missing from the
    configuration are skipped. The result list is ordered by averagePerDay
    descending, with name ascending as a stable tiebreak.
    """
    # NOTE(review): afterSql/beforeSql are spliced into the statement via %
    # formatting — callers appear to pass SQL time expressions, not user input;
    # confirm no untrusted data reaches these parameters.
    params = {"before": beforeSql, "after": afterSql}
    cursor = database.db.execute_sql(
        """
        SELECT
          query1.name,
          query1.failed,
          query1.success,
          query2.average
        FROM (SELECT
                p.name,
                failed.failed,
                success.success,
                p.id AS indexer_id

              FROM indexer p LEFT OUTER JOIN (SELECT
                                                count(1)     AS failed,
                                                p.indexer_id AS pid1
                                              FROM indexerapiaccess p
                                              WHERE NOT p.response_successful AND p.time > %(after)s AND p.time < %(before)s
                                              GROUP BY p.indexer_id) AS failed ON p.id == failed.pid1
                LEFT OUTER JOIN (SELECT
                                   count(1)     AS success,
                                   p.indexer_id AS pid2
                                 FROM indexerapiaccess p
                                 WHERE p.response_successful AND p.time > %(after)s AND p.time < %(before)s
                                 GROUP BY p.indexer_id) AS success
                  ON success.pid2 = p.id) query1,

          (SELECT
             round(avg(u.sum)) AS average,
             indexer.name,
             indexer.id        AS indexer_id
           FROM
             (SELECT
                t.date,
                t.sum,
                t.indexer_id
              FROM
                (SELECT
                   count(*)     AS sum,
                   date(x.time) AS date,
                   x.indexer_id AS indexer_id
                 FROM
                   indexerapiaccess x
                WHERE
                   x.time > %(after)s AND x.time < %(before)s
                 GROUP BY
                   date(x.time),
                   x.indexer_id
                ) t
              WHERE t.indexer_id != 0) u
             LEFT JOIN indexer ON u.indexer_id = indexer.id
           GROUP BY u.indexer_id) query2

        WHERE query1.indexer_id == query2.indexer_id
        """ % params)
    stats = []
    for name, failedCount, successCount, average in cursor.fetchall():
        # Only report indexers that exist in the configuration and are enabled.
        try:
            configuredIndexer = getIndexerByName(name)
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % name)
            continue
        if not configuredIndexer.settings.enabled:
            logger.debug("Skipping download stats for %s" % name)
            continue
        # The LEFT OUTER JOINs yield NULL (None) when a count bucket is empty.
        failed = 0 if failedCount is None else failedCount
        success = 0 if successCount is None else successCount
        total = failed + success
        if total > 0:
            failedPercent = (100 * failed) / total
            successPercent = (100 * success) / total
        else:
            failedPercent = "N/A"
            successPercent = "N/A"
        stats.append({
            "name": name,
            "failed": failed,
            "success": success,
            "failedPercent": failedPercent,
            "successPercent": successPercent,
            "averagePerDay": average
        })
    # Two stable in-place sorts: final order is averagePerDay descending,
    # ties broken by name ascending.
    stats.sort(key=lambda entry: entry["name"])
    stats.sort(key=lambda entry: entry["averagePerDay"], reverse=True)
    return stats