Example #1
def get_nzb_downloads(page=0, limit=100, type=None):
    query = IndexerNzbDownload() \
        .select(Indexer.name.alias("indexerName"), IndexerNzbDownload.title, IndexerNzbDownload.time,
                SearchResult.id.alias('searchResultId'), SearchResult.details.alias('detailsLink'),
                Search.internal, IndexerApiAccess.response_successful, IndexerApiAccess.username) \
        .join(SearchResult, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload) \
        .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
        .join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerApiAccess) \
        .join(IndexerSearch, JOIN.LEFT_OUTER) \
        .join(Search, JOIN.LEFT_OUTER)

    if type == "Internal":
        query = query.where(Search.internal)
    elif type == "API":
        query = query.where(~Search.internal)

    total_downloads = query.count()
    nzb_downloads = list(
        query.order_by(IndexerNzbDownload.time.desc()).paginate(page,
                                                                limit).dicts())
    downloads = {
        "totalDownloads": total_downloads,
        "nzbDownloads": nzb_downloads
    }
    return downloads
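The snippet above relies on a handful of peewee idioms: LEFT OUTER joins, .switch() to reset the join context, and .paginate().dicts() to fetch one page of rows as dictionaries. The following standalone sketch (hypothetical toy models on an in-memory SQLite database, not the project's real schema) shows the same pattern end to end.

# Minimal sketch with made-up models; it only illustrates the join/paginate/dicts pattern above.
import datetime
from peewee import SqliteDatabase, Model, CharField, DateTimeField, ForeignKeyField, JOIN

db = SqliteDatabase(":memory:")

class Indexer(Model):
    name = CharField()
    class Meta:
        database = db

class Download(Model):
    indexer = ForeignKeyField(Indexer, null=True)
    title = CharField()
    time = DateTimeField(default=datetime.datetime.utcnow)
    class Meta:
        database = db

db.create_tables([Indexer, Download])
Download.create(indexer=Indexer.create(name="SomeIndexer"), title="Some NZB")

query = Download \
    .select(Indexer.name.alias("indexerName"), Download.title, Download.time) \
    .join(Indexer, JOIN.LEFT_OUTER)
page = list(query.order_by(Download.time.desc()).paginate(1, 100).dicts())
print({"totalDownloads": query.count(), "nzbDownloads": page})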
Example #2
def get_nzb_downloads(page=0, limit=100, filterModel=None, sortModel=None):
    columnNameToEntityMap = {
        "time": IndexerApiAccess.time,
        "indexer": Indexer.name,
        "title": IndexerNzbDownload.title,
        "access": IndexerNzbDownload.internal,
        "successful": IndexerApiAccess.response_successful,
        "username": IndexerApiAccess.username
    }

    query = IndexerNzbDownload() \
        .select(Indexer.name.alias("indexerName"), IndexerNzbDownload.title, IndexerApiAccess.time,
                IndexerNzbDownload.internal, SearchResult.id.alias('searchResultId'),
                SearchResult.details.alias('detailsLink'), IndexerApiAccess.response_successful,
                IndexerApiAccess.username) \
        .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER)

    query = extendQueryWithFilter(columnNameToEntityMap, filterModel, query)
    query = extendQueryWithSorting(columnNameToEntityMap, query, sortModel,
                                   IndexerApiAccess.time.desc())

    total_downloads = query.count()
    nzb_downloads = list(query.paginate(page, limit).dicts())
    downloads = {
        "totalDownloads": total_downloads,
        "nzbDownloads": nzb_downloads
    }
    return downloads
Example #3
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader) 
    :return: str
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link

    # Log to database (only when API access logging is enabled)
    papiaccess = IndexerApiAccess(
        indexer=indexer.indexer,
        type="nzb",
        url=link,
        response_successful=None) if log_api_access else None
    if papiaccess is not None:
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            # Accessing request.authorization outside a request context raises RuntimeError
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult,
                                apiAccess=papiaccess,
                                mode=mode,
                                title=searchResult.title,
                                internal=internal)
    pnzbdl.save()

    return link, papiaccess, pnzbdl
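A hedged usage sketch for the function above; the search result id and the mode string are placeholders I made up, not values taken from the example. The caller gets back the indexer link together with the two freshly saved log rows.

# Hypothetical call; 1234 and "redirect" are placeholders, not values from the example above.
link, papiaccess, pnzbdl = get_indexer_nzb_link(
    searchResultId=1234, mode="redirect", log_api_access=True, internal=True)
print("NZB link: %s (logged as API access #%s)" % (link, papiaccess.id))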
Example #4
def getIndexerDownloadStats():
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        dlCount = IndexerNzbDownload().\
            select(Indexer.name, IndexerApiAccess.response_successful). \
            join(IndexerApiAccess, JOIN.LEFT_OUTER). \
            join(Indexer, JOIN.LEFT_OUTER).\
            where(Indexer.id == p).\
            count()
        results.append({
            "name": p.name,
            "total": dlCount,
            "share": 100 / (allDownloadsCount / dlCount) if allDownloadsCount > 0 and dlCount > 0 else 0
        })
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: x["share"], reverse=True)
    return results
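A quick worked example of the share expression above: assuming true division, 100 / (allDownloadsCount / dlCount) is simply the percentage 100 * dlCount / allDownloadsCount.

# Worked example of the share computation (true division assumed).
allDownloadsCount, dlCount = 400, 100
print(100 / (allDownloadsCount / dlCount))   # 25.0
print(100 * dlCount / allDownloadsCount)     # 25.0, the same percentage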
Example #5
def getTimeBasedDownloadStats():
    downloads = IndexerNzbDownload(). \
        select(Indexer.name, IndexerApiAccess.response_successful, IndexerNzbDownload.time). \
        join(IndexerApiAccess, JOIN.LEFT_OUTER). \
        join(Indexer, JOIN.LEFT_OUTER)
    downloadTimes = [arrow.get(x.time) for x in downloads]

    perDayOfWeek, perHourOfDay = calculcateTimeBasedStats(downloadTimes)

    return {"perDayOfWeek": perDayOfWeek, "perHourOfDay": perHourOfDay}
Example #6
def getTimeBasedDownloadStats(after, before):
    downloads = list(IndexerNzbDownload(). \
        select(Indexer.name, IndexerApiAccess.response_successful, IndexerApiAccess.time). \
        where((IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)). \
        join(IndexerApiAccess, JOIN.LEFT_OUTER). \
        join(Indexer, JOIN.LEFT_OUTER).dicts())
    downloadTimes = [arrow.get(x["time"]).to(tz.tzlocal()) for x in downloads]

    perDayOfWeek, perHourOfDay = calculcateTimeBasedStats(downloadTimes)

    return {"perDayOfWeek": perDayOfWeek, "perHourOfDay": perHourOfDay}
Example #7
def checkHitOrDownloadLimit(p):
    if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
        if p.settings.hitLimitResetTime:
            comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
            if comparisonTime > arrow.utcnow():
                comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
        else:
            # Use rolling time window
            comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
    if p.settings.hitLimit > 0:
        apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
        apiHits = apiHitsQuery.count()
        if apiHits >= p.settings.hitLimit:
            if p.settings.hitLimitResetTime:
                logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
            else:
                try:
                    firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit - 1).dicts())[0]["time"]).to("local")
                    nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                except IndexerApiAccess.DoesNotExist:
                    logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
            return False, "API limit reached"
        else:
            logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))

    if p.settings.downloadLimit > 0:
        downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
        downloads = downloadsQuery.count()
        if downloads >= p.settings.downloadLimit:
            if p.settings.hitLimitResetTime:
                logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
            else:
                try:
                    firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                    nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                    logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                except IndexerApiAccess.DoesNotExist:
                    logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
            return False, "Download limit reached"
        else:
            logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))

    return True, None
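The two branches at the top of the function compute the comparison window differently: a fixed daily reset hour (falling back to yesterday's reset if today's has not been reached yet) versus a rolling 24-hour window. A minimal standalone sketch of just that logic, assuming only arrow and datetime:

# Standalone sketch of the comparison window logic above.
import datetime
import arrow

def comparison_time(hit_limit_reset_hour=None):
    if hit_limit_reset_hour is not None:
        # Fixed reset time: today's reset hour, or yesterday's if today's lies in the future
        t = arrow.utcnow().replace(hour=hit_limit_reset_hour, minute=0, second=0)
        if t > arrow.utcnow():
            t = arrow.get(t.datetime - datetime.timedelta(days=1))
        return t
    # Rolling window: only hits within the last 24 hours count against the limit
    return arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))

print(comparison_time(6))   # today (or yesterday) at 06:00 UTC
print(comparison_time())    # exactly 24 hours ago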
Example #8
def getIndexerDownloadStats():
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        dlCount = IndexerNzbDownload() \
            .select(Indexer.name, IndexerApiAccess.response_successful) \
            .join(IndexerSearch, JOIN.LEFT_OUTER) \
            .join(Search, JOIN.LEFT_OUTER) \
            .switch(IndexerNzbDownload) \
            .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
            .join(Indexer, JOIN.LEFT_OUTER) \
            .where(Indexer.id == p) \
            .count()
        results.append({
            "name": p.name,
            "total": dlCount,
            "share": 100 / (allDownloadsCount / dlCount) if allDownloadsCount > 0 and dlCount > 0 else 0
        })
    return results
Example #9
def get_indexer_nzb_link(indexer_name, indexerguid, title, searchid, mode,
                         log_api_access):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader) 
    :return: str
    """
    for p in indexers.enabled_indexers:
        if p.name.strip() == indexer_name.strip():
            link = p.get_nzb_link(indexerguid, title)

            # Log to database (only when API access logging is enabled)
            indexer = Indexer.get(
                fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(
                indexer=p.indexer,
                type="nzb",
                url=link,
                response_successful=None,
                indexer_search=searchid) if log_api_access else None
            if papiaccess is not None:
                try:
                    papiaccess.username = request.authorization.username if request.authorization is not None else None
                except RuntimeError:
                    # Accessing request.authorization outside a request context raises RuntimeError
                    pass
                papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer,
                                        indexer_search=searchid,
                                        api_access=papiaccess,
                                        mode=mode,
                                        title=title,
                                        guid=indexerguid)
            pnzbdl.save()

            return link, papiaccess, pnzbdl

    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None, None, None