def get_nzb_downloads(page=0, limit=100, filterModel=None, sortModel=None):
    """Return one page of the NZB download history together with the total count.

    Args:
        page: zero-based page index passed to peewee's paginate().
        limit: maximum number of rows per page.
        filterModel: optional grid filter description consumed by extendQueryWithFilter.
        sortModel: optional grid sort description consumed by extendQueryWithSorting.

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    # Maps the grid's column names onto the peewee fields the generic
    # filter/sort helpers operate on.
    column_map = {
        "time": IndexerApiAccess.time,
        "indexer": Indexer.name,
        "title": IndexerNzbDownload.title,
        "access": IndexerNzbDownload.internal,
        "successful": IndexerApiAccess.response_successful,
        "username": IndexerApiAccess.username,
    }

    selected_fields = (
        Indexer.name.alias("indexerName"),
        IndexerNzbDownload.title,
        IndexerApiAccess.time,
        IndexerNzbDownload.internal,
        SearchResult.id.alias('searchResultId'),
        SearchResult.details.alias('detailsLink'),
        IndexerApiAccess.response_successful,
        IndexerApiAccess.username,
    )

    # LEFT OUTER joins so downloads survive even when the related API access,
    # indexer or search result rows are missing.
    base_query = IndexerNzbDownload() \
        .select(*selected_fields) \
        .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER)

    filtered_query = extendQueryWithFilter(column_map, filterModel, base_query)
    # Newest first unless the caller asked for a different order.
    sorted_query = extendQueryWithSorting(column_map, filtered_query, sortModel, IndexerApiAccess.time.desc())

    total = sorted_query.count()
    rows = list(sorted_query.paginate(page, limit).dicts())
    return {"totalDownloads": total, "nzbDownloads": rows}
def get_nzb_downloads(page=0, limit=100, type=None):
    """Return one page of the NZB download history, newest first.

    Args:
        page: zero-based page index for peewee's paginate().
        limit: maximum rows per page.
        type: "Internal" or "API" to restrict by originating search type;
            any other value (including None) returns everything.
            (Name shadows the builtin but is kept for caller compatibility.)

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    selected_fields = (
        Indexer.name.alias("indexerName"),
        IndexerNzbDownload.title,
        IndexerNzbDownload.time,
        SearchResult.id.alias('searchResultId'),
        SearchResult.details.alias('detailsLink'),
        Search.internal,
        IndexerApiAccess.response_successful,
        IndexerApiAccess.username,
    )

    # All joins are LEFT OUTER so a download without a matching search result,
    # API access or search row is still listed.
    base_query = (IndexerNzbDownload()
                  .select(*selected_fields)
                  .join(SearchResult, JOIN.LEFT_OUTER)
                  .switch(IndexerNzbDownload)
                  .join(IndexerApiAccess, JOIN.LEFT_OUTER)
                  .join(Indexer, JOIN.LEFT_OUTER)
                  .switch(IndexerApiAccess)
                  .join(IndexerSearch, JOIN.LEFT_OUTER)
                  .join(Search, JOIN.LEFT_OUTER))

    # Optionally restrict to internal (GUI) or external (API) searches.
    if type == "Internal":
        base_query = base_query.where(Search.internal)
    elif type == "API":
        base_query = base_query.where(~Search.internal)

    total = base_query.count()
    rows = list(base_query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    return {"totalDownloads": total, "nzbDownloads": rows}
def get_nzb_downloads(page=0, limit=100, type=None):
    """Return one page of the NZB download history, newest first.

    Args:
        page: zero-based page index for peewee's paginate().
        limit: maximum rows per page.
        type: "Internal" or "API" to restrict by originating search type;
            anything else returns all downloads.

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    # Explicit join conditions expressed in the WHERE clause; the joins below
    # are plain inner joins relying on these predicates.
    conditions = (IndexerNzbDownload.indexer == Indexer.id) \
        & (Search.id == IndexerSearch.search) \
        & (IndexerApiAccess.indexer_search == IndexerSearch.id)

    # Narrow to internal (GUI) or external (API) searches when requested.
    if type == "Internal":
        conditions = conditions & Search.internal
    elif type == "API":
        conditions = conditions & (~Search.internal)

    base_query = (IndexerNzbDownload()
                  .select(Indexer.name,
                          IndexerNzbDownload.title,
                          IndexerNzbDownload.time,
                          IndexerNzbDownload.guid,
                          Search.internal,
                          IndexerApiAccess.response_successful)
                  .join(IndexerApiAccess)
                  .join(Indexer)
                  .join(IndexerSearch)
                  .join(Search)
                  .where(conditions))

    total = base_query.count()
    # GROUP BY the download id so the joins cannot duplicate rows.
    rows = list(base_query
                .order_by(IndexerNzbDownload.time.desc())
                .group_by(IndexerNzbDownload.id)
                .paginate(page, limit)
                .dicts())
    return {"totalDownloads": total, "nzbDownloads": rows}
def get_nzb_downloads(page=0, limit=100, type=None):
    """Return one page of the NZB download history, newest first.

    Args:
        page: zero-based page index for peewee's paginate().
        limit: maximum rows per page.
        type: "Internal" or "API" to filter on IndexerNzbDownload.internal;
            any other value returns everything.

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    # LEFT OUTER joins keep downloads whose related API access, indexer or
    # search result rows are gone.
    base_query = (IndexerNzbDownload()
                  .select(Indexer.name.alias("indexerName"),
                          IndexerNzbDownload.title,
                          IndexerNzbDownload.time,
                          IndexerNzbDownload.internal,
                          SearchResult.id.alias('searchResultId'),
                          SearchResult.details.alias('detailsLink'),
                          IndexerApiAccess.response_successful,
                          IndexerApiAccess.username)
                  .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER)
                  .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER))

    # The internal flag lives directly on the download row here, so no join
    # against Search is needed for filtering.
    if type == "Internal":
        base_query = base_query.where(IndexerNzbDownload.internal)
    elif type == "API":
        base_query = base_query.where(~IndexerNzbDownload.internal)

    total = base_query.count()
    rows = list(base_query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    return {"totalDownloads": total, "nzbDownloads": rows}
def get_nzb_downloads(page=0, limit=100, type=None):
    """Return one page of the NZB download history, newest first.

    Args:
        page: zero-based page index for peewee's paginate().
        limit: maximum rows per page.
        type: "Internal" or "API" to restrict by originating search type;
            anything else returns all downloads.

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    selected_fields = (
        Indexer.name,
        IndexerNzbDownload.title,
        IndexerNzbDownload.time,
        IndexerNzbDownload.guid,
        Search.internal,
        IndexerApiAccess.response_successful,
        IndexerApiAccess.username,
    )

    # LEFT OUTER joins so downloads with missing related rows still appear.
    base_query = (IndexerNzbDownload()
                  .select(*selected_fields)
                  .join(IndexerSearch, JOIN.LEFT_OUTER)
                  .join(Search, JOIN.LEFT_OUTER)
                  .switch(IndexerNzbDownload)
                  .join(IndexerApiAccess, JOIN.LEFT_OUTER)
                  .join(Indexer, JOIN.LEFT_OUTER))

    # Optionally restrict to internal (GUI) or external (API) searches.
    if type == "Internal":
        base_query = base_query.where(Search.internal)
    elif type == "API":
        base_query = base_query.where(~Search.internal)

    total = base_query.count()
    rows = list(base_query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    return {"totalDownloads": total, "nzbDownloads": rows}
def checkHitOrDownloadLimit(p):
    """Decide whether indexer *p* may be used given its API hit and download limits.

    Counts successful API accesses and NZB downloads for the indexer inside the
    limit window (either since the configured daily reset hour, or a rolling
    24-hour window) and refuses the pick when a limit is reached.

    Args:
        p: indexer wrapper exposing ``p.indexer`` (the DB entity) and
           ``p.settings`` with ``hitLimit``, ``downloadLimit`` and
           ``hitLimitResetTime``.

    Returns:
        (True, None) when the indexer may be picked, otherwise
        (False, "API limit reached") or (False, "Download limit reached").
    """
    if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
        if p.settings.hitLimitResetTime:
            # Fixed daily reset hour: count everything since the most recent reset.
            comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
            if comparisonTime > arrow.utcnow():
                comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
        else:
            # Use rolling time window
            comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
        if p.settings.hitLimit > 0:
            # Only successful responses count against the hit limit.
            apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
            apiHits = apiHitsQuery.count()
            if apiHits >= p.settings.hitLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        # Oldest hit still inside the window; one day after it the
                        # rolling window frees a slot again. limit(1) added so we
                        # fetch a single row instead of the whole tail (matches
                        # the download branch below).
                        firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        # Fixed: this message was a single string broken by a raw
                        # newline (a syntax error); rejoined into one literal.
                        logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
                return False, "API limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))
        if p.settings.downloadLimit > 0:
            downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
            downloads = downloadsQuery.count()
            if downloads >= p.settings.downloadLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
                return False, "Download limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))
    return True, None
def checkHitOrDownloadLimit(p):
    """Decide whether indexer *p* may be used given its API hit and download limits.

    Counts successful API accesses and NZB downloads for the indexer inside the
    limit window (either since the configured daily reset hour, or a rolling
    24-hour window) and refuses the pick when a limit is reached.

    Args:
        p: indexer wrapper exposing ``p.indexer`` (the DB entity) and
           ``p.settings`` with ``hitLimit``, ``downloadLimit`` and
           ``hitLimitResetTime``.

    Returns:
        (True, None) when the indexer may be picked, otherwise
        (False, "API limit reached") or (False, "Download limit reached").
    """
    if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
        if p.settings.hitLimitResetTime:
            # Fixed daily reset hour: count everything since the most recent reset.
            comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
            if comparisonTime > arrow.utcnow():
                comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
        else:
            # Use rolling time window
            comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
        if p.settings.hitLimit > 0:
            # Only successful responses count against the hit limit.
            apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
            apiHits = apiHitsQuery.count()
            if apiHits >= p.settings.hitLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        # Oldest hit still inside the window; one day after it the
                        # rolling window frees a slot again. limit(1) added so we
                        # fetch a single row instead of the whole tail (matches
                        # the download branch below).
                        firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        # Fixed: this message was a single string broken by a raw
                        # newline (a syntax error); rejoined into one literal.
                        logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
                return False, "API limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))
        if p.settings.downloadLimit > 0:
            downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
            downloads = downloadsQuery.count()
            if downloads >= p.settings.downloadLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
                return False, "Download limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))
    return True, None
def get_nzb_downloads(page=0, limit=100, filterModel=None, sortModel=None):
    """Return one page of the NZB download history plus the total row count.

    Args:
        page: zero-based page index handed to peewee's paginate().
        limit: maximum number of rows per page.
        filterModel: optional grid filter spec for extendQueryWithFilter.
        sortModel: optional grid sort spec for extendQueryWithSorting.

    Returns:
        dict with "totalDownloads" (int) and "nzbDownloads" (list of row dicts).
    """
    # Grid column name -> peewee field for the shared filter/sort helpers.
    grid_columns = {
        "time": IndexerApiAccess.time,
        "indexer": Indexer.name,
        "title": IndexerNzbDownload.title,
        "access": IndexerNzbDownload.internal,
        "successful": IndexerApiAccess.response_successful,
        "username": IndexerApiAccess.username,
    }

    # LEFT OUTER joins so a download is listed even when its API access,
    # indexer or search result row no longer exists.
    query = (IndexerNzbDownload()
             .select(Indexer.name.alias("indexerName"),
                     IndexerNzbDownload.title,
                     IndexerApiAccess.time,
                     IndexerNzbDownload.internal,
                     SearchResult.id.alias('searchResultId'),
                     SearchResult.details.alias('detailsLink'),
                     IndexerApiAccess.response_successful,
                     IndexerApiAccess.username)
             .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER)
             .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER))

    query = extendQueryWithFilter(grid_columns, filterModel, query)
    # Default ordering is newest first when sortModel does not override it.
    query = extendQueryWithSorting(grid_columns, query, sortModel, IndexerApiAccess.time.desc())

    return {
        "totalDownloads": query.count(),
        "nzbDownloads": list(query.paginate(page, limit).dicts()),
    }