def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given information. We log this as indexer API
    access and NZB download because this is only called when the NZB will actually be downloaded later (by us or a
    downloader).
    :return: tuple of (link, IndexerApiAccess, IndexerNzbDownload)
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link

    # Log to database, but only if API access logging was requested
    papiaccess = IndexerApiAccess(indexer=indexer.indexer, type="nzb", url=link, response_successful=None) if log_api_access else None
    if papiaccess is not None:
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            # Not inside a request context
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult, apiAccess=papiaccess, mode=mode, title=searchResult.title, internal=internal)
    pnzbdl.save()

    return link, papiaccess, pnzbdl

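# Usage sketch (illustrative, not from the original source): a handler that resolves a search
# result to its indexer NZB link and redirects the caller. The helper name and the "redirect"
# mode string are assumptions.
def _redirect_to_nzb_example(searchResultId):
    from flask import redirect  # the functions here already rely on flask's `request`
    link, papiaccess, pnzbdl = get_indexer_nzb_link(searchResultId, mode="redirect", log_api_access=True, internal=True)
    return redirect(link)
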
def get_indexer_nzb_link(indexer_name, indexerguid, title, searchid, mode, log_api_access):
    """
    Build a link that leads to the actual NZB of the indexer using the given information. We log this as indexer API
    access and NZB download because this is only called when the NZB will actually be downloaded later (by us or a
    downloader).
    :return: tuple of (link, IndexerApiAccess, IndexerNzbDownload)
    """
    for p in indexers.enabled_indexers:
        if p.name.strip() == indexer_name.strip():
            link = p.get_nzb_link(indexerguid, title)

            # Log to database, but only if API access logging was requested
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid) if log_api_access else None
            if papiaccess is not None:
                try:
                    papiaccess.username = request.authorization.username if request.authorization is not None else None
                except RuntimeError:
                    # Not inside a request context
                    pass
                papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode=mode, title=title, guid=indexerguid)
            pnzbdl.save()

            return link, papiaccess, pnzbdl
    else:
        # for/else: only reached when no enabled indexer matched the given name
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None, None, None

def getIndexerDownloadStats():
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        dlCount = IndexerNzbDownload() \
            .select(Indexer.name, IndexerApiAccess.response_successful) \
            .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
            .join(Indexer, JOIN.LEFT_OUTER) \
            .where(Indexer.id == p) \
            .count()
        results.append({
            "name": p.name,
            "total": dlCount,
            # This indexer's downloads as a percentage of all downloads
            "share": 100 * dlCount / allDownloadsCount if allDownloadsCount > 0 and dlCount > 0 else 0
        })
    results = sorted(results, key=lambda x: x["name"])
    results = sorted(results, key=lambda x: x["share"], reverse=True)
    return results

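# Worked example (illustrative, not from the original source) of the share formula used in
# getIndexerDownloadStats above: an indexer's share is its download count as a percentage of
# all downloads across indexers.
def _share_example():
    allDownloadsCount = 200
    dlCount = 50
    share = 100 * dlCount / allDownloadsCount if allDownloadsCount > 0 and dlCount > 0 else 0
    assert share == 25  # 50 of 200 downloads -> 25 percent
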
def get_nzb_downloads(page=0, limit=100, filterModel=None, sortModel=None):
    columnNameToEntityMap = {
        "time": IndexerApiAccess.time,
        "indexer": Indexer.name,
        "title": IndexerNzbDownload.title,
        "access": IndexerNzbDownload.internal,
        "successful": IndexerApiAccess.response_successful,
        "username": IndexerApiAccess.username
    }
    query = IndexerNzbDownload() \
        .select(Indexer.name.alias("indexerName"),
                IndexerNzbDownload.title,
                IndexerApiAccess.time,
                IndexerNzbDownload.internal,
                SearchResult.id.alias("searchResultId"),
                SearchResult.details.alias("detailsLink"),
                IndexerApiAccess.response_successful,
                IndexerApiAccess.username) \
        .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER)
    query = extendQueryWithFilter(columnNameToEntityMap, filterModel, query)
    query = extendQueryWithSorting(columnNameToEntityMap, query, sortModel, IndexerApiAccess.time.desc())
    total_downloads = query.count()
    nzb_downloads = list(query.paginate(page, limit).dicts())
    downloads = {"totalDownloads": total_downloads, "nzbDownloads": nzb_downloads}
    return downloads

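# Call sketch (illustrative only): the shapes of filterModel and sortModel below are
# assumptions; extendQueryWithFilter and extendQueryWithSorting are assumed to accept
# grid-style models keyed by the names in columnNameToEntityMap.
def _get_nzb_downloads_example():
    filterModel = {"indexer": {"filter": "NZBGeek", "filtertype": "text"}}  # hypothetical shape
    sortModel = {"column": "time", "sortMode": 2}  # hypothetical shape: 2 = descending
    return get_nzb_downloads(page=1, limit=25, filterModel=filterModel, sortModel=sortModel)
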
def get_nzb_downloads(page=0, limit=100, type=None):
    where = (IndexerNzbDownload.indexer == Indexer.id) & (Search.id == IndexerSearch.search) & (IndexerApiAccess.indexer_search == IndexerSearch.id)
    if type == "Internal":
        where = where & Search.internal
    elif type == "API":
        where = where & (~Search.internal)
    query = IndexerNzbDownload() \
        .select(Indexer.name,
                IndexerNzbDownload.title,
                IndexerNzbDownload.time,
                IndexerNzbDownload.guid,
                Search.internal,
                IndexerApiAccess.response_successful) \
        .join(IndexerApiAccess).join(Indexer).join(IndexerSearch).join(Search) \
        .where(where)
    total_downloads = query.count()
    nzb_downloads = list(query.order_by(IndexerNzbDownload.time.desc()).group_by(IndexerNzbDownload.id).paginate(page, limit).dicts())
    downloads = {"totalDownloads": total_downloads, "nzbDownloads": nzb_downloads}
    return downloads

def get_nzb_downloads(page=0, limit=100, type=None):
    query = IndexerNzbDownload() \
        .select(Indexer.name.alias("indexerName"),
                IndexerNzbDownload.title,
                IndexerNzbDownload.time,
                IndexerNzbDownload.internal,
                SearchResult.id.alias("searchResultId"),
                SearchResult.details.alias("detailsLink"),
                IndexerApiAccess.response_successful,
                IndexerApiAccess.username) \
        .switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER)
    if type == "Internal":
        query = query.where(IndexerNzbDownload.internal)
    elif type == "API":
        query = query.where(~IndexerNzbDownload.internal)
    total_downloads = query.count()
    nzb_downloads = list(query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    downloads = {"totalDownloads": total_downloads, "nzbDownloads": nzb_downloads}
    return downloads

def get_nzb_downloads(page=0, limit=100, type=None):
    query = IndexerNzbDownload() \
        .select(Indexer.name,
                IndexerNzbDownload.title,
                IndexerNzbDownload.time,
                IndexerNzbDownload.guid,
                Search.internal,
                IndexerApiAccess.response_successful,
                IndexerApiAccess.username) \
        .join(IndexerSearch, JOIN.LEFT_OUTER) \
        .join(Search, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload) \
        .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
        .join(Indexer, JOIN.LEFT_OUTER)
    if type == "Internal":
        query = query.where(Search.internal)
    elif type == "API":
        query = query.where(~Search.internal)
    total_downloads = query.count()
    nzb_downloads = list(query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    downloads = {"totalDownloads": total_downloads, "nzbDownloads": nzb_downloads}
    return downloads

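# Usage sketch (illustrative): in the type-filtered variants above, "Internal" restricts the
# history to downloads triggered internally (web UI) and "API" to those made by external
# tools; any other value returns both.
def _download_history_example():
    internal = get_nzb_downloads(page=1, limit=50, type="Internal")
    api = get_nzb_downloads(page=1, limit=50, type="API")
    return internal["totalDownloads"], api["totalDownloads"]
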
def getIndexerDownloadStats():
    results = []
    allDownloadsCount = IndexerNzbDownload.select().count()
    for p in Indexer.select().order_by(Indexer.name):
        dlCount = IndexerNzbDownload() \
            .select(Indexer.name, IndexerApiAccess.response_successful) \
            .join(IndexerSearch, JOIN.LEFT_OUTER) \
            .join(Search, JOIN.LEFT_OUTER) \
            .switch(IndexerNzbDownload) \
            .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
            .join(Indexer, JOIN.LEFT_OUTER) \
            .where(Indexer.id == p) \
            .count()
        results.append({
            "name": p.name,
            "total": dlCount,
            # This indexer's downloads as a percentage of all downloads
            "share": 100 * dlCount / allDownloadsCount if allDownloadsCount > 0 and dlCount > 0 else 0
        })
    return results

def checkHitOrDownloadLimit(p):
    if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
        if p.settings.hitLimitResetTime:
            comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
            if comparisonTime > arrow.utcnow():
                comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
        else:
            # Use a rolling time window
            comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
        if p.settings.hitLimit > 0:
            apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
            apiHits = apiHitsQuery.count()
            if apiHits >= p.settings.hitLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit - 1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
                return False, "API limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))
        if p.settings.downloadLimit > 0:
            downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
            downloads = downloadsQuery.count()
            if downloads >= p.settings.downloadLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
                return False, "Download limit reached"
            else:
                logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))
    return True, None

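# Usage sketch (illustrative, not from the original source): checkHitOrDownloadLimit returns
# an (allowed, reason) tuple, so an indexer picker can skip limited indexers and report why.
def _pick_indexer_example(p):
    allowed, reason = checkHitOrDownloadLimit(p)
    if not allowed:
        logger.debug("Skipping %s: %s" % (p, reason))
    return allowed
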
def getTimeBasedDownloadStats():
    downloads = IndexerNzbDownload() \
        .select(Indexer.name, IndexerApiAccess.response_successful, IndexerNzbDownload.time) \
        .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
        .join(Indexer, JOIN.LEFT_OUTER)
    downloadTimes = [arrow.get(x.time) for x in downloads]
    perDayOfWeek, perHourOfDay = calculcateTimeBasedStats(downloadTimes)
    return {"perDayOfWeek": perDayOfWeek, "perHourOfDay": perHourOfDay}

def getTimeBasedDownloadStats(after, before):
    downloads = list(IndexerNzbDownload()
                     .select(Indexer.name, IndexerApiAccess.response_successful, IndexerApiAccess.time)
                     .where((IndexerApiAccess.time > after) & (IndexerApiAccess.time < before))
                     .join(IndexerApiAccess, JOIN.LEFT_OUTER)
                     .join(Indexer, JOIN.LEFT_OUTER)
                     .dicts())
    downloadTimes = [arrow.get(x["time"]).to(tz.tzlocal()) for x in downloads]
    perDayOfWeek, perHourOfDay = calculcateTimeBasedStats(downloadTimes)
    return {"perDayOfWeek": perDayOfWeek, "perHourOfDay": perHourOfDay}

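# Usage sketch (illustrative): the two getTimeBasedDownloadStats variants above bucket download
# timestamps by weekday and hour; the windowed variant takes an after/before range, e.g. the
# last 30 days.
def _download_stats_last_month_example():
    import datetime
    import arrow
    before = arrow.utcnow()
    after = arrow.get(before.datetime - datetime.timedelta(days=30))
    return getTimeBasedDownloadStats(after, before)
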
def get_nzb_downloads(page=0, limit=100, type=None):
    query = IndexerNzbDownload() \
        .select(Indexer.name.alias("indexerName"),
                IndexerNzbDownload.title,
                IndexerNzbDownload.time,
                SearchResult.id.alias("searchResultId"),
                SearchResult.details.alias("detailsLink"),
                Search.internal,
                IndexerApiAccess.response_successful,
                IndexerApiAccess.username) \
        .join(SearchResult, JOIN.LEFT_OUTER) \
        .switch(IndexerNzbDownload) \
        .join(IndexerApiAccess, JOIN.LEFT_OUTER) \
        .join(Indexer, JOIN.LEFT_OUTER) \
        .switch(IndexerApiAccess) \
        .join(IndexerSearch, JOIN.LEFT_OUTER) \
        .join(Search, JOIN.LEFT_OUTER)
    if type == "Internal":
        query = query.where(Search.internal)
    elif type == "API":
        query = query.where(~Search.internal)
    total_downloads = query.count()
    nzb_downloads = list(query.order_by(IndexerNzbDownload.time.desc()).paginate(page, limit).dicts())
    downloads = {"totalDownloads": total_downloads, "nzbDownloads": nzb_downloads}
    return downloads

def get_nzb_link(indexer_name, guid, title, searchid):
    """
    Build a link that leads to the actual NZB of the indexer using the given information. We log this as indexer API
    access and NZB download because this is only called when the NZB will actually be downloaded later (by us or a
    downloader).
    :return: str
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:
            link = p.get_nzb_link(guid, title)

            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid)
            papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="redirect")
            pnzbdl.save()

            return link
    else:
        # for/else: only reached when no enabled indexer matched the given name
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None

def download_nzb_and_log(indexer_name, provider_guid, title, searchid):
    """
    Gets the NZB link from the indexer using the guid, downloads it and logs the download
    :param indexer_name: name of the indexer
    :param provider_guid: guid to build link
    :param title: the title to build the link
    :param searchid: the id of the IndexerSearch entry so we can link the download to a search
    :return: IndexerNzbDownloadResult
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:
            link = p.get_nzb_link(provider_guid, title)

            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            psearch = IndexerSearch.get((IndexerSearch.indexer == indexer) & (IndexerSearch.search == searchid))
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, indexer_search=psearch)
            papiaccess.save()

            internallink, guid = get_nzb_link_and_guid(indexer_name, provider_guid, searchid, title)
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="serve", title=title, guid=internallink)
            pnzbdl.save()

            try:
                r = p.get(link, timeout=10)
                r.raise_for_status()
                papiaccess.response_successful = True
                papiaccess.response_time = int(r.elapsed.total_seconds() * 1000)  # total elapsed time in milliseconds
                return IndexerNzbDownloadResult(content=r.content, headers=r.headers)
            except RequestException as e:
                logger.error("Error while connecting to URL %s: %s" % (link, str(e)))
                papiaccess.error = str(e)
                return None
            finally:
                papiaccess.save()
    else:
        # for/else: only reached when no enabled indexer matched the given name
        return "Unable to find NZB link"

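# Usage sketch (illustrative, names assumed): serving the downloaded NZB to the client.
# IndexerNzbDownloadResult is assumed to expose `content` and `headers` as set above.
def _serve_nzb_example(indexer_name, provider_guid, title, searchid):
    result = download_nzb_and_log(indexer_name, provider_guid, title, searchid)
    if not isinstance(result, IndexerNzbDownloadResult):
        # Either None (download failed) or the "Unable to find NZB link" string
        return "NZB could not be downloaded", 500
    return result.content, 200, {"Content-Type": result.headers.get("Content-Type", "application/x-nzb")}
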