Example #1
def get_indexer_nzb_link(indexer_name, indexerguid, title, searchid, mode, log_api_access):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader) 
    :return: str
    """
    for p in indexers.enabled_indexers:
        if p.name.strip() == indexer_name.strip():
            link = p.get_nzb_link(indexerguid, title)

            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid) if log_api_access else None
            if papiaccess is not None:
                try:
                    papiaccess.username = request.authorization.username if request.authorization is not None else None
                except RuntimeError:
                    # request.authorization is only available inside a request context
                    pass
                papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode=mode, title=title, guid=indexerguid)
            pnzbdl.save()

            return link, papiaccess, pnzbdl

    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None, None, None
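
The function relies on Python's for/else: the else branch runs only when the loop finishes without returning (or breaking) early. A minimal, self-contained sketch of the idiom, with illustrative names only:

def find_indexer(enabled_indexers, wanted_name):
    # for/else: the else block runs only if the loop was not left early
    for indexer in enabled_indexers:
        if indexer == wanted_name:
            return indexer
    else:
        # No indexer matched; mirrors the (None, None, None) fallback above
        return None

print(find_indexer(["newznab1", "newznab2"], "newznab2"))  # newznab2
print(find_indexer(["newznab1"], "missing"))               # None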
Example #2
    def get_url_with_papi_access(self, url, type, cookies=None, timeout=None, saveToDb=True):
        papiaccess = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            # RuntimeError is raised when this runs outside a request context, e.g. during searches, which run in a thread. When downloading NFOs etc. the context is available and this works
            pass
        indexerStatus = None
        try:
            time_before = arrow.utcnow()
            response = self.get(url, cookies=cookies, timeout=timeout)
            response.raise_for_status()

            time_after = arrow.utcnow()
            papiaccess.response_time = (time_after - time_before).seconds * 1000 + ((time_after - time_before).microseconds / 1000)
            papiaccess.response_successful = True
            self.debug("HTTP request to indexer completed in %dms" % papiaccess.response_time)
            indexerStatus = self.handle_indexer_success(doSaveIndexerStatus=saveToDb)
        except RequestException as e:
            self.error("Error while connecting to URL %s: %s" % (url, str(e)))
            papiaccess.error = "Connection failed: %s" % removeSensitiveData(str(e))
            response = None
            indexerStatus = self.handle_indexer_failure("Connection failed: %s" % removeSensitiveData(str(e)), saveIndexerStatus=saveToDb)
        finally:
            if saveToDb:
                self.saveIndexerStatus(papiaccess)
        return response, papiaccess, indexerStatus
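
The millisecond arithmetic above combines timedelta.seconds and timedelta.microseconds, which is fine for sub-day durations; timedelta.total_seconds() expresses the same thing more directly. A small stand-alone sketch:

from datetime import timedelta

elapsed = timedelta(seconds=1, microseconds=250000)

# Equivalent for durations under one day (timedelta.seconds ignores .days)
ms_manual = elapsed.seconds * 1000 + elapsed.microseconds / 1000
ms_total = elapsed.total_seconds() * 1000

print(ms_manual, ms_total)  # 1250.0 1250.0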
Example #3
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader) 
    :return: str
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link

    # Log to database
    papiaccess = IndexerApiAccess(
        indexer=indexer.indexer,
        type="nzb",
        url=link,
        response_successful=None) if log_api_access else None
    if papiaccess is not None:
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            # request.authorization is only available inside a request context
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult,
                                apiAccess=papiaccess,
                                mode=mode,
                                title=searchResult.title,
                                internal=internal)
    pnzbdl.save()

    return link, papiaccess, pnzbdl
Example #4
def get_avg_indexer_response_times(after, before):
    result = []
    response_times = []
    for p in Indexer.select().order_by(Indexer.name):
        try:
            indexer = getIndexerByName(p.name)
            if not indexer.settings.enabled:
                logger.debug("Skipping download stats for %s" % p.name)
                continue
        except IndexerNotFoundException:
            logger.error("Unable to find indexer %s in configuration" % p.name)
            continue
        where = (IndexerApiAccess.response_successful) & (
            IndexerApiAccess.indexer == p
        ) & (IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)
        avg_response_time = IndexerApiAccess().select(
            fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
        if avg_response_time:
            response_times.append({
                "name": p.name,
                "avgResponseTime": int(avg_response_time)
            })
    where = (IndexerApiAccess.response_successful) & (
        IndexerApiAccess.response_time.is_null(False)) & (
            IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)
    avg_response_time = IndexerApiAccess().select(
        fn.AVG(IndexerApiAccess.response_time)).where(where).tuples()[0][0]
    for i in response_times:
        delta = i["avgResponseTime"] - avg_response_time
        i["delta"] = delta
        result.append(i)
    result = sorted(result, key=lambda x: x["name"])
    result = sorted(result, key=lambda x: x["avgResponseTime"])

    return result
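
Note the fix above: writing `IndexerApiAccess.response_time is not None` compares the field object itself in Python (always True) instead of producing SQL; peewee's is_null(False) generates the intended IS NOT NULL clause. A minimal, self-contained sketch using a throwaway model (hypothetical names, not the project's schema):

from peewee import SqliteDatabase, Model, IntegerField, fn

db = SqliteDatabase(":memory:")

class Access(Model):
    response_time = IntegerField(null=True)

    class Meta:
        database = db

db.create_tables([Access])
Access.create(response_time=120)
Access.create(response_time=None)

# is_null(False) renders as "response_time IS NOT NULL" in SQL
avg = Access.select(fn.AVG(Access.response_time)).where(
    Access.response_time.is_null(False)).scalar()
print(avg)  # 120.0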
Example #5
    def get_url_with_papi_access(self, url, type, cookies=None, timeout=None, saveToDb=True):
        papiaccess = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            # Raised when this runs outside a request context, e.g. during searches, which run in a thread. When downloading NFOs etc. this works
            pass
        indexerStatus = None
        try:
            time_before = arrow.utcnow()
            response = self.get(url, cookies=cookies, timeout=timeout)
            response.raise_for_status()
            time_after = arrow.utcnow()
            papiaccess.response_time = (time_after - time_before).seconds * 1000 + ((time_after - time_before).microseconds / 1000)
            papiaccess.response_successful = True
            indexerStatus = self.handle_indexer_success(saveIndexerStatus=saveToDb)
        except RequestException as e:
            self.logger.error("Error while connecting to URL %s: %s" % (url, str(e)))
            papiaccess.error = "Connection failed: %s" % removeSensitiveData(str(e))
            response = None
            indexerStatus = self.handle_indexer_failure("Connection failed: %s" % removeSensitiveData(str(e)), saveIndexerStatus=saveToDb)
        finally:
            if saveToDb:
                papiaccess.save()
        return response, papiaccess, indexerStatus
Example #6
def checkHitOrDownloadLimit(p):
    if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
        if p.settings.hitLimitResetTime:
            comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
            if comparisonTime > arrow.utcnow():
                comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
        else:
            # Use rolling time window
            comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
    if p.settings.hitLimit > 0:
        apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
        apiHits = apiHitsQuery.count()
        if apiHits >= p.settings.hitLimit:
            if p.settings.hitLimitResetTime:
                logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
            else:
                try:
                    firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit - 1).limit(1).dicts())[0]["time"]).to("local")
                    nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                except IndexerApiAccess.DoesNotExist:
                    logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
            return False, "API limit reached"
        else:
            logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))

    if p.settings.downloadLimit > 0:
        downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
        downloads = downloadsQuery.count()
        if downloads >= p.settings.downloadLimit:
            if p.settings.hitLimitResetTime:
                logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
            else:
                try:
                    firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit-1).limit(1).dicts())[0]["time"]).to("local")
                    nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                    logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                except IndexerApiAccess.DoesNotExist:
                    logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
            return False, "Download limit reached"
        else:
            logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))

    return True, None
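
The comparison-time logic above supports two window modes: a fixed daily reset hour and a rolling 24-hour window. A self-contained sketch of the same decision (hypothetical function name):

import datetime
import arrow

def window_start(reset_hour=None):
    now = arrow.utcnow()
    if reset_hour is not None:
        start = now.replace(hour=reset_hour, minute=0, second=0)
        if start > now:
            # Today's reset time has not been reached yet, so the window started yesterday
            start = arrow.get(start.datetime - datetime.timedelta(days=1))
        return start
    # Rolling 24-hour window
    return arrow.get(now.datetime - datetime.timedelta(days=1))

print(window_start(12))
print(window_start())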
Example #7
def get_nzb_link(indexer_name, guid, title, searchid):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader) 
    :return: str
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:
            link = p.get_nzb_link(guid, title)

            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid)
            papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="redirect")
            pnzbdl.save()

            return link

    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None
Example #8
    def testIndexersApiLimits(self):

        config.settings.searching.generate_queries = []
        self.newznab1.hitLimit = 3
        self.newznab1.hitLimitResetTime = None
        config.settings.indexers = [self.newznab1]
        read_indexers_from_config()
        search_request = SearchRequest()
        indexers = search.pick_indexers(search_request)
        self.assertEqual(1, len(indexers))
        dbsearch = Search(internal=True, time=arrow.utcnow().datetime)
        dbsearch.save()
        indexer = Indexer().get(name="newznab1")
        
        # Two accesses, one and twelve hours ago
        IndexerApiAccess(indexer=indexer, search=dbsearch, time=arrow.utcnow().replace(hours=-1).datetime, type="search", url="", response_successful=True).save()
        IndexerApiAccess(indexer=indexer, search=dbsearch, time=arrow.utcnow().replace(hours=-12).datetime, type="search", url="", response_successful=True).save()
        self.assertEqual(1, len(search.pick_indexers(search_request)))

        # Another one 20 hours ago, so the limit should be reached
        IndexerApiAccess(indexer=indexer, search=dbsearch, time=arrow.utcnow().replace(hours=-20).datetime, type="search", url="", response_successful=True).save()
        self.assertEqual(0, len(search.pick_indexers(search_request)))
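
The test uses arrow's older API, where replace() accepted relative plural offsets such as hours=-1; on current arrow releases the equivalent is shift(). A one-line stand-alone sketch:

import arrow

# Newer arrow: shift() for relative offsets, replace() only for absolute fields
an_hour_ago = arrow.utcnow().shift(hours=-1)
print(an_hour_ago)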
Example #9
def get_indexer_response_times():
    result = []
    for p in Indexer.select().order_by(Indexer.name):
        result.append({
            "key": p.name,
            "values": [{
                "responseTime": x.response_time,
                "date": x.time.timestamp
            } for x in IndexerApiAccess().select(IndexerApiAccess.response_time, IndexerApiAccess.time).where(
                (IndexerApiAccess.response_successful) & (IndexerApiAccess.indexer == p)).join(Indexer).limit(1)]
        })
    return result
Example #10
def get_avg_indexer_response_times():
    result = []
    response_times = []
    for p in Indexer.select().order_by(Indexer.name):

        avg_response_time = IndexerApiAccess().select(
            fn.AVG(IndexerApiAccess.response_time)).where(
                (IndexerApiAccess.response_successful)
                & (IndexerApiAccess.indexer == p)).tuples()[0][0]
        if avg_response_time:
            response_times.append({
                "name": p.name,
                "avgResponseTime": avg_response_time
            })
    avg_response_time = IndexerApiAccess().select(
        fn.AVG(IndexerApiAccess.response_time)).where(
            (IndexerApiAccess.response_successful)
            & (IndexerApiAccess.response_time.is_null(False))).tuples()[0][0]
    for i in response_times:
        delta = i["avgResponseTime"] - avg_response_time
        i["delta"] = delta
        result.append(i)

    return result
Example #11
def download_nzb_and_log(indexer_name, provider_guid, title, searchid):
    """
    Gets the NZB link from the indexer using the guid, downloads it and logs the download

    :param indexer_name: name of the indexer
    :param provider_guid: guid to build link
    :param title: the title to build the link
    :param searchid: the id of the IndexerSearch entry so we can link the download to a search
    :return: IndexerNzbDownloadResult
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:

            link = p.get_nzb_link(provider_guid, title)
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            psearch = IndexerSearch.get((IndexerSearch.indexer == indexer) & (IndexerSearch.search == searchid))
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, indexer_search=psearch)
            papiaccess.save()

            internallink, guid = get_nzb_link_and_guid(indexer_name, provider_guid, searchid, title)
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="serve", title=title, guid=internallink)
            pnzbdl.save()
            try:
                r = p.get(link, timeout=10)
                r.raise_for_status()

                papiaccess.response_successful = True
                papiaccess.response_time = r.elapsed.total_seconds() * 1000  # elapsed.microseconds alone would drop whole seconds

                return IndexerNzbDownloadResult(content=r.content, headers=r.headers)
            except RequestException as e:
                logger.error("Error while connecting to URL %s: %s" % (link, str(e)))
                papiaccess.error = str(e)
                return None
            finally:
                papiaccess.save()
    else:
        return "Unable to find NZB link"
Example #12
    def get_url_with_papi_access(self, url, type, cookies=None, timeout=None):
        papiaccess = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)

        try:
            time_before = arrow.utcnow()
            response = self.get(url, cookies=cookies, timeout=timeout)
            response.raise_for_status()
            time_after = arrow.utcnow()
            papiaccess.response_time = (time_after - time_before).seconds * 1000 + ((time_after - time_before).microseconds / 1000)
            papiaccess.response_successful = True
            self.handle_indexer_success()
        except RequestException as e:
            self.logger.error("Error while connecting to URL %s: %s" % (url, str(e)))
            papiaccess.error = "Connection failed: %s" % str(e)
            response = None
            self.handle_indexer_failure("Connection failed: %s" % str(e))
        finally:
            papiaccess.save()
        return response, papiaccess
Example #13
def pick_indexers(search_request, internal=True):
    # type: (nzbhydra.search.SearchRequest, bool) -> List[nzbhydra.search_modules.SearchModule]
    query_supplied = True if search_request.query else False
    queryCanBeGenerated = None  # Stores whether a query can be generated from IDs. Initially None; set to False the first time generation is needed and fails
    picked_indexers = []
    selected_indexers = search_request.indexers.split(
        "|") if search_request.indexers is not None else None
    for p in indexers.enabled_indexers:
        if not p.settings.enabled:
            logger.debug("Did not pick %s because it is disabled" % p)
            continue
        if internal and p.settings.accessType == "external":
            logger.debug(
                "Did not pick %s because it is only enabled for external searches"
                % p)
            continue
        if not internal and p.settings.accessType == "internal":
            logger.debug(
                "Did not pick %s because it is only enabled for internal searches"
                % p)
            continue
        if selected_indexers and p.name not in selected_indexers:
            logger.debug(
                "Did not pick %s because it was not selected by the user" % p)
            continue
        try:
            status = p.indexer.status.get()
            if status.disabled_until and status.disabled_until > arrow.utcnow() and not config.settings.searching.ignoreTemporarilyDisabled:
                logger.info(
                    "Did not pick %s because it is disabled temporarily due to an error: %s"
                    % (p, status.reason))
                continue
        except IndexerStatus.DoesNotExist:
            pass

        if p.settings.hitLimit > 0:
            if p.settings.hitLimitResetTime:
                hitLimitResetTime = arrow.get(p.settings.hitLimitResetTime)
                comparisonTime = arrow.now().replace(
                    hour=hitLimitResetTime.hour,
                    minute=hitLimitResetTime.minute,
                    second=0)
                if comparisonTime > arrow.now():
                    comparisonTime = arrow.get(
                        comparisonTime.datetime - timedelta(days=1)
                    )  #Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
            else:
                comparisonTime = arrow.now().replace(hour=0,
                                                     minute=0,
                                                     second=0)

            apiHits = IndexerApiAccess().select().where(
                (IndexerApiAccess.indexer == p.indexer)
                & (IndexerApiAccess.time > comparisonTime)
                & IndexerApiAccess.response_successful).count()
            if apiHits >= p.settings.hitLimit:
                logger.info(
                    "Did not pick %s because its API hit limit of %d was reached"
                    % (p, p.settings.hitLimit))
                continue
            else:
                logger.debug(
                    "%s has had %d of a maximum of %d API hits since %02d:%02d"
                    % (p, apiHits, p.settings.hitLimit, comparisonTime.hour,
                       comparisonTime.minute))

        if (query_supplied or search_request.identifier_key
                is not None) and not p.supports_queries:
            logger.debug(
                "Did not pick %s because a query was supplied but the indexer does not support queries"
                % p)
            continue

        # Here on we check if we could supply the indexer with generated/retrieved data like the title of a series
        if not query_supplied and p.needs_queries and search_request.identifier_key is None:
            logger.debug(
                "Did not pick %s because no query was supplied but the indexer needs queries"
                % p)
            continue

        # If we can theoretically do that we must try to actually get the title, otherwise the indexer won't be able to search
        allow_query_generation = (
            config.InternalExternalSelection.internal
            in config.settings.searching.generate_queries
            and internal) or (config.InternalExternalSelection.external
                              in config.settings.searching.generate_queries
                              and not internal)
        if search_request.identifier_key is not None and not canUseIdKey(
                p, search_request.identifier_key):
            if not (allow_query_generation and p.generate_queries):
                logger.debug(
                    "Did not pick %s because search will be done by an identifier and the indexer or system wide settings don't allow query generation"
                    % p)
                continue
            else:
                if queryCanBeGenerated is None:
                    try:
                        title = infos.convertId(
                            search_request.identifier_key, "title",
                            search_request.identifier_value)
                        if title:
                            search_request.title = title
                            queryCanBeGenerated = True
                        else:
                            queryCanBeGenerated = False
                    except Exception:
                        queryCanBeGenerated = False
                        logger.debug(
                            "Unable to get title for supplied ID. Indexers that don't support the ID will be skipped"
                        )
                if not queryCanBeGenerated:
                    logger.debug(
                        "Did not pick %s because search will be done by an identifier and retrieval of the title for query generation failed"
                        % p)
                    continue

        logger.debug("Picked %s" % p)
        picked_indexers.append(p)

    return picked_indexers
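
canUseIdKey is referenced above but not part of this snippet; a minimal hypothetical stand-in, consistent with how it is called (an indexer plus an identifier key such as "imdbid"), might look like this:

def canUseIdKey(indexer, identifier_key):
    # Hypothetical: assume the indexer exposes the ID types it supports
    # (e.g. ["imdbid", "tvdbid"]) as a search_ids attribute
    return identifier_key in getattr(indexer, "search_ids", [])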
Example #14
    def execute_queries(self, queries, searchRequest):
        if len(queries) == 0:
            return QueriesExecutionResult(didsearch=False, results=[], indexerSearchEntry=None, indexerApiAccessEntry=None, indexerStatus=None, total=0, loaded_results=0, total_known=True, has_more=False)
        results = []
        executed_queries = set()
        psearch = IndexerSearch(indexer=self.indexer)
        papiaccess = IndexerApiAccess()
        indexerStatus = None
        #psearch.save()
        total_results = 0
        total_known = False
        has_more = False
        while len(queries) > 0:
            query = queries.pop()
            if query in executed_queries:
                # To make sure that in case an offset is reported wrong or we have a bug we don't get stuck in an endless loop 
                continue

            try:
                request, papiaccess, indexerStatus = self.get_url_with_papi_access(query, "search", saveToDb=False)
                papiaccess.indexer_search = psearch

                executed_queries.add(query)
                #papiaccess.save()

                if request is not None:
                    self.check_auth(request.text)
                    self.debug("Successfully loaded URL %s" % request.url)
                    try:

                        parsed_results = self.process_query_result(request.content, searchRequest)
                        results.extend(parsed_results.entries)  # Retrieve the processed results
                        queries.extend(parsed_results.queries)  # Add queries that were added as a result of the parsing, e.g. when the next result page should also be loaded
                        total_results += parsed_results.total
                        total_known = parsed_results.total_known
                        has_more = parsed_results.has_more

                        papiaccess.response_successful = True
                        self.handle_indexer_success(False)
                    except IndexerResultParsingException as e:
                        self.error("Error while processing search results from indexer %s" % e)
                    except Exception:
                        self.exception("Error while processing search results from indexer %s" % self)
                        raise IndexerResultParsingException("Error while parsing the results from indexer", self)
            except IndexerAuthException as e:
                self.error("Unable to authorize with %s: %s" % (e.search_module, e.message))
                papiaccess.error = "Authorization error :%s" % e.message
                self.handle_indexer_failure(reason="Authentication failed", disable_permanently=True)
                papiaccess.response_successful = False
            except IndexerAccessException as e:
                self.error("Unable to access %s: %s" % (e.search_module, e.message))
                papiaccess.error = "Access error: %s" % e.message
                self.handle_indexer_failure(reason="Access failed")
                papiaccess.response_successful = False
            except IndexerResultParsingException as e:
                papiaccess.error = "Access error: %s" % e.message
                self.handle_indexer_failure(reason="Parsing results failed")
                papiaccess.response_successful = False
            except Exception as e:
                self.exception("An error error occurred while searching: %s", e)
                if papiaccess is not None:
                    papiaccess.error = "Unknown error :%s" % e
                    papiaccess.response_successful = False
            finally:
                if papiaccess is not None:
                    #papiaccess.save()
                    psearch.successful = papiaccess.response_successful
                else:
                    self.error("Unable to save API response to database")
                psearch.resultsCount = total_results
                #psearch.save()
        return QueriesExecutionResult(didsearch=True, results=results, indexerSearchEntry=psearch, indexerApiAccessEntry=papiaccess, indexerStatus=indexerStatus, total=total_results, loaded_results=len(results), total_known=total_known, has_more=has_more)
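
QueriesExecutionResult is constructed purely with keyword arguments here; its definition is not part of the snippet. A compatible stand-in (an assumption, not the project's actual definition) could be a namedtuple:

from collections import namedtuple

# Hypothetical stand-in matching the fields used above
QueriesExecutionResult = namedtuple("QueriesExecutionResult", [
    "didsearch", "results", "indexerSearchEntry", "indexerApiAccessEntry",
    "indexerStatus", "total", "loaded_results", "total_known", "has_more"])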
Example #15
    def testThatDatabaseValuesAreStored(self):
        with self.app.test_request_context('/'):
            with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
                newznabItems = [
                    [mockbuilder.buildNewznabItem(title="title1", pubdate=arrow.get(1000).format("ddd, DD MMM YYYY HH:mm:ss Z"), size=1000, indexer_name="newznab1")],
                    [mockbuilder.buildNewznabItem(title="title2", pubdate=arrow.get(1000).format("ddd, DD MMM YYYY HH:mm:ss Z"), size=1000, indexer_name="newznab2")]
                ]

                self.prepareSearchMocks(rsps, indexerCount=len(newznabItems), newznabItems=newznabItems)
                # Make the second access unsuccessful
                rsps._urls.pop(1)
                rsps.add(responses.GET, r".*",
                         body="an error message", status=500,
                         content_type='application/x-html')

                searchRequest = SearchRequest(type="search", query="aquery", category="acategory", identifier_key="imdbid", identifier_value="animdbid", season=1, episode=2, indexers="newznab1|newznab2")
                result = search.search(searchRequest)
                results = result["results"]
                self.assertEqual(1, len(results))

                dbSearch = Search().get()
                self.assertEqual(True, dbSearch.internal)
                self.assertEqual("aquery", dbSearch.query)
                self.assertEqual("All", dbSearch.category)
                self.assertEqual("imdbid", dbSearch.identifier_key)
                self.assertEqual("animdbid", dbSearch.identifier_value)
                self.assertEqual("1", dbSearch.season)
                self.assertEqual("2", dbSearch.episode)
                self.assertEqual("search", dbSearch.type)
                self.assertEqual(18, dbSearch.time.hour)

                indexerSearch1 = IndexerSearch.get(IndexerSearch.indexer == Indexer.get(Indexer.name == "newznab1"))
                self.assertEqual(indexerSearch1.search, dbSearch)
                self.assertEqual(18, indexerSearch1.time.hour)

                indexerSearch2 = IndexerSearch.get(IndexerSearch.indexer == Indexer.get(Indexer.name == "newznab2"))
                self.assertEqual(indexerSearch2.search, dbSearch)
                self.assertEqual(18, indexerSearch2.time.hour)

                calledUrls = sorted([x.request.url for x in rsps.calls])

                indexerApiAccess1 = IndexerApiAccess.get(IndexerApiAccess.indexer == Indexer.get(Indexer.name == "newznab1"))
                self.assertEqual(indexerSearch1, indexerApiAccess1.indexer_search)
                self.assertEqual(18, indexerApiAccess1.time.hour)
                self.assertEqual("search", indexerApiAccess1.type)
                self.assertEqual(calledUrls[0], indexerApiAccess1.url)
                self.assertTrue(indexerApiAccess1.response_successful)
                self.assertEqual(0, indexerApiAccess1.response_time)
                self.assertIsNone(indexerApiAccess1.error)

                indexerApiAccess2 = IndexerApiAccess.get(IndexerApiAccess.indexer == Indexer.get(Indexer.name == "newznab2"))
                self.assertEqual(indexerSearch2, indexerApiAccess2.indexer_search)
                self.assertEqual(18, indexerApiAccess2.time.hour)
                self.assertEqual("search", indexerApiAccess2.type)
                self.assertEqual(calledUrls[1], indexerApiAccess2.url)
                self.assertFalse(indexerApiAccess2.response_successful)
                self.assertIsNone(indexerApiAccess2.response_time)
                self.assertTrue("Connection refused" in indexerApiAccess2.error)

                indexerStatus2 = IndexerStatus.get(IndexerStatus.indexer == Indexer.get(Indexer.name == "newznab2"))
                self.assertEqual(1, indexerStatus2.level)
                self.assertTrue("Connection refused" in indexerStatus2.reason)
Example #16
    def execute_queries(self, queries, searchRequest):
        if len(queries) == 0:
            return QueriesExecutionResult(didsearch=False,
                                          results=[],
                                          indexerSearchEntry=None,
                                          indexerApiAccessEntry=None,
                                          indexerStatus=None,
                                          total=0,
                                          loaded_results=0,
                                          total_known=True,
                                          has_more=False,
                                          rejected=self.getRejectedCountDict())
        results = []
        executed_queries = set()
        psearch = IndexerSearch(indexer=self.indexer)
        papiaccess = IndexerApiAccess()
        indexerStatus = None
        total_results = 0
        total_known = False
        has_more = False
        rejected = self.getRejectedCountDict()
        while len(queries) > 0:
            query = queries.pop()
            if query in executed_queries:
                # To make sure that in case an offset is reported wrong or we have a bug we don't get stuck in an endless loop
                continue

            try:
                request, papiaccess, indexerStatus = self.get_url_with_papi_access(
                    query, "search", saveToDb=False)
                papiaccess.indexer_search = psearch

                executed_queries.add(query)

                if request is not None:
                    if request.text == "":
                        raise IndexerResultParsingException(
                            "Indexer returned an empty page", self)
                    self.check_auth(request.text)
                    self.debug("Successfully loaded URL %s" % request.url)
                    try:

                        parsed_results = self.process_query_result(request.content, searchRequest)
                        results.extend(parsed_results.entries)  # Retrieve the processed results
                        queries.extend(parsed_results.queries)  # Add queries that were added as a result of the parsing, e.g. when the next result page should also be loaded
                        total_results += parsed_results.total
                        total_known = parsed_results.total_known
                        has_more = parsed_results.has_more
                        rejected = parsed_results.rejected

                        papiaccess.response_successful = True
                        indexerStatus = self.handle_indexer_success(False)
                    except Exception:
                        self.exception(
                            "Error while processing search results from indexer %s"
                            % self)
                        raise IndexerResultParsingException(
                            "Error while parsing the results from indexer",
                            self)
            except IndexerAuthException as e:
                papiaccess.error = "Authorization error :%s" % e.message
                self.error(papiaccess.error)
                indexerStatus = self.handle_indexer_failure(
                    reason="Authentication failed", disable_permanently=True)
                papiaccess.response_successful = False
            except IndexerAccessException as e:
                papiaccess.error = "Access error: %s" % e.message
                self.error(papiaccess.error)
                indexerStatus = self.handle_indexer_failure(
                    reason="Access failed")
                papiaccess.response_successful = False
            except IndexerResultParsingException as e:
                papiaccess.error = "Access error: %s" % e.message
                self.error(papiaccess.error)
                indexerStatus = self.handle_indexer_failure(
                    reason="Parsing results failed")
                papiaccess.response_successful = False
            except Exception as e:
                self.exception("An error error occurred while searching: %s",
                               e)
                if papiaccess is not None:
                    papiaccess.error = "Unknown error :%s" % e
                    papiaccess.response_successful = False
            finally:
                if papiaccess is not None:
                    psearch.successful = papiaccess.response_successful
                else:
                    self.error("Unable to save API response to database")
                psearch.resultsCount = total_results
        return QueriesExecutionResult(didsearch=True,
                                      results=results,
                                      indexerSearchEntry=psearch,
                                      indexerApiAccessEntry=papiaccess,
                                      indexerStatus=indexerStatus,
                                      total=total_results,
                                      loaded_results=len(results),
                                      total_known=total_known,
                                      has_more=has_more,
                                      rejected=rejected)
Example #17
def pick_indexers(search_request):
    # type: (nzbhydra.search.SearchRequest) -> List[nzbhydra.search_modules.SearchModule]
    query_supplied = True if search_request.query else False
    queryCanBeGenerated = None  # Stores whether a query can be generated from IDs. Initially None; set to False the first time generation is needed and fails
    picked_indexers = []
    selected_indexers = search_request.indexers.split("|") if search_request.indexers is not None else None
    notPickedReasons = {}

    for p in indexers.enabled_indexers:
        if not p.settings.enabled:
            logger.debug("Did not pick %s because it is disabled" % p)
            add_not_picked_indexer(notPickedReasons, "Disabled", p.name)
            continue
        if search_request.internal and p.settings.accessType == "external":
            logger.debug("Did not pick %s because it is only enabled for external searches" % p)
            add_not_picked_indexer(notPickedReasons, "Disabled for API searches", p.name)
            continue
        if not search_request.internal and p.settings.accessType == "internal":
            logger.debug("Did not pick %s because it is only enabled for internal searches" % p)
            add_not_picked_indexer(notPickedReasons, "Disabled for API searches", p.name)
            continue
        if selected_indexers and p.name not in selected_indexers:
            logger.debug("Did not pick %s because it was not selected by the user" % p)
            add_not_picked_indexer(notPickedReasons, "Not selected by user", p.name)
            continue
        try:
            status = p.indexer.status.get()
            if status.disabled_until and status.disabled_until > arrow.utcnow() and not config.settings.searching.ignoreTemporarilyDisabled:
                logger.info("Did not pick %s because it is disabled temporarily due to an error: %s" % (p, status.reason))
                add_not_picked_indexer(notPickedReasons, "Temporarily disabled", p.name)
                continue
        except IndexerStatus.DoesNotExist:
            pass
        if hasattr(p.settings, "categories") and len(p.settings.categories) > 0:
            if search_request.category.category.name != "all" and search_request.category.category.name not in p.settings.categories:
                logger.debug("Did not pick %s because it is not enabled for category %s" % (p, search_request.category.category.pretty))
                add_not_picked_indexer(notPickedReasons, "Disabled for this category %s" % search_request.category.category.pretty, p.name)
                continue
        if p.settings.hitLimit > 0:
            if p.settings.hitLimitResetTime:
                comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
                if comparisonTime > arrow.utcnow():
                    comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1))  # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
            else:
                # Use rolling time window
                comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))

            apiHits = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful).count()
            if apiHits >= p.settings.hitLimit:
                if p.settings.hitLimitResetTime:
                    logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
                else:
                    try:
                        firstHitTimeInWindow = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & IndexerApiAccess.response_successful).order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit).limit(1).get().time.datetime
                        nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
                        logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
                    except IndexerApiAccess.DoesNotExist:
                        logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
                add_not_picked_indexer(notPickedReasons, "API limit reached", p.name)
                continue
            else:
                logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))

        if (query_supplied or search_request.identifier_key is not None) and not p.supports_queries:
            logger.debug("Did not pick %s because a query was supplied but the indexer does not support queries" % p)
            add_not_picked_indexer(notPickedReasons, "Does not support queries", p.name)
            continue

        # Here on we check if we could supply the indexer with generated/retrieved data like the title of a series
        if not query_supplied and p.needs_queries and search_request.identifier_key is None:
            logger.debug("Did not pick %s because no query was supplied but the indexer needs queries" % p)
            add_not_picked_indexer(notPickedReasons, "Query needed", p.name)
            continue

        # If we can theoretically do that we must try to actually get the title, otherwise the indexer won't be able to search 
        allow_query_generation = (config.InternalExternalSelection.internal in config.settings.searching.generate_queries and search_request.internal) or (config.InternalExternalSelection.external in config.settings.searching.generate_queries and not search_request.internal)
        if search_request.identifier_key is not None and not canUseIdKey(p, search_request.identifier_key):
            if not (allow_query_generation and p.generate_queries):
                logger.debug("Did not pick %s because search will be done by an identifier and the indexer or system wide settings don't allow query generation" % p)
                add_not_picked_indexer(notPickedReasons, "Does not support ID based searches", p.name)
                continue
            else:
                if queryCanBeGenerated is None:
                    try:
                        title = infos.convertId(search_request.identifier_key, "title", search_request.identifier_value)
                        if title:
                            search_request.title = title
                            queryCanBeGenerated = True
                        else:
                            queryCanBeGenerated = False
                    except Exception:
                        queryCanBeGenerated = False
                        logger.debug("Unable to get title for supplied ID. Indexers that don't support the ID will be skipped")
                if not queryCanBeGenerated:
                    logger.debug("Did not pick %s because search will be done by an identifier and retrieval of the title for query generation failed" % p)
                    add_not_picked_indexer(notPickedReasons, "Does not support ID based searches", p.name)
                    continue

        logger.debug("Picked %s" % p)
        picked_indexers.append(p)
    if len(picked_indexers) == 0:
        warning = "No indexeres were selected for this search:"
        for reason, notPickedIndexers in notPickedReasons.items():
            warning += "\r\n%s: %s" % (reason, ", ".join(notPickedIndexers))
        logger.warn(warning)

    return picked_indexers
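
add_not_picked_indexer is used throughout but not included in the snippet; a minimal hypothetical implementation consistent with its call sites (a dict mapping each skip reason to the indexer names it applied to) could be:

def add_not_picked_indexer(notPickedReasons, reason, indexerName):
    # Group the names of skipped indexers under the reason they were skipped
    notPickedReasons.setdefault(reason, []).append(indexerName)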