def get_url_with_papi_access(self, url, type, cookies=None, timeout=None, saveToDb=True):
    """
    Perform a GET request to the indexer and log the call as an IndexerApiAccess entry.

    :param url: the URL to request
    :param type: the access type stored in the log entry (e.g. "search", "nzb")
    :param cookies: optional cookies forwarded to the request
    :param timeout: optional request timeout
    :param saveToDb: when True, persist the IndexerApiAccess entry and the indexer status
    :return: tuple (response or None, IndexerApiAccess entry, indexer status or None)
    """
    papiaccess = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
    try:
        papiaccess.username = request.authorization.username if request.authorization is not None else None
    except RuntimeError:
        # Is thrown when we're searching which is run in a thread. When downloading NFOs or whatever this will work
        pass
    indexerStatus = None
    try:
        time_before = arrow.utcnow()
        response = self.get(url, cookies=cookies, timeout=timeout)
        response.raise_for_status()
        time_after = arrow.utcnow()
        # Response time in milliseconds
        papiaccess.response_time = (time_after - time_before).seconds * 1000 + ((time_after - time_before).microseconds / 1000)
        papiaccess.response_successful = True
        indexerStatus = self.handle_indexer_success(saveIndexerStatus=saveToDb)
    except RequestException as e:
        # Fixed: was "self.error(...)"; use the logger as the sibling implementation does
        self.logger.error("Error while connecting to URL %s: %s" % (url, str(e)))
        papiaccess.error = "Connection failed: %s" % removeSensitiveData(str(e))
        response = None
        indexerStatus = self.handle_indexer_failure("Connection failed: %s" % removeSensitiveData(str(e)), saveIndexerStatus=saveToDb)
    finally:
        if saveToDb:
            papiaccess.save()
    return response, papiaccess, indexerStatus
def get_url_with_papi_access(self, url, type, cookies=None, timeout=None, saveToDb=True):
    """
    Fetch the given URL via GET while recording the call as an IndexerApiAccess entry.

    :param url: URL to fetch
    :param type: access type recorded in the log entry (e.g. "search", "nzb")
    :param cookies: cookies forwarded to the request, if any
    :param timeout: request timeout, if any
    :param saveToDb: persist the access entry and indexer status when True
    :return: tuple (response or None, IndexerApiAccess entry, indexer status or None)
    """
    access = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
    try:
        auth = request.authorization
        access.username = None if auth is None else auth.username
    except RuntimeError:
        # Raised while searching, which runs in a thread without a request context.
        # When downloading NFOs or similar this will work.
        pass
    status = None
    response = None
    try:
        started = arrow.utcnow()
        response = self.get(url, cookies=cookies, timeout=timeout)
        response.raise_for_status()
        elapsed = arrow.utcnow() - started
        # Store the response time in milliseconds
        access.response_time = elapsed.seconds * 1000 + (elapsed.microseconds / 1000)
        access.response_successful = True
        status = self.handle_indexer_success(saveIndexerStatus=saveToDb)
    except RequestException as e:
        self.logger.error("Error while connecting to URL %s: %s" % (url, str(e)))
        failure = "Connection failed: %s" % removeSensitiveData(str(e))
        access.error = failure
        response = None
        status = self.handle_indexer_failure(failure, saveIndexerStatus=saveToDb)
    finally:
        if saveToDb:
            access.save()
    return response, access, status
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)

    :param searchResultId: id of the stored SearchResult row
    :param mode: download mode stored with the IndexerNzbDownload entry
    :param log_api_access: when True, also create an IndexerApiAccess log entry
    :param internal: whether the download was triggered internally
    :return: tuple (link, IndexerApiAccess entry or None, IndexerNzbDownload entry)
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link
    # Log to database
    papiaccess = IndexerApiAccess(indexer=indexer.indexer, type="nzb", url=link, response_successful=None) if log_api_access else None
    if papiaccess is not None:
        # Fixed: only touch and save the API access entry when logging is enabled;
        # previously this raised AttributeError when log_api_access was False (papiaccess is None)
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult, apiAccess=papiaccess, mode=mode, title=searchResult.title, internal=internal)
    pnzbdl.save()
    return link, papiaccess, pnzbdl
def get_indexer_nzb_link(indexer_name, indexerguid, title, searchid, mode, log_api_access):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)

    :param indexer_name: name of the indexer that supplied the result
    :param indexerguid: the indexer's guid for the NZB, used to build the link
    :param title: result title, used to build the link
    :param searchid: id of the IndexerSearch entry the download belongs to
    :param mode: download mode stored with the IndexerNzbDownload entry
    :param log_api_access: when True, also create an IndexerApiAccess log entry
    :return: tuple (link, IndexerApiAccess entry or None, IndexerNzbDownload entry), or (None, None, None) if the indexer is unknown
    """
    for p in indexers.enabled_indexers:
        if p.name.strip() == indexer_name.strip():
            link = p.get_nzb_link(indexerguid, title)
            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid) if log_api_access else None
            if papiaccess is not None:
                # Fixed: only touch and save the API access entry when logging is enabled;
                # previously this raised AttributeError when log_api_access was False (papiaccess is None)
                try:
                    papiaccess.username = request.authorization.username if request.authorization is not None else None
                except RuntimeError:
                    pass
                papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode=mode, title=title, guid=indexerguid)
            pnzbdl.save()
            return link, papiaccess, pnzbdl
    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None, None, None
def get_url_with_papi_access(self, url, type, cookies=None, timeout=None):
    """
    Fetch the given URL via GET and record the call as an IndexerApiAccess entry.

    :param url: URL to fetch
    :param type: access type recorded in the log entry (e.g. "search", "nzb")
    :param cookies: cookies forwarded to the request, if any
    :param timeout: request timeout, if any
    :return: tuple (response or None, IndexerApiAccess entry)
    """
    access = IndexerApiAccess(indexer=self.indexer, type=type, url=url, time=arrow.utcnow().datetime)
    response = None
    try:
        started = arrow.utcnow()
        response = self.get(url, cookies=cookies, timeout=timeout)
        response.raise_for_status()
        elapsed = arrow.utcnow() - started
        # Store the response time in milliseconds
        access.response_time = elapsed.seconds * 1000 + (elapsed.microseconds / 1000)
        access.response_successful = True
        self.handle_indexer_success()
    except RequestException as e:
        self.logger.error("Error while connecting to URL %s: %s" % (url, str(e)))
        failure = "Connection failed: %s" % str(e)
        access.error = failure
        response = None
        self.handle_indexer_failure(failure)
    finally:
        access.save()
    return response, access
def get_indexer_nzb_link(searchResultId, mode, log_api_access, internal=False):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)

    :param searchResultId: id of the stored SearchResult row
    :param mode: download mode stored with the IndexerNzbDownload entry
    :param log_api_access: when True, also create an IndexerApiAccess log entry
    :param internal: whether the download was triggered internally
    :return: tuple (link, IndexerApiAccess entry or None, IndexerNzbDownload entry)
    """
    searchResult = SearchResult.get(SearchResult.id == searchResultId)
    indexerName = searchResult.indexer.name
    indexer = indexers.getIndexerByName(indexerName)
    link = searchResult.link
    # Log to database
    papiaccess = IndexerApiAccess(indexer=indexer.indexer, type="nzb", url=link, response_successful=None) if log_api_access else None
    if papiaccess is not None:
        # Fixed: only touch and save the API access entry when logging is enabled;
        # previously this raised AttributeError when log_api_access was False (papiaccess is None)
        try:
            papiaccess.username = request.authorization.username if request.authorization is not None else None
        except RuntimeError:
            pass
        papiaccess.save()
    pnzbdl = IndexerNzbDownload(searchResult=searchResult, apiAccess=papiaccess, mode=mode, title=searchResult.title, internal=internal)
    pnzbdl.save()
    return link, papiaccess, pnzbdl
def get_nzb_link(indexer_name, guid, title, searchid):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)

    :param indexer_name: name of the indexer that supplied the result
    :param guid: the indexer's guid for the NZB, used to build the link
    :param title: result title, used to build the link
    :param searchid: id of the IndexerSearch entry the download belongs to
    :return: str link to the NZB, or None if the indexer is unknown
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:
            link = p.get_nzb_link(guid, title)
            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            # Fixed: indexer_search was passed the Indexer row instead of the search id;
            # siblings (download_nzb_and_log, get_indexer_nzb_link) all pass the search id here
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid)
            papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="redirect")
            pnzbdl.save()
            return link
    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None
def download_nzb_and_log(indexer_name, provider_guid, title, searchid):
    """
    Gets the NZB link from the indexer using the guid, downloads it and logs the download

    :param indexer_name: name of the indexer
    :param provider_guid: guid to build link
    :param title: the title to build the link
    :param searchid: the id of the IndexerSearch entry so we can link the download to a search
    :return: IndexerNzbDownloadResult, None on download failure, or an error string if the indexer is unknown
    """
    for p in indexers.enabled_indexers:
        if p.name == indexer_name:
            link = p.get_nzb_link(provider_guid, title)
            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            psearch = IndexerSearch.get((IndexerSearch.indexer == indexer) & (IndexerSearch.search == searchid))
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, indexer_search=psearch)
            papiaccess.save()
            internallink, guid = get_nzb_link_and_guid(indexer_name, provider_guid, searchid, title)
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode="serve", title=title, guid=internallink)
            pnzbdl.save()
            try:
                r = p.get(link, timeout=10)
                r.raise_for_status()
                papiaccess.response_successful = True
                # Fixed: elapsed.microseconds only holds the sub-second component, so the logged
                # response time was wrong for any request taking >= 1 second; use total_seconds()
                papiaccess.response_time = r.elapsed.total_seconds() * 1000
                return IndexerNzbDownloadResult(content=r.content, headers=r.headers)
            except RequestException as e:
                logger.error("Error while connecting to URL %s: %s" % (link, str(e)))
                papiaccess.error = str(e)
                return None
            finally:
                papiaccess.save()
    else:
        return "Unable to find NZB link"
def get_indexer_nzb_link(indexer_name, indexerguid, title, searchid, mode, log_api_access):
    """
    Build a link that leads to the actual NZB of the indexer using the given informations. We log this as indexer API access and NZB download because this is only called
    when the NZB will be actually downloaded later (by us or a downloader)

    :param indexer_name: name of the indexer that supplied the result
    :param indexerguid: the indexer's guid for the NZB, used to build the link
    :param title: result title, used to build the link
    :param searchid: id of the IndexerSearch entry the download belongs to
    :param mode: download mode stored with the IndexerNzbDownload entry
    :param log_api_access: when True, also create an IndexerApiAccess log entry
    :return: tuple (link, IndexerApiAccess entry or None, IndexerNzbDownload entry), or (None, None, None) if the indexer is unknown
    """
    for p in indexers.enabled_indexers:
        if p.name.strip() == indexer_name.strip():
            link = p.get_nzb_link(indexerguid, title)
            # Log to database
            indexer = Indexer.get(fn.lower(Indexer.name) == indexer_name.lower())
            papiaccess = IndexerApiAccess(indexer=p.indexer, type="nzb", url=link, response_successful=None, indexer_search=searchid) if log_api_access else None
            if papiaccess is not None:
                # Fixed: only touch and save the API access entry when logging is enabled;
                # previously this raised AttributeError when log_api_access was False (papiaccess is None)
                try:
                    papiaccess.username = request.authorization.username if request.authorization is not None else None
                except RuntimeError:
                    pass
                papiaccess.save()
            pnzbdl = IndexerNzbDownload(indexer=indexer, indexer_search=searchid, api_access=papiaccess, mode=mode, title=title, guid=indexerguid)
            pnzbdl.save()
            return link, papiaccess, pnzbdl
    else:
        logger.error("Did not find indexer with name %s" % indexer_name)
        return None, None, None