def _checkAuth(self, data):
    """Inspect a newznab API response for an <error> root element.

    Returns True when the data is unparseable (not an auth problem) or is a
    normal feed; returns False for an unknown error code; raises
    exceptions.AuthException for the documented auth error codes 100-102.
    """
    try:
        responseSoup = etree.ElementTree(etree.XML(data))
    except Exception:
        # Unparseable data is handled by the feed parser, not treated as an auth failure.
        return True

    root = responseSoup.getroot()
    if root.tag != 'error':
        return True

    code = root.get('code')
    if code == '100':
        raise exceptions.AuthException(
            "Your API key for " + self.provider.name + " is incorrect, check your config.")
    if code == '101':
        raise exceptions.AuthException(
            "Your account on " + self.provider.name + " has been suspended, contact the administrator.")
    if code == '102':
        raise exceptions.AuthException(
            "Your account isn't allowed to use the API on " + self.provider.name + ", contact the administrator")

    # Some other error code we don't know how to classify: log it and report failure.
    logger.log(
        u"Unknown error given from " + self.provider.name + ": " + root.get('description'),
        logger.ERROR)
    return False
def _checkAuthFromData(self, data):
    """Check a raw XML response for a newznab error document (minidom variant).

    Returns False when the data cannot be parsed or carries an unknown error
    code, True for a normal document, and raises exceptions.AuthException for
    the documented auth error codes 100-102.
    """
    try:
        parsedXML = parseString(data)
    except Exception:
        # Note: unlike the ElementTree variant, unparseable data counts as failure here.
        return False

    root = parsedXML.documentElement
    if root.tagName != 'error':
        return True

    code = root.getAttribute('code')
    if code == '100':
        raise exceptions.AuthException(
            "Your API key for " + self.name + " is incorrect, check your config.")
    if code == '101':
        raise exceptions.AuthException(
            "Your account on " + self.name + " has been suspended, contact the administrator.")
    if code == '102':
        raise exceptions.AuthException(
            "Your account isn't allowed to use the API on " + self.name + ", contact the administrator")

    logger.log(
        u"Unknown error given from " + self.name + ": " + root.getAttribute('description'),
        logger.ERROR)
    return False
class NewznabCache(tvcache.TVCache):
    """TVCache implementation for newznab-API providers."""

    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # only poll newznab providers every 15 minutes max
        self.minTime = 15

    def _getRSSData(self):
        """Build the provider's tvsearch API URL and return the raw feed data."""
        params = {"t": "tvsearch",
                  "age": sickbeard.USENET_RETENTION,
                  "cat": '5040,5030'}
        if self.provider.key:
            params['apikey'] = self.provider.key

        url = self.provider.url + 'api?' + urllib.urlencode(params)
        logger.log(self.provider.name + " cache update URL: " + url, logger.DEBUG)

        data = self.provider.getURL(url)

        # hack this in until it's fixed server side
        if data and not data.startswith('<?xml'):
            data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data

        return data

    def _checkAuth(self, data):
        """Return False (or raise AuthException) when the response is a newznab error element.

        Fix: the original bound the exception (`except Exception, e:`) but never
        used it — Python-2-only syntax with an unused name; drop the binding.
        """
        try:
            responseSoup = etree.ElementTree(etree.XML(data))
        except Exception:
            # Unparseable data isn't an auth problem; let the feed parser report it.
            return True

        if responseSoup.getroot().tag == 'error':
            code = responseSoup.getroot().get('code')
            if code == '100':
                raise exceptions.AuthException(
                    "Your API key for " + self.provider.name + " is incorrect, check your config.")
            elif code == '101':
                raise exceptions.AuthException(
                    "Your account on " + self.provider.name + " has been suspended, contact the administrator.")
            elif code == '102':
                raise exceptions.AuthException(
                    "Your account isn't allowed to use the API on " + self.provider.name + ", contact the administrator")
            else:
                logger.log(
                    u"Unknown error given from " + self.provider.name + ": " +
                    responseSoup.getroot().get('description'), logger.ERROR)
                return False

        return True
def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Handle HTTP errors from newzbin downloads.

    A 400 response carries a newzbin-specific sub-code in the X-DNZB-RCode
    header: 401/402 are auth/account problems, 450 means we are throttled and
    must wait the number of seconds given in X-DNZB-RText.

    Fixes vs. original:
    - the throttle log/sleep referenced `result` even when the sub-code was not
      450, raising NameError for any other 400 sub-code — now only the 450
      branch sleeps and raises NewzbinAPIThrottled;
    - guard against the "wait N seconds" regex not matching (was an
      AttributeError on None);
    - missing space before "seconds" in the log message; raw regex string.
    """
    # if newzbin is throttling us, wait the requested seconds and try again
    if errcode == 400:
        newzbinErrCode = int(headers.getheader('X-DNZB-RCode'))

        if newzbinErrCode == 401:
            raise exceptions.AuthException(
                "Newzbin username or password incorrect")
        elif newzbinErrCode == 402:
            raise exceptions.AuthException(
                "Newzbin account not premium status, can't download NZBs")
        elif newzbinErrCode == 450:
            rtext = str(headers.getheader('X-DNZB-RText'))
            result = re.search(r"wait (\d+) seconds", rtext)
            if result:
                logger.log("Newzbin throttled our NZB downloading, pausing for " +
                           result.group(1) + " seconds")
                time.sleep(int(result.group(1)))
            raise exceptions.NewzbinAPIThrottled()
def updateCache(self):
    # Refresh this provider's cache from its RSS feed (XML/ElementTree variant).
    # NOTE(review): this chunk appears truncated — the success path after the
    # try/except (iterating `items` into the cache) is not visible here; confirm
    # against the full file before changing control flow.
    if not self.shouldUpdate():
        return
    data = self._getRSSData()
    # as long as the http request worked we count this as an update
    if data:
        self.setLastUpdate()
    else:
        return []
    # now that we've loaded the current RSS feed lets delete the old cache
    logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
    self._clearCache()
    if not self._checkAuth(data):
        raise exceptions.AuthException("Your authentication info for " + self.provider.name +
                                       " is incorrect, check your config")
    try:
        responseSoup = etree.ElementTree(etree.XML(data))
        items = responseSoup.getiterator('item')
    except Exception, e:
        # Bad/garbled XML: log the failure and the raw payload, give up on this update.
        logger.log(
            u"Error trying to load " + self.provider.name + " RSS feed: " +
            str(e).decode('utf-8'), logger.ERROR)
        logger.log(u"Feed contents: " + repr(data), logger.DEBUG)
        return []
def updateCache(self):
    # Refresh this provider's cache from its JSON feed.
    # NOTE(review): this chunk appears truncated — the success path after the
    # try/except (iterating `items` into the cache) is not visible here; confirm
    # against the full file before changing control flow.
    if not self.shouldUpdate():
        return
    data = self._getRSSData()
    # as long as the http request worked we count this as an update
    if data:
        self.setLastUpdate()
    else:
        return []
    # now that we've loaded the current RSS feed lets delete the old cache
    logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
    self._clearCache()
    if not self._checkAuth(data):
        raise exceptions.AuthException("Your authentication info for " + self.provider.name +
                                       " is incorrect, check your config")
    try:
        parsedJSON = json.loads(data)
        items = parsedJSON['data']
    except Exception, e:
        # Bad JSON or missing 'data' key: log the failure and the raw payload.
        logger.log(u"Error trying to load " + self.provider.name + " RSS feed: " + ex(e),
                   logger.ERROR)
        logger.log(u"Feed contents: " + repr(data), logger.DEBUG)
        return []
def _checkAuth(self):
    """Raise AuthException when the NZBs'R'US UID or hash setting is missing."""
    empty = (None, "")
    if sickbeard.NZBSRUS_UID in empty or sickbeard.NZBSRUS_HASH in empty:
        raise exceptions.AuthException(
            "NZBs'R'US authentication details are empty, check your config")
def _doSearch(self, search, show=None, retention=0):
    """Query the omgwtfnzbs JSON API and return the raw result entries.

    Returns a list of result dicts (each having 'release' and 'getnzb' keys),
    or an empty list on any failure. Raises exceptions.AuthException when the
    API reports bad credentials.
    """
    params = {'user': sickbeard.OMGWTFNZBS_UID,
              'api': sickbeard.OMGWTFNZBS_KEY,
              'eng': 1,
              'catid': '19,20',  # SD,HD
              'retention': sickbeard.USENET_RETENTION,
              'search': search}

    # An explicit retention argument (or an unset global) overrides the default.
    if retention or not params['retention']:
        params['retention'] = retention

    search_url = 'https://api.omgwtfnzbs.org/json?' + urllib.urlencode(params)
    logger.log(u"omgwtfnzbs search url: " + search_url, logger.DEBUG)

    raw = self.getURL(search_url)
    if not raw:
        logger.log(u"omgwtfnzbs returned no json data", logger.DEBUG)
        return []

    try:
        parsed = json.loads(raw)
    except ValueError:
        logger.log(u"Error trying to decode omgwtfnzbs json response", logger.ERROR)
        return []

    if 'notice' in parsed:
        # The API signals problems via a 'notice' field instead of results.
        notice = parsed.get('notice')
        if 'api information is incorrect' in notice:
            raise exceptions.AuthException("omgwtfnzbs authentication details are incorrect")
        logger.log(u"omgwtfnzbs notice: " + notice, logger.DEBUG)
        return []

    return [entry for entry in parsed if 'release' in entry and 'getnzb' in entry]
def _getRSSData(self):
    """Fetch the raw TorrentBytes RSS feed; requires a configured RSS URL."""
    if not sickbeard.TORRENTBYTES_URL:
        raise exceptions.AuthException("TorrentBytes requires an RSS URL to work correctly")

    feed_url = sickbeard.TORRENTBYTES_URL
    logger.log(u"TorrentBytes cache update URL: " + feed_url, logger.DEBUG)
    return self.provider.getURL(feed_url)
def _getRSSData(self):
    """Fetch the TorrentLeech RSS feed using the configured API key."""
    if not sickbeard.TORRENTLEECH_KEY:
        raise exceptions.AuthException(
            "TorrentLeech requires an API key to work correctly")

    feed_url = 'http://rss.torrentleech.org/' + sickbeard.TORRENTLEECH_KEY
    logger.log(u"TorrentLeech cache update URL: " + feed_url, logger.DEBUG)
    return self.provider.getURL(feed_url)
def updateCache(self):
    """Refresh this provider's cache from its JSON feed.

    Pre-checks auth, fetches the feed, validates the parsed JSON (re-checking
    auth against it), then replaces the cache contents in one DB batch.
    """
    if not self.shouldUpdate():
        return

    # Pre-flight auth check before we bother hitting the provider.
    if not self._checkAuth(None):
        return []

    data = self._getRSSData()

    # As long as we got something from the provider we count it as an update.
    if data:
        self.setLastUpdate()
    else:
        return []

    logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
    self._clearCache()

    parsedJSON = helpers.parse_json(data)
    if parsedJSON is None:
        logger.log(
            u"Error trying to load " + self.provider.name + " JSON feed", logger.ERROR)
        return []

    # The parsed response can itself indicate an auth failure.
    if not self._checkAuth(parsedJSON):
        raise exceptions.AuthException(
            "Your authentication info for " + self.provider.name +
            " is incorrect, check your config")

    if parsedJSON and 'data' in parsedJSON:
        items = parsedJSON['data']
    else:
        logger.log(
            u"Resulting JSON from " + self.provider.name +
            " isn't correct, not parsing it", logger.ERROR)
        return []

    # Collect the per-item DB actions and apply them in a single batch.
    pending = [action for action in (self._parseItem(item) for item in items)
               if action is not None]
    if pending:
        myDB = self._getDB()
        myDB.mass_action(pending)
def _doSearch(self, curString, show=None, age=None):
    """Search the NZBs.org RSS feed and return the matching feed entries.

    Returns a list of feed entries; raises exceptions.AuthException when the
    result URLs carry no UID/hash auth parameters.

    Fixes vs. original:
    - `data == None` -> `data is None`;
    - the incomplete-item log concatenated the parsed feed object `data`
      directly into a unicode string, which raises TypeError — use repr().
    """
    curString = curString.replace('.', ' ')
    params = {"action": "search",
              "q": curString.encode('utf-8'),
              "dl": 1,
              "i": sickbeard.NZBS_UID,
              "h": sickbeard.NZBS_HASH,
              "age": sickbeard.USENET_RETENTION,
              "num": 100,
              "type": 1}

    searchURL = self.url + "rss.php?" + urllib.urlencode(params)
    logger.log(u"Search string: " + searchURL, logger.DEBUG)

    data = self.getRSSFeed(searchURL)

    # Pause to avoid 503's
    time.sleep(5)

    if data is None:
        logger.log(u"Error trying to load NZBs.org RSS feed: " + searchURL, logger.ERROR)
        return []

    items = data.entries

    results = []
    for curItem in items:
        (title, url) = self._get_title_and_url(curItem)

        if not title or not url:
            # repr() because `data` is the parsed feed object, not a string
            logger.log(
                u"The XML returned from the NZBs.org RSS feed is incomplete, this result is unusable: "
                + repr(data), logger.ERROR)
            continue

        if "&i=" not in url and "&h=" not in url:
            raise exceptions.AuthException(
                "The NZBs.org result URL has no auth info which means your UID/hash are incorrect, check your config"
            )

        results.append(curItem)

    return results
def _parseItem(self, item):
    """Validate a single TorrentBytes RSS item and store it in the cache."""
    desc_node = item.getElementsByTagName('description')[0]
    # The feed reports bad credentials inside the description text.
    if "wrong passkey or username" in helpers.get_xml_text(desc_node):
        raise exceptions.AuthException("TorrentBytes URL invalid")

    title, url = self.provider._get_title_and_url(item)
    if title and url:
        logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
        self._addCacheEntry(title, url)
    else:
        logger.log(u"The XML returned from the Torrentbytes RSS feed is incomplete, this result is unusable",
                   logger.ERROR)
def _parseItem(self, item):
    """Parse a feed entry, determine its quality, and add it to the cache."""
    title, url = self.provider._get_title_and_url(item)

    # A title of 'Feeds Error' is how the feed signals bad credentials.
    if title == 'Feeds Error':
        logger.log("There's an error in the feed, probably bad auth info", logger.DEBUG)
        raise exceptions.AuthException("Invalid Newzbin username/password")

    if not (title and url):
        logger.log("The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable",
                   logger.ERROR)
        return

    quality = self.provider.getQuality(item)
    logger.log("Found quality "+str(quality), logger.DEBUG)

    logger.log("Adding item from RSS to cache: "+title, logger.DEBUG)
    self._addCacheEntry(title, url, quality=quality)
def _parseItem(self, item):
    """Validate a generic RSS feed entry and add it to the cache."""
    title, url = self.provider._get_title_and_url(item)

    # A title of 'Feeds Error' is how the feed signals bad credentials.
    if title == 'Feeds Error':
        logger.log("There's an error in the feed, probably bad auth info", logger.DEBUG)
        raise exceptions.AuthException("Invalid Newzbin username/password")

    if not (title and url):
        logger.log(
            "The XML returned from the " + self.provider.name +
            " feed is incomplete, this result is unusable", logger.ERROR)
        return

    logger.log(
        u"RSS Feed provider: [" + self.provider.name +
        "] Attempting to add item to cache: " + title, logger.DEBUG)
    self._addCacheEntry(title, url)
def _parseItem(self, item):
    """Validate a TorrentLeech RSS item and store it in the cache.

    Fix: the original called title.replace() before checking `not title`,
    so a missing title raised AttributeError instead of being logged —
    the completeness check now runs first.
    """
    description = helpers.get_xml_text(
        item.getElementsByTagName('description')[0])
    # The feed reports a bad API key inside the description text.
    if "Your RSS key is invalid" in description:
        raise exceptions.AuthException("TorrentLeech key invalid")

    (title, url) = self.provider._get_title_and_url(item)

    if not title or not url:
        logger.log(
            u"The XML returned from the TorrentLeech RSS feed is incomplete, this result is unusable",
            logger.ERROR)
        return

    # torrentleech converts dots to spaces, undo this
    title = title.replace(' ', '.')

    logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
    self._addCacheEntry(title, url)
def updateCache(self):
    """Log in to nzb.to and refresh the cache from a provider search.

    Fixes vs. original: removed the dead `urlArgs` dict (its urlencode call was
    commented out) and other commented-out leftovers; flattened the
    raise/else nesting with a guard clause.
    """
    if not self.shouldUpdate():
        return

    if not sickbeard.NZBTO_USER or not sickbeard.NZBTO_PASS:
        raise exceptions.AuthException("nzb.to authentication details are empty, check your config")

    # if user and pass are ok, log us in
    self.provider.proxy = sickbeard.NZBTO_PROXY
    self.provider.session.post("http://nzb.to/login.php",
                               data={"action": "login",
                                     "username": sickbeard.NZBTO_USER,
                                     "password": sickbeard.NZBTO_PASS,
                                     "bind_ip": "on",
                                     "Submit": ".%3AEinloggen%3A.",
                                     "ret_url": ""})

    url = "http://nzb.to/?p=list&cat=13&sa_Video-Genre=3221225407&sort=post_date&order=desc&amount=100"
    logger.log(u"NZBto cache update URL: "+ url, logger.DEBUG)

    data = self.provider._doSearch("cache")
    if not data:
        return

    self.setLastUpdate()

    # now that we've got the latest releases lets delete the old cache
    logger.log(u"Clearing nzb.to cache and updating with new information")
    self._clearCache()

    for item in data:
        self._parseItem(item)
def _parseItem(self, item):
    """Parse one TVBinz RSS entry and add it to the cache with its tvrage id."""
    title = item.findtext('title')
    link = item.findtext('link')

    # The feed reports an auth failure via a fixed title string.
    if title is not None and title == "You must be logged in to view this feed":
        raise exceptions.AuthException(
            "TVBinz authentication details are incorrect, check your config"
        )

    if title is None or link is None:
        logger.log(
            u"The XML returned from the TVBinz RSS feed is incomplete, this result is unusable: "
            + str(item), logger.ERROR)
        return

    # NOTE(review): this replace is a no-op as written — presumably it was
    # '&amp;' -> '&' before the source was mangled; confirm against upstream.
    url = link.replace('&', '&')

    sInfo = item.find('{http://tvbinz.net/rss/tvb/}seriesInfo')
    if sInfo is None:
        logger.log(
            u"No series info, this is some kind of non-standard release, ignoring it",
            logger.DEBUG)
        return

    logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)

    quality = Quality.nameQuality(title)

    tvrid_text = sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID')
    tvrid = 0 if tvrid_text is None else int(tvrid_text)

    # since TVBinz normalizes the scene names it's more reliable to parse the episodes out myself
    # than to rely on it, because it doesn't support multi-episode numbers in the feed
    self._addCacheEntry(title, url, tvrage_id=tvrid, quality=quality)
class NZBsProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "NZBs.org Old") self.supportsBacklog = True self.cache = NZBsCache(self) self.url = 'https://secure.nzbs.org/' def isEnabled(self): return sickbeard.NZBS def _checkAuth(self): if sickbeard.NZBS_UID in (None, "") or sickbeard.NZBS_HASH in (None, ""): raise exceptions.AuthException( "NZBs.org authentication details are empty, check your config") def _get_season_search_strings(self, show, season, scene=False): return [ '^' + x for x in show_name_helpers.makeSceneSeasonSearchString( show, season, scene=scene) ] def _get_episode_search_strings(self, ep_obj): return [ '^' + x for x in show_name_helpers.makeSceneSearchString(ep_obj) ] def _doSearch(self, curString, show=None): curString = curString.replace('.', ' ') params = { "action": "search", "q": curString.encode('utf-8'), "dl": 1, "i": sickbeard.NZBS_UID, "h": sickbeard.NZBS_HASH, "age": sickbeard.USENET_RETENTION, "num": 100, "type": 1 } searchURL = self.url + "rss.php?" + urllib.urlencode(params) logger.log(u"Search string: " + searchURL, logger.DEBUG) data = self.getURL(searchURL) # Pause to avoid 503's time.sleep(5) if data == None: return [] try: parsedXML = parseString(data) items = parsedXML.getElementsByTagName('item') except Exception, e: logger.log(u"Error trying to load NZBs.org RSS feed: " + ex(e), logger.ERROR) return [] results = [] for curItem in items: (title, url) = self._get_title_and_url(curItem) if not title or not url: logger.log( u"The XML returned from the NZBs.org RSS feed is incomplete, this result is unusable: " + data, logger.ERROR) continue if "&i=" not in url and "&h=" not in url: raise exceptions.AuthException( "The NZBs.org result URL has no auth info which means your UID/hash are incorrect, check your config" ) results.append(curItem) return results
def _checkItemAuth(self, title, url):
    """Reject result URLs that carry no UID/hash auth query parameters."""
    has_auth_params = "&i=" in url or "&h=" in url
    if not has_auth_params:
        raise exceptions.AuthException(
            "The NZBs.org result URL has no auth info which means your UID/hash are incorrect, check your config"
        )
def _checkAuth(self):
    """Raise AuthException when the binsearch settings are missing."""
    empty = (None, "")
    if sickbeard.BINSEARCH_MAX in empty or sickbeard.BINSEARCH_ALT in empty:
        raise exceptions.AuthException(
            "binsearch parameters details are empty, check your config")
def _checkAuth(self):
    """Raise AuthException when the Newzbin username or password is missing."""
    empty = (None, "")
    if sickbeard.NEWZBIN_USERNAME in empty or sickbeard.NEWZBIN_PASSWORD in empty:
        raise exceptions.AuthException(
            "Newzbin authentication details are empty, check your config")
def _checkAuth(self):
    """Verify nzb.to credentials exist; on success also pick up the configured proxy."""
    if not (sickbeard.NZBTO_USER and sickbeard.NZBTO_PASS):
        raise exceptions.AuthException("nzb.to authentication details are empty, check your config")
    self.proxy = sickbeard.NZBTO_PROXY
def _checkAuth(self):
    """Raise AuthException unless all three TVBinz auth settings are present."""
    empty = (None, "")
    if (sickbeard.TVBINZ_UID in empty
            or sickbeard.TVBINZ_HASH in empty
            or sickbeard.TVBINZ_AUTH in empty):
        raise exceptions.AuthException(
            "TVBinz authentication details are empty, check your config")
def _checkAuth(self):
    """Raise AuthException when the omgwtfnzbs UID or API key is missing."""
    if not (sickbeard.OMGWTFNZBS_UID and sickbeard.OMGWTFNZBS_KEY):
        raise exceptions.AuthException(
            "omgwtfnzbs authentication details are empty, check your config"
        )
item_list = [] try: responseSoup = etree.ElementTree(etree.XML(data)) items = responseSoup.getiterator('item') except Exception, e: logger.log("Error trying to load Newzbin RSS feed: " + ex(e), logger.ERROR) return [] for cur_item in items: title = cur_item.findtext('title') if title == 'Feed Error': raise exceptions.AuthException( "The feed wouldn't load, probably because of invalid auth info" ) item_list.append(cur_item) return item_list def _getRSSData(self, search=None): params = { 'searchaction': 'Search', 'fpn': 'p', 'category': 8, 'u_nfo_posts_only': 0, 'u_url_posts_only': 0, 'u_comment_posts_only': 0,
def findEpisode(episode, forceQuality=None):
    """Search NZBMatrix for an episode and return NZBSearchResult objects.

    Fixes vs. original:
    - the download-URL expression was scrubbed/mangled in the source
      (`"&username="******"`); reconstructed from the username/apikey query
      parameters used for the search above — NOTE(review): confirm against
      upstream history;
    - `!= None` -> `is not None`;
    - the urlopen handle is closed via try/finally so it can't leak on error.
    """
    if episode.status == DISCBACKLOG:
        logger.log(
            "NZBMatrix doesn't support disc backlog. Use newzbin or download it manually from NZBMatrix"
        )
        return []

    logger.log("Searching NZBMatrix for " + episode.prettyName())

    # Work out which quality to search for.
    if forceQuality is not None:
        epQuality = forceQuality
    elif episode.show.quality == BEST:
        epQuality = ANY
    else:
        epQuality = episode.show.quality

    # Map quality onto the NZBMatrix category id.
    if epQuality == SD:
        quality = {"catid": 6}
    elif epQuality == HD:
        quality = {"catid": 41}
    else:
        quality = {}

    sceneSearchStrings = sickbeard.helpers.makeSceneSearchString(episode)

    results = []

    for curString in sceneSearchStrings:
        params = {
            "search": curString.replace(".", " ").encode('utf-8'),
            "age": sickbeard.USENET_RETENTION,
            "username": sickbeard.NZBMATRIX_USERNAME,
            "apikey": sickbeard.NZBMATRIX_APIKEY
        }
        params.update(quality)

        searchURL = "https://nzbmatrix.com/api-nzb-search.php?" + urllib.urlencode(params)
        logger.log("Search string: " + searchURL, logger.DEBUG)

        f = urllib.urlopen(searchURL)
        try:
            searchResult = "".join(f.readlines())
        finally:
            f.close()

        if searchResult.startswith("error:"):
            err = searchResult.split(":")[1]
            if err == "nothing_found":
                continue
            elif err == "invalid_login" or err == "invalid_api":
                raise exceptions.AuthException(
                    "NZBMatrix username or API key is incorrect")
            logger.log("An error was encountered during the search: " + err,
                       logger.ERROR)

        # Results are pipe-separated records of newline-separated "key:value;" lines.
        for curResult in searchResult.split("|"):
            resultDict = {}
            lines = curResult.split("\n")
            for info in lines:
                curInfo = info.strip(";").split(":")
                if len(curInfo) != 2:
                    continue
                resultDict[curInfo[0]] = curInfo[1]

            if len(resultDict) == 0:
                continue

            if epQuality == HD and "720p" not in resultDict["NZBNAME"]:
                logger.log(
                    "Ignoring result " + resultDict["NZBNAME"] +
                    " because it doesn't contain 720p in the name", logger.DEBUG)
                continue

            result = sickbeard.classes.NZBSearchResult(episode)
            result.provider = NZBMATRIX
            result.url = ("http://nzbmatrix.com/api-nzb-download.php?id=" + resultDict["NZBID"]
                          + "&username=" + sickbeard.NZBMATRIX_USERNAME
                          + "&apikey=" + sickbeard.NZBMATRIX_APIKEY)
            result.extraInfo = [resultDict["NZBNAME"]]
            result.quality = epQuality

            results.append(result)

    return results
class NewznabProvider(generic.NZBProvider): def __init__(self, name, url, key=''): generic.NZBProvider.__init__(self, name) self.cache = NewznabCache(self) self.url = url self.key = key # if a provider doesn't need an api key then this can be false self.needs_auth = True self.enabled = True self.supportsBacklog = True self.default = False def configStr(self): return self.name + '|' + self.url + '|' + self.key + '|' + str(int(self.enabled)) def imageName(self): if ek.ek(os.path.isfile, ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', 'providers', self.getID()+'.gif')): return self.getID()+'.gif' return 'newznab.gif' def isEnabled(self): return self.enabled def _get_season_search_strings(self, show, season=None): if not show: return [{}] to_return = [] # add new query strings for exceptions name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid) + [show.name] for cur_exception in name_exceptions: cur_params = {} # search directly by tvrage id if show.tvrid: cur_params['rid'] = show.tvrid # if we can't then fall back on a very basic name search else: cur_params['q'] = sanitizeSceneName(cur_exception) if season != None: # air-by-date means &season=2010&q=2010.03, no other way to do it atm if show.air_by_date: cur_params['season'] = season.split('-')[0] if 'q' in cur_params: cur_params['q'] += '.' 
+ season.replace('-', '.') else: cur_params['q'] = season.replace('-', '.') else: cur_params['season'] = season # hack to only add a single result if it's a rageid search if not ('rid' in cur_params and to_return): to_return.append(cur_params) return to_return def _get_episode_search_strings(self, ep_obj): params = {} if not ep_obj: return [params] # search directly by tvrage id if ep_obj.show.tvrid: params['rid'] = ep_obj.show.tvrid # if we can't then fall back on a very basic name search else: params['q'] = sanitizeSceneName(ep_obj.show.name) if ep_obj.show.air_by_date: date_str = str(ep_obj.airdate) params['season'] = date_str.partition('-')[0] params['ep'] = date_str.partition('-')[2].replace('-','/') else: params['season'] = ep_obj.season params['ep'] = ep_obj.episode to_return = [params] # only do exceptions if we are searching by name if 'q' in params: # add new query strings for exceptions name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid) for cur_exception in name_exceptions: # don't add duplicates if cur_exception == ep_obj.show.name: continue cur_return = params.copy() cur_return['q'] = sanitizeSceneName(cur_exception) to_return.append(cur_return) return to_return def _doGeneralSearch(self, search_string): return self._doSearch({'q': search_string}) #def _doSearch(self, show, season=None, episode=None, search=None): def _doSearch(self, search_params, show=None): params = {"t": "tvsearch", "maxage": sickbeard.USENET_RETENTION, "limit": 100, "cat": '5030,5040'} if search_params: params.update(search_params) if self.key: params['apikey'] = self.key searchURL = self.url + 'api?' 
+ urllib.urlencode(params) logger.log(u"Search url: " + searchURL, logger.DEBUG) data = self.getURL(searchURL) if not data: return [] # hack this in until it's fixed server side if not data.startswith('<?xml'): data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data try: responseSoup = etree.ElementTree(etree.XML(data)) items = responseSoup.getiterator('item') except Exception, e: logger.log(u"Error trying to load "+self.name+" RSS feed: "+ex(e), logger.ERROR) logger.log(u"RSS data: "+data, logger.DEBUG) return [] if responseSoup.getroot().tag == 'error': code = responseSoup.getroot().get('code') if code == '100': raise exceptions.AuthException("Your API key for "+self.name+" is incorrect, check your config.") elif code == '101': raise exceptions.AuthException("Your account on "+self.name+" has been suspended, contact the administrator.") elif code == '102': raise exceptions.AuthException("Your account isn't allowed to use the API on "+self.name+", contact the administrator") else: logger.log(u"Unknown error given from "+self.name+": "+responseSoup.getroot().get('description'), logger.ERROR) return [] if responseSoup.getroot().tag != 'rss': logger.log(u"Resulting XML from "+self.name+" isn't RSS, not parsing it", logger.ERROR) return [] results = [] for curItem in items: title = curItem.findtext('title') url = curItem.findtext('link') if not title or not url: logger.log(u"The XML returned from the "+self.name+" RSS feed is incomplete, this result is unusable: "+data, logger.ERROR) continue url = url.replace('&','&') results.append(curItem) return results