def download(self, data=None, movie=None, filedata=None):
    """Send a release to SABnzbd, by URL or by uploading the nzb file itself.

    Returns True when SAB acknowledges the job ('ok'), False otherwise.
    """
    # BUG FIX: mutable default arguments ({}) are shared across calls.
    data = data or {}
    movie = movie or {}

    log.info('Sending "%s" to SABnzbd.', data.get('name'))

    params = {
        'apikey': self.conf('api_key'),
        'cat': self.conf('category'),
        'mode': 'addurl',
        'nzbname': self.createNzbName(data, movie),
    }

    nzb_filename = None
    if filedata:
        # Anything under 50 bytes can't be a real nzb document.
        if len(filedata) < 50:
            log.error('No proper nzb available: %s', (filedata))
            return False

        # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
        nzb_filename = self.createFileName(data, filedata, movie)
        params['mode'] = 'addfile'
    else:
        params['name'] = data.get('url')

    url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(params)

    try:
        # BUG FIX: compare strings with ==, not the identity operator 'is'
        # (the old check only worked via CPython literal interning).
        if params.get('mode') == 'addfile':
            sab = self.urlopen(url, timeout=60,
                               params={'nzbfile': (ss(nzb_filename), filedata)},
                               multipart=True, show_error=False,
                               headers={'User-Agent': Env.getIdentifier()})
        else:
            sab = self.urlopen(url, timeout=60, show_error=False,
                               headers={'User-Agent': Env.getIdentifier()})
    except URLError:
        log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
        return False
    except:
        log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
        return False

    result = sab.strip()
    if not result:
        log.error('SABnzbd didn\'t return anything.')
        return False

    log.debug('Result text from SAB: %s', result[:40])
    if result[:2] == 'ok':
        log.info('NZB sent to SAB successfully.')
        return True
    else:
        log.error(result[:40])
        return False
def call(self, request_params, use_json=True, **kwargs):
    """Query the SABnzbd API and return the response.

    With use_json the body is decoded and the section named by
    request_params['mode'] is returned (whole document as fallback,
    {} on an API error); otherwise the raw body is returned.
    """
    query = mergeDicts(request_params, {
        'apikey': self.conf('api_key'),
        'output': 'json'
    })
    api_url = cleanHost(self.conf('host'), ssl=self.conf('ssl')) + 'api?' + tryUrlencode(query)

    raw = self.urlopen(api_url, timeout=60, show_error=False, verify_ssl=False,
                       headers={'User-Agent': Env.getIdentifier()}, **kwargs)

    if not use_json:
        return raw

    parsed = json.loads(raw)
    if parsed.get('error'):
        log.error('Error getting data from SABNZBd: %s', parsed.get('error'))
        return {}

    return parsed.get(request_params['mode']) or parsed
def download(self, url='', nzb_id=''):
    """Fetch an nzb, backing off for 3 hours from hosts that rate-limited us.

    Returns the nzb body on success, or 'try_next' so the searcher moves
    on to another release when the host is (still) limited or the fetch fails.
    """
    host = urlparse(url).hostname

    if self.limits_reached.get(host):
        # Try again in 3 hours
        if self.limits_reached[host] > time.time() - 10800:
            return 'try_next'

    try:
        data = self.urlopen(url, show_error=False, headers={'User-Agent': Env.getIdentifier()})
        # Successful fetch clears the backoff for this host.
        self.limits_reached[host] = False
        return data
    except HTTPError as e:
        sc = e.response.status_code
        if sc in [503, 429]:
            # BUG FIX: requests' HTTPError has no read(); the body is on
            # e.response (status_code above already assumes requests).
            response = e.response.text.lower()
            if sc == 429 or 'maximum api' in response or 'download limit' in response:
                if not self.limits_reached.get(host):
                    # Typo fix in log message: 'to many' -> 'too many'.
                    log.error('Limit reached / too many requests for newznab provider: %s', host)
                self.limits_reached[host] = time.time()
                return 'try_next'

        log.error('Failed download from %s: %s', (host, traceback.format_exc()))
        return 'try_next'
def download(self, url = '', nzb_id = ''):
    """Fetch an nzb, backing off for 3 hours from hosts that rate-limited us.

    Returns the nzb body on success, or 'try_next' so the searcher moves
    on to another release when the host is (still) limited or the fetch fails.
    """
    host = urlparse(url).hostname

    if self.limits_reached.get(host):
        # Try again in 3 hours
        if self.limits_reached[host] > time.time() - 10800:
            return 'try_next'

    try:
        data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
        # Successful fetch clears the backoff for this host.
        self.limits_reached[host] = False
        return data
    except HTTPError as e:
        sc = e.response.status_code
        if sc in [503, 429]:
            # BUG FIX: requests' HTTPError has no read(); the body is on
            # e.response (status_code above already assumes requests).
            response = e.response.text.lower()
            if sc == 429 or 'maximum api' in response or 'download limit' in response:
                if not self.limits_reached.get(host):
                    # Typo fix in log message: 'to many' -> 'too many'.
                    log.error('Limit reached / too many requests for newznab provider: %s', host)
                self.limits_reached[host] = time.time()
                return 'try_next'

        log.error('Failed download from %s: %s', (host, traceback.format_exc()))
        return 'try_next'
def download(self, url = '', nzb_id = ''):
    """Fetch a release from this provider; 'try_next' on any failure."""
    request_headers = {'User-Agent': Env.getIdentifier()}
    try:
        return self.urlopen(url, headers = request_headers, show_error = False)
    except:
        log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
        return 'try_next'
def download(self, url="", nzb_id=""):
    """Fetch an nzb from this provider; "try_next" on any failure."""
    request_headers = {"User-Agent": Env.getIdentifier()}
    try:
        return self.urlopen(url, headers=request_headers, show_error=False)
    except:
        log.error("Failed getting nzb from %s: %s", (self.getName(), traceback.format_exc()))
        return "try_next"
def download(self, url = '', nzb_id = ''):
    """Fetch an nzb from this provider; 'try_next' on any failure."""
    request_headers = {'User-Agent': Env.getIdentifier()}
    try:
        return self.urlopen(url, headers = request_headers, show_error = False)
    except:
        log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
        return 'try_next'
def _searchOnHost(self, host, movie, quality, results):
    """Search one newznab host for a movie and append release dicts to results."""
    arguments = tryUrlencode({
        'imdbid': movie['library']['identifier'].replace('tt', ''),
        'apikey': host['api_key'],
        'extended': 1
    })
    url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)

    nzbs = self.getRSSData(url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:
        # Prefer the 'usenetdate' attribute; fall back to pubDate.
        date = None
        for item in nzb:
            if item.attrib.get('name') == 'usenetdate':
                date = item.attrib.get('value')
                break
        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        results.append({
            'id': nzb_id,
            'provider_extra': host['host'],
            # FIX: reuse the already-fetched title instead of parsing it again.
            'name': name,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': (self.getUrl(host['host'], self.urls['download']) % nzb_id) + self.getApiExt(host),
            'detail_url': '%sdetails/%s' % (cleanHost(host['host']), nzb_id),
            'content': self.getTextElement(nzb, 'description'),
        })
def _searchOnHost(self, host, movie, quality, results):
    """Search a single newznab host and collect release dicts into results."""
    arguments = tryUrlencode({
        'imdbid': movie['library']['identifier'].replace('tt', ''),
        'apikey': host['api_key'],
        'extended': 1
    })
    url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)

    nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:

        date = None
        spotter = None
        for item in nzb:
            if date and spotter:
                break
            attr_name = item.attrib.get('name')
            if attr_name == 'usenetdate':
                date = item.attrib.get('value')
                # NOTE(review): this break stops the scan as soon as the date
                # is found, so a 'poster' attribute listed after it is never
                # read — confirm the feed ordering makes that safe.
                break
            # Get the name of the person who posts the spot
            if attr_name == 'poster':
                if "@spot.net" in item.attrib.get('value'):
                    spotter = item.attrib.get('value').split("@")[0]
                continue

        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        name_extra = spotter if spotter else ''

        results.append({
            'id': nzb_id,
            'provider_extra': urlparse(host['host']).hostname or host['host'],
            'name': name,
            'name_extra': name_extra,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
            'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
            'content': self.getTextElement(nzb, 'description'),
            'score': host['extra_score'],
        })
def _search(self, movie, quality, results):
    """Search the newznab API for a movie and append release dicts to results."""
    cat_id_string = 'cat=%s' % ','.join(['%s' % x for x in self.getCatId(quality.get('identifier'))])
    arguments = tryUrlencode({
        'imdbid': movie['library']['identifier'].replace('tt', ''),
        'apikey': self.conf('api_key'),
        'extended': 1
    })
    url = '%s&%s&%s' % ((self.urls['search']), arguments, cat_id_string)

    nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:
        # Prefer the 'usenetdate' attribute; fall back to pubDate.
        date = None
        for item in nzb:
            if item.attrib.get('name') == 'usenetdate':
                date = item.attrib.get('value')
                break
        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        results.append({
            'id': nzb_id,
            # FIX: reuse the already-fetched title instead of parsing it again.
            'name': name,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': self.urls['download'] % tryUrlencode(nzb_id) + self.getApiExt(),
            'detail_url': self.urls['detail'] % tryUrlencode(nzb_id),
            'content': self.getTextElement(nzb, 'description'),
        })
def _search(self, movie, quality, results):
    """Search the provider's XML API and append release dicts to results."""
    cat_id_string = '&'.join(['c%s=1' % x for x in self.getCatId(quality.get('identifier'))])
    arguments = tryUrlencode({
        'searchtext': 'imdb:' + movie['library']['identifier'][2:],
        'uid': self.conf('userid'),
        'key': self.conf('api_key'),
        'age': Env.setting('retention', section = 'nzb'),
    })

    # check for english_only
    if self.conf('english_only'):
        arguments += '&lang0=1&lang3=1&lang1=1'

    url = '%s&%s&%s' % (self.urls['search'], arguments , cat_id_string)

    nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:
        title = self.getTextElement(nzb, 'name')
        if 'error' in title.lower():
            continue

        nzb_id = self.getTextElement(nzb, 'id')
        # size reported in bytes -> MB; postdate is a unix timestamp -> days.
        size = int(round(int(self.getTextElement(nzb, 'size')) / 1048576))
        age = int(round((time.time() - int(self.getTextElement(nzb, 'postdate'))) / 86400))

        results.append({
            'id': nzb_id,
            'name': title,
            'age': age,
            'size': size,
            # BUG FIX: the download url was formatted with the BUILTIN id()
            # function instead of the release's nzb_id.
            'url': self.urls['download'] % nzb_id + self.getApiExt() + self.getTextElement(nzb, 'key'),
            'detail_url': self.urls['detail'] % nzb_id,
            'description': self.getTextElement(nzb, 'addtext'),
        })
def createItems(self, url, cache_key, host, movie = None, quality = None, for_feed = False): results = [] data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results results = [] for nzb in nzbs: date = '' size = 0 for item in nzb: if item.attrib.get('name') == 'size': size = item.attrib.get('value') elif item.attrib.get('name') == 'usenetdate': date = item.attrib.get('value') if date is '': log.debug('Date not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title"))) if size is 0: log.debug('Size not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title"))) id = self.getTextElement(nzb, "guid").split('/')[-1:].pop() new = { 'id': id, 'provider': self.getName(), 'type': 'nzb', 'name': self.getTextElement(nzb, "title"), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'size': int(size) / 1024 / 1024, 'url': (self.getUrl(host['host'], self.urls['download']) % id) + self.getApiExt(host), 'download': self.download, 'detail_url': '%sdetails/%s' % (cleanHost(host['host']), id), 'content': self.getTextElement(nzb, "description"), } if not for_feed: is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = True, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) else: results.append(new) return results except SyntaxError: log.error('Failed to parse XML response from Newznab: %s', host) return results
def download(self, url = '', nzb_id = ''):
    """Fetch the nzb at the given url, identifying ourselves to the host."""
    identity_headers = {'User-Agent': Env.getIdentifier()}
    return self.urlopen(url, headers = identity_headers)
def _searchOnHost(self, host, media, quality, results):
    """Search one newznab host for media and append release dicts to results.

    Spotnet posts (guid containing '@spot.net') trigger a second 'details'
    request to pull a password out of the extended description; a found
    password is appended to the release name as ' {{password}}'.
    """
    query = self.buildUrl(media, host)
    url = '%s&%s' % (self.getUrl(host['host']), query)
    nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:

        date = None
        spotter = None
        for item in nzb:
            if date and spotter:
                break
            if item.attrib.get('name') == 'usenetdate':
                date = item.attrib.get('value')
                # NOTE(review): this break ends the attribute scan once the
                # date is found, so a 'poster' attribute listed after it is
                # never read — confirm the feed ordering makes that safe.
                break

            # Get the name of the person who posts the spot
            if item.attrib.get('name') == 'poster':
                if "@spot.net" in item.attrib.get('value'):
                    spotter = item.attrib.get('value').split("@")[0]
                    continue

        # Fall back to the standard RSS pubDate when usenetdate is absent.
        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        name_extra = ''
        if spotter:
            name_extra = spotter

        description = ''
        if "@spot.net" in nzb_id:
            try:
                # Get details for extended description to retrieve passwords
                query = self.buildDetailsUrl(nzb_id, host['api_key'])
                url = '%s&%s' % (self.getUrl(host['host']), query)
                nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]

                description = self.getTextElement(nzb_details, 'description')

                # Extract a password from the description
                password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
                if password:
                    name = name + ' {{%s}}' % password.strip()
            except:
                # Best effort: a failed details fetch/parse leaves the name as-is.
                log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))

        results.append({
            'id': nzb_id,
            'provider_extra': urlparse(host['host']).hostname or host['host'],
            'name': toUnicode(name),
            'name_extra': name_extra,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
            'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
            'content': self.getTextElement(nzb, 'description'),
            'description': description,
            'score': host['extra_score'],
        })
def _searchOnHost(self, host, media, quality, results):
    """Search one newznab host for media and append release dicts to results.

    Spotnet posts (guid containing "@spot.net") trigger a second "details"
    request to pull a password out of the extended description; a found
    password is appended to the release name as " {{password}}".
    """
    query = self.buildUrl(media, host)
    url = "%s&%s" % (self.getUrl(host["host"]), query)
    nzbs = self.getRSSData(url, cache_timeout=1800, headers={"User-Agent": Env.getIdentifier()})

    for nzb in nzbs:

        date = None
        spotter = None
        for item in nzb:
            if date and spotter:
                break
            if item.attrib.get("name") == "usenetdate":
                date = item.attrib.get("value")
                # NOTE(review): this break ends the attribute scan once the
                # date is found, so a "poster" attribute listed after it is
                # never read — confirm the feed ordering makes that safe.
                break

            # Get the name of the person who posts the spot
            if item.attrib.get("name") == "poster":
                if "@spot.net" in item.attrib.get("value"):
                    spotter = item.attrib.get("value").split("@")[0]
                    continue

        # Fall back to the standard RSS pubDate when usenetdate is absent.
        if not date:
            date = self.getTextElement(nzb, "pubDate")

        nzb_id = self.getTextElement(nzb, "guid").split("/")[-1:].pop()
        name = self.getTextElement(nzb, "title")
        if not name:
            continue

        name_extra = ""
        if spotter:
            name_extra = spotter

        description = ""
        if "@spot.net" in nzb_id:
            try:
                # Get details for extended description to retrieve passwords
                query = self.buildDetailsUrl(nzb_id, host["api_key"])
                url = "%s&%s" % (self.getUrl(host["host"]), query)
                nzb_details = self.getRSSData(url, cache_timeout=1800, headers={"User-Agent": Env.getIdentifier()})[
                    0
                ]

                description = self.getTextElement(nzb_details, "description")

                # Extract a password from the description
                password = re.search(
                    "(?:" + self.passwords_regex + ")(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$", description, flags=re.I
                ).group(1)
                if password:
                    name += " {{%s}}" % password.strip()
            except:
                # Best effort: a failed details fetch/parse leaves the name as-is.
                log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))

        results.append(
            {
                "id": nzb_id,
                "provider_extra": urlparse(host["host"]).hostname or host["host"],
                "name": toUnicode(name),
                "name_extra": name_extra,
                "age": self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                "size": int(self.getElement(nzb, "enclosure").attrib["length"]) / 1024 / 1024,
                "url": ((self.getUrl(host["host"]) + self.urls["download"]) % tryUrlencode(nzb_id))
                + self.getApiExt(host),
                "detail_url": (cleanHost(host["host"]) + self.urls["detail"]) % tryUrlencode(nzb_id),
                "content": self.getTextElement(nzb, "description"),
                "description": description,
                "score": host["extra_score"],
            }
        )
def download(self, url='', nzb_id=''):
    """Fetch the nzb at the given url, identifying ourselves to the host."""
    identity_headers = {'User-Agent': Env.getIdentifier()}
    return self.urlopen(url, headers=identity_headers)
def search(self, movie, quality): results = [] if self.isDisabled(): return results cat_id_string = '&'.join( ['c%s=1' % x for x in self.getCatId(quality.get('identifier'))]) arguments = tryUrlencode({ 'searchtext': 'imdb:' + movie['library']['identifier'][2:], 'uid': self.conf('userid'), 'key': self.conf('api_key'), 'age': Env.setting('retention', section='nzb'), }) # check for english_only if self.conf('english_only'): arguments += "&lang0=1&lang3=1&lang1=1" url = "%s&%s&%s" % (self.urls['search'], arguments, cat_id_string) cache_key = 'nzbsrus_1.%s.%s' % (movie['library'].get('identifier'), cat_id_string) single_cat = True data = self.getCache(cache_key, url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'results/result') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: title = self.getTextElement(nzb, "name") if 'error' in title.lower(): continue id = self.getTextElement(nzb, "id") size = int( round(int(self.getTextElement(nzb, "size")) / 1048576)) age = int( round((time.time() - int(self.getTextElement(nzb, "postdate"))) / 86400)) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': title, 'age': age, 'size': size, 'url': self.urls['download'] % id + self.getApiExt() + self.getTextElement(nzb, "key"), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "addtext"), 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=True, single=True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single=True) results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from Nzbsrus.com')
def createItems(self, url, cache_key, host, movie = None, quality = None, for_feed = False): results = [] data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results results = [] for nzb in nzbs: date = '' size = 0 for item in nzb: if item.attrib.get('name') == 'size': size = item.attrib.get('value') elif item.attrib.get('name') == 'usenetdate': date = item.attrib.get('value') if date is '': log.debug('Date not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title"))) if size is 0: log.debug('Size not parsed properly or not available for %s: %s', (host['host'], self.getTextElement(nzb, "title"))) id = self.getTextElement(nzb, "guid").split('/')[-1:].pop() new = { 'id': id, 'provider': self.getName(), 'provider_extra': host['host'], 'type': 'nzb', 'name': self.getTextElement(nzb, "title"), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'size': int(size) / 1024 / 1024, 'url': (self.getUrl(host['host'], self.urls['download']) % id) + self.getApiExt(host), 'download': self.download, 'detail_url': '%sdetails/%s' % (cleanHost(host['host']), id), 'content': self.getTextElement(nzb, "description"), } if not for_feed: is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = True, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) else: results.append(new) return results except SyntaxError: log.error('Failed to parse XML response from Newznab: %s', host) return results
def _searchOnHost(self, host, movie, quality, results):
    """Search one newznab host (honouring any configured custom tag)."""
    base_args = tryUrlencode({
        'imdbid': movie['library']['identifier'].replace('tt', ''),
        'apikey': host['api_key'],
        'extended': 1
    })
    custom_tag = host.get('custom_tag')
    arguments = base_args + ('&%s' % host['custom_tag'] if custom_tag else '')
    url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)

    nzbs = self.getRSSData(url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:

        date = None
        spotter = None
        for item in nzb:
            if date and spotter:
                break
            attr_name = item.attrib.get('name')
            if attr_name == 'usenetdate':
                date = item.attrib.get('value')
                # NOTE(review): this break stops the scan as soon as the date
                # is found, so a 'poster' attribute listed after it is never
                # read — confirm the feed ordering makes that safe.
                break
            # Get the name of the person who posts the spot
            if attr_name == 'poster':
                if "@spot.net" in item.attrib.get('value'):
                    spotter = item.attrib.get('value').split("@")[0]
                continue

        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        name_extra = spotter if spotter else ''

        results.append({
            'id': nzb_id,
            'provider_extra': urlparse(host['host']).hostname or host['host'],
            'name': toUnicode(name),
            'name_extra': name_extra,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
            'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
            'content': self.getTextElement(nzb, 'description'),
            'score': host['extra_score'],
        })
def download(self, data = None, movie = None, filedata = None):
    """Send a release to SABnzbd, by URL or by uploading the nzb file itself.

    Returns True when SAB acknowledges the job ('ok'), False otherwise.
    """
    # BUG FIX: mutable default arguments ({}) are shared across calls.
    data = data or {}
    movie = movie or {}

    log.info('Sending "%s" to SABnzbd.', data.get('name'))

    params = {
        'apikey': self.conf('api_key'),
        'cat': self.conf('category'),
        'mode': 'addurl',
        'nzbname': self.createNzbName(data, movie),
    }

    nzb_filename = None
    if filedata:
        # Anything under 50 bytes can't be a real nzb document.
        if len(filedata) < 50:
            log.error('No proper nzb available: %s', (filedata))
            return False

        # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
        nzb_filename = self.createFileName(data, filedata, movie)
        params['mode'] = 'addfile'
    else:
        params['name'] = data.get('url')

    url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(params)

    try:
        # BUG FIX: compare strings with ==, not the identity operator 'is'
        # (the old check only worked via CPython literal interning).
        if params.get('mode') == 'addfile':
            sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
        else:
            sab = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
    except URLError:
        log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
        return False
    except:
        log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
        return False

    result = sab.strip()
    if not result:
        log.error('SABnzbd didn\'t return anything.')
        return False

    log.debug('Result text from SAB: %s', result[:40])
    if result[:2] == 'ok':
        log.info('NZB sent to SAB successfully.')
        return True
    else:
        log.error(result[:40])
        return False
def search(self, movie, quality): results = [] if self.isDisabled(): return results cat_ids = ','.join(['%s' % x for x in self.getCatId(quality.get('identifier'))]) arguments = tryUrlencode({ 'term': movie['library']['identifier'], 'subcat': cat_ids, 'username': self.conf('username'), 'apikey': self.conf('api_key'), 'searchin': 'weblink', 'maxage': Env.setting('retention', section = 'nzb'), 'english': self.conf('english_only'), }) url = "%s?%s" % (self.urls['search'], arguments) cache_key = 'nzbmatrix.%s.%s' % (movie['library'].get('identifier'), cat_ids) data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: title = self.getTextElement(nzb, "title") if 'error' in title.lower(): continue id = int(self.getTextElement(nzb, "link").split('&')[0].partition('id=')[2]) size = self.getTextElement(nzb, "description").split('<br /><b>')[2].split('> ')[1] date = str(self.getTextElement(nzb, "description").split('<br /><b>')[3].partition('Added:</b> ')[2]) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': title, 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'size': self.parseSize(size), 'url': self.urls['download'] % id + self.getApiExt(), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "description"), 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = True, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from NZBMatrix.com')
def search(self, movie, quality): results = [] if self.isDisabled(): return results cat_ids = ','.join( ['%s' % x for x in self.getCatId(quality.get('identifier'))]) arguments = tryUrlencode({ 'term': movie['library']['identifier'], 'subcat': cat_ids, 'username': self.conf('username'), 'apikey': self.conf('api_key'), 'searchin': 'weblink', 'maxage': Env.setting('retention', section='nzb'), 'english': self.conf('english_only'), }) url = "%s?%s" % (self.urls['search'], arguments) cache_key = 'nzbmatrix.%s.%s' % (movie['library'].get('identifier'), cat_ids) data = self.getCache(cache_key, url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: title = self.getTextElement(nzb, "title") if 'error' in title.lower(): continue id = int( self.getTextElement( nzb, "link").split('&')[0].partition('id=')[2]) size = self.getTextElement( nzb, "description").split('<br /><b>')[2].split('> ')[1] date = str( self.getTextElement(nzb, "description").split( '<br /><b>')[3].partition('Added:</b> ')[2]) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': title, 'age': self.calculateAge( int(time.mktime(parse(date).timetuple()))), 'size': self.parseSize(size), 'url': self.urls['download'] % id + self.getApiExt(), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "description"), 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=True, single=True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single=True) results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from NZBMatrix.com')
def _searchOnHost(self, host, media, quality, results):
    """Search one newznab host for media and append release dicts to results.

    Spotnet posts (guid containing '@spot.net') trigger a second 'details'
    request to pull a password out of the extended description; a found
    password is appended to the release name as ' {{password}}'.
    """
    query = self.buildUrl(media, host)
    url = '%s%s' % (self.getUrl(host['host']), query)
    nzbs = self.getRSSData(url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:

        date = None
        spotter = None
        for item in nzb:
            if date and spotter:
                break
            if item.attrib.get('name') == 'usenetdate':
                date = item.attrib.get('value')
                # NOTE(review): this break ends the attribute scan once the
                # date is found, so a 'poster' attribute listed after it is
                # never read — confirm the feed ordering makes that safe.
                break

            # Get the name of the person who posts the spot
            if item.attrib.get('name') == 'poster':
                if "@spot.net" in item.attrib.get('value'):
                    spotter = item.attrib.get('value').split("@")[0]
                    continue

        # Fall back to the standard RSS pubDate when usenetdate is absent.
        if not date:
            date = self.getTextElement(nzb, 'pubDate')

        nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
        name = self.getTextElement(nzb, 'title')
        if not name:
            continue

        name_extra = ''
        if spotter:
            name_extra = spotter

        description = ''
        if "@spot.net" in nzb_id:
            try:
                # Get details for extended description to retrieve passwords
                query = self.buildDetailsUrl(nzb_id, host['api_key'])
                url = '%s%s' % (self.getUrl(host['host']), query)
                nzb_details = self.getRSSData(url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()})[0]

                description = self.getTextElement(nzb_details, 'description')

                # Extract a password from the description
                password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags=re.I).group(1)
                if password:
                    name += ' {{%s}}' % password.strip()
            except:
                # Best effort: a failed details fetch/parse leaves the name as-is.
                log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))

        results.append({
            'id': nzb_id,
            'provider_extra': urlparse(host['host']).hostname or host['host'],
            'name': toUnicode(name),
            'name_extra': name_extra,
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
            'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
            'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
            'content': self.getTextElement(nzb, 'description'),
            'description': description,
            'score': host['extra_score'],
        })
def search(self, movie, quality): results = [] if self.isDisabled(): return results cat_id_string = '&'.join(['c%s=1' % x for x in self.getCatId(quality.get('identifier'))]) arguments = tryUrlencode({ 'searchtext': 'imdb:' + movie['library']['identifier'][2:], 'uid': self.conf('userid'), 'key': self.conf('api_key'), 'age': Env.setting('retention', section = 'nzb'), }) # check for english_only if self.conf('english_only'): arguments += "&lang0=1&lang3=1&lang1=1" url = "%s&%s&%s" % (self.urls['search'], arguments , cat_id_string) cache_key = 'nzbsrus_1.%s.%s' % (movie['library'].get('identifier'), cat_id_string) single_cat = True data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'results/result') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: title = self.getTextElement(nzb, "name") if 'error' in title.lower(): continue id = self.getTextElement(nzb, "id") size = int(round(int(self.getTextElement(nzb, "size")) / 1048576)) age = int(round((time.time() - int(self.getTextElement(nzb, "postdate"))) / 86400)) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': title, 'age': age, 'size': size, 'url': self.urls['download'] % id + self.getApiExt() + self.getTextElement(nzb, "key"), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "addtext"), 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = True, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from Nzbsrus.com')
def call(self, request_params, use_json = True, **kwargs):
    """Query the SABnzbd API and return the response.

    With use_json the body is decoded and the section named by
    request_params['mode'] is returned (whole document as fallback,
    {} on an API error); otherwise the raw body is returned.
    """
    query = mergeDicts(request_params, {
        'apikey': self.conf('api_key'),
        'output': 'json'
    })
    api_url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' + tryUrlencode(query)

    raw = self.urlopen(api_url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)

    if not use_json:
        return raw

    parsed = json.loads(raw)
    if parsed.get('error'):
        log.error('Error getting data from SABNZBd: %s', parsed.get('error'))
        return {}

    return parsed.get(request_params['mode']) or parsed
def _search(self, movie, quality, results):
    """Query the provider's JSON search API and append matches to results."""
    # Get nbzs
    arguments = tryUrlencode({
        'q': movie['library']['identifier'].replace('tt', ''),
        'sf': quality.get('size_min'),
    })

    nzbs = self.getJsonData(self.urls['search'] % arguments, headers = {'User-Agent': Env.getIdentifier()})

    for nzb in nzbs:
        votes = nzb['votes']
        well_voted = votes['upvotes'] > votes['downvotes']
        results.append({
            'id': nzb['guid'],
            'url': nzb['nzb'],
            'detail_url': self.urls['details'] % nzb['guid'],
            'name': nzb['name'],
            'age': self.calculateAge(int(nzb['postdate'])),
            'size': tryInt(nzb['size']) / 1024 / 1024,
            'score': 5 if well_voted else 0
        })