def _searchOnTitle(self, title, media, quality, results):
    """Search the provider's RSS feed for `title` and append releases to `results`.

    Each RSS item yields a dict with id/name/url/detail_url/size/seeders/leechers.
    Size is reported in MB: GB values are multiplied by 1000 and KB values are
    zeroed out (rounded down to 0 MB), matching the provider's convention.

    Fixes: regex made a raw string, bare `except:` narrowed to `except
    Exception:`, and items whose description doesn't match the size/seeds
    pattern are skipped instead of aborting the whole result set.
    """
    search_url = self.urls['search']

    # Build the query; optionally require a minimum seed count.
    search_params = self.buildUrl(title, media, quality)
    min_seeds = try_int(self.conf('minimal_seeds'))
    if min_seeds:
        search_params += ' seed > %s' % (min_seeds - 1)

    rss_data = self.getRSSData(search_url % search_params)
    if not rss_data:
        return

    try:
        for result in rss_data:
            name = self.get_text_element(result, 'title')
            detail_url = self.get_text_element(result, 'link')
            description = self.get_text_element(result, 'description')

            # The info-hash is the last path segment of the detail link.
            magnet = split_string(detail_url, '/')[-1]
            magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (
                magnet.upper(), try_url_encode(name),
                try_url_encode('udp://tracker.openbittorrent.com/announce'))

            reg = re.search(
                r'Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)',
                six.text_type(description))
            if not reg:
                # Unexpected description format: skip this entry rather than
                # raising AttributeError and losing all remaining results.
                continue

            size = reg.group('size')
            unit = reg.group('unit')
            seeds = reg.group('seeds').replace(',', '')
            peers = reg.group('peers').replace(',', '')

            multiplier = 1
            if unit == 'GB':
                multiplier = 1000
            elif unit == 'KB':
                multiplier = 0

            results.append({
                'id': magnet,
                'name': six.text_type(name),
                'url': magnet_url,
                'detail_url': detail_url,
                'size': try_int(size) * multiplier,
                'seeders': try_int(seeds),
                'leechers': try_int(peers),
            })
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        log.error('Failed getting results from %s: %s',
                  (self.getName(), traceback.format_exc()))
def buildUrl(self, media):
    """Build the provider query string: the encoded library-query search
    phrase followed by the fixed result-listing options."""
    search_part = try_url_encode({
        'q': '%s' % fire_event('library.query', media, single=True),
    })
    options_part = try_url_encode({
        'ig': 1,
        'rpp': 200,
        'st': 5,
        'sp': 1,
        'ns': 1,
    })
    return '%s&%s' % (search_part, options_part)
def buildDetailsUrl(self, nzb_id, api_key):
    """Return the encoded query string for a newznab 'details' request."""
    return try_url_encode({
        't': 'details',
        'id': nzb_id,
        'apikey': api_key,
    })
def _searchOnTitle(self, title, movie, quality, results):
    """Query the provider JSON API for '<title> <year>' and append NZB hits
    to `results` (size converted from bytes to MB)."""
    search_term = '%s %s' % (title, movie['info']['year'])
    params = try_url_encode({
        'search': search_term,
        'catid': ','.join(str(c) for c in self.getCatId(quality)),
        'user': self.conf('username', default = ''),
        'api': self.conf('api_key', default = ''),
    })

    # Append any user-configured extra query parameters verbatim.
    if len(self.conf('custom_tag')) > 0:
        params = '%s&%s' % (params, self.conf('custom_tag'))

    nzbs = self.getJsonData(self.urls['search'] % params)
    if not isinstance(nzbs, list):
        return

    for nzb in nzbs:
        results.append({
            'id': nzb.get('nzbid'),
            'name': to_unicode(nzb.get('release')),
            'age': self.calculateAge(try_int(nzb.get('usenetage'))),
            'size': try_int(nzb.get('sizebytes')) / 1024 / 1024,  # bytes -> MB
            'url': nzb.get('getnzb'),
            'detail_url': nzb.get('details'),
            'description': nzb.get('weblink'),
        })
def search(self, q, limit=12):
    """Look a movie up on OMDb by title (and year, when detectable).

    Returns a one-element list containing the parsed movie, or [] when the
    plugin is disabled or nothing matched.
    """
    if self.is_disabled():
        return []

    # Split "Name 2010"-style queries into name/year; fall back to the raw query.
    name_year = fire_event('scanner.name_year', q, single=True)
    if not name_year or (name_year and not name_year.get('name')):
        name_year = {'name': q}

    cache_key = 'omdbapi.cache.%s' % q
    url = self.urls['search'] % (self.getApiKey(), try_url_encode({
        't': name_year.get('name'),
        'y': name_year.get('year', ''),
    }))
    cached = self.getCache(cache_key, url, timeout=3,
                           headers={'User-Agent': Env.getIdentifier()})

    if not cached:
        return []

    result = self.parseMovie(cached)
    if result.get('titles') and len(result.get('titles')) > 0:
        log.info('Found: %s',
                 result['titles'][0] + ' (' + str(result.get('year')) + ')')
        return [result]
    return []
def findViaAlternative(self, group):
    """Fallback trailer lookup: scrape the provider's backup search page for
    the movie title and collect 480p/720p/1080p trailer links.

    Returns a dict mapping resolution label -> list of URLs (possibly empty).
    """
    results = {'480p': [], '720p': [], '1080p': []}

    movie_name = get_title(group)

    url = "%s?%s" % (self.urls['backup'], try_url_encode({'s': movie_name}))
    try:
        data = self.getCache('hdtrailers.alt.%s' % get_identifier(group), url, show_error=False)
    except HTTPError:
        log.debug('No alternative page found for: %s', movie_name)
        data = None

    if not data:
        return results

    try:
        html = BeautifulSoup(data, parse_only = self.only_tables_tags)
        result_table = html.find_all('h2', text = re.compile(movie_name))

        for h2 in result_table:
            # NOTE(review): h2 is a BeautifulSoup Tag; Tag has no .lower(),
            # so this line likely raises AttributeError (silently caught by
            # the handler below) — confirm whether find_all can return
            # NavigableStrings here.
            if 'trailer' in h2.lower():
                parent = h2.parent.parent.parent
                trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                try:
                    for trailer in trailerLinks:
                        # NOTE(review): `trailer` is an element while the
                        # results dict is keyed by '480p'/'720p'/'1080p'
                        # strings; this indexing looks suspect — verify.
                        results[trailer].insert(0, trailer.parent['href'])
                except:
                    # Best-effort: ignore malformed link rows.
                    pass

    except AttributeError:
        log.debug('No trailers found in via alternative.')

    return results
def getAuthorizationUrl(self, host=None, **kwargs):
    """Begin the Twitter OAuth flow: fetch a temporary request token and
    return the authorize URL the user must visit.

    Returns {'success': False} on failure, or {'success': True, 'url': ...}.
    """
    callback_url = clean_host(host) + '%snotify.%s.credentials/' % (
        Env.get('api_base').lstrip('/'), self.getName().lower())

    consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth2.Client(consumer)

    resp, content = client.request(
        self.urls['request'], 'POST',
        body=try_url_encode({'oauth_callback': callback_url}))

    if resp['status'] != '200':
        log.error('Invalid response from Twitter requesting temp token: %s',
                  resp['status'])
        return {
            'success': False,
        }

    # Keep the token pair around for the follow-up access-token exchange.
    self.request_token = dict(parse_qsl(content))
    auth_url = self.urls['authorize'] + (
        "?oauth_token=%s" % self.request_token['oauth_token'])

    log.info('Redirecting to "%s"', auth_url)
    return {
        'success': True,
        'url': auth_url,
    }
def notify(self, message='', data=None, listener=None):
    """Push *message* to the configured Join devices.

    Returns True only when every device accepted the notification.

    Bug fix: the API key used to be appended to `self.url` in place, so each
    call after the first stacked another `&apikey=...` onto the shared URL
    template. The request URL is now built in a local variable.
    """
    if not data:
        data = {}

    # Without an API key, Join only supports a single unnamed device.
    device_default = [None]
    url = self.url

    apikey = self.conf('apikey')
    if apikey is not None:
        # Add the key to this request only -- do not mutate self.url.
        url = url + '&apikey=' + apikey
        # With an API key present, default to broadcasting to all devices.
        device_default = ['group.all']

    devices = self.getDevices() or device_default
    successful = 0
    for device in devices:
        response = self.urlopen(
            url % (self.default_title,
                   try_url_encode(to_unicode(message)), device, self.icon))
        if response:
            successful += 1
        else:
            log.error(
                'Unable to push notification to Join device with ID %s' % device)

    return successful == len(devices)
def getSourceUrl(self, repo=None, repo_name=None, branch=None):
    """Ask the updater service for the source URL of the given repo/branch."""
    params = try_url_encode({
        'repo': repo,
        'name': repo_name,
        'branch': branch,
    })
    return self.getJsonData(self.urls['updater'] % params,
                            headers=self.getRequestHeaders())
def buildUrl(self, title, media, quality):
    """Encode the quoted-title-plus-year search and the first category id."""
    year = media['info']['year']
    return try_url_encode({
        'search': '"%s" %s' % (title, year),
        'cat': self.getCatId(quality)[0],
    })
def buildUrl(self, media, quality):
    """Encode the library-query search term plus the first category id."""
    search_term = fire_event('library.query', media, single=True)
    return try_url_encode({
        'search': search_term,
        'cat': self.getCatId(quality)[0],
    })
def getMessages(self, last_check=0):
    """Fetch service messages newer than *last_check* (responses cached 10s)."""
    params = try_url_encode({
        'last_check': last_check,
    })
    return self.getJsonData(self.urls['messages'] % params,
                            headers=self.getRequestHeaders(),
                            cache_timeout=10)
def buildUrl(self, title, media, quality):
    """Build the search URL: the category id fills both template slots, then
    the encoded search arguments are appended."""
    cat_id = self.getCatId(quality)[0]
    base = self.urls['search'] % (cat_id, cat_id)
    encoded = try_url_encode({
        'search': '%s %s' % (title, media['info']['year']),
        'method': 2,
    })
    return "%s&%s" % (base, encoded)
def buildUrl(self, media, host):
    """Build the per-host search URL carrying credentials, the imdb id and
    the 'title year' search phrase."""
    query = try_url_encode({
        'user': host['name'],
        'passkey': host['pass_key'],
        'imdbid': get_identifier(media),
        'search': get_title(media) + ' ' + str(media['info']['year']),
    })
    return '%s?%s' % (host['host'], query)
def buildUrl(self, media, quality):
    """Encode the provider's advanced-search parameters for media/quality."""
    params = {
        'q': get_identifier(media),
        'm': 'n',
        'max': 400,
        'adv_age': Env.setting('retention', 'nzb'),  # honor NZB retention setting
        'adv_sort': 'date',
        'adv_col': 'on',
        'adv_nfo': 'on',
        'xminsize': quality.get('size_min'),
        'xmaxsize': quality.get('size_max'),
    }
    return try_url_encode(params)
def _buildUrl(self, query, quality):
    """Fill the search URL template with category ids and the encoded query.

    Returns None (after a warning) when the quality maps to no category ids.
    Percent signs in the encoded query are doubled because the caller passes
    the result through another %-formatting step.
    """
    cat_ids = self.getCatId(quality)
    if not cat_ids:
        log.warning('Unable to find category ids for identifier "%s"',
                    quality.get('identifier'))
        return None

    stripped = query.replace('"', '')
    cats_part = "&".join(("%d=" % cat_id) for cat_id in cat_ids)
    return self.urls['search'] % (
        cats_part, try_url_encode(stripped).replace('%', '%%'))
def _searchOnTitle(self, title, movie, quality, results):
    """Scrape the provider's HTML results table for '<title> <year>' and
    append torrent dicts (id/name/url/detail_url/size/seeders) to results.

    Fix: bare `except:` narrowed to `except Exception:` so interpreter
    signals (SystemExit/KeyboardInterrupt) propagate.
    """
    url = self.urls['search'] % (
        try_url_encode('%s %s' % (title.replace(':', ''), movie['info']['year'])),
        self.getCatId(quality)[0])
    data = self.getHTMLData(url)

    if not data:
        return

    html = BeautifulSoup(data)
    try:
        result_table = html.find(attrs={'id': 'torrenttable'})
        if not result_table:
            log.error('failed to generate result_table')
            return

        entries = result_table.find_all('tr')
        # First row is the table header.
        for result in entries[1:]:
            cells = result.find_all('td')
            link = result.find('a', attrs={'class': 'index'})
            # Download links look like download.php/<id>/<file>.
            torrent_id = link['href'].replace('download.php/', '').split('/')[0]
            torrent_file = link['href'].replace('download.php/', '').split('/')[1]
            # The size text spans two nodes around an inline tag.
            size = self.parseSize(cells[5].contents[0] + cells[5].contents[2])
            name = cells[1].contents[0].getText()
            seeders = cells[6].contents[0].getText()

            results.append({
                'id': torrent_id,
                'name': name,
                'url': self.urls['download'] % (torrent_id, torrent_file),
                'detail_url': self.urls['detail'] % torrent_id,
                'size': size,
                'seeders': seeders,
            })
    except Exception:
        # Narrowed from a bare `except:`; still logs and swallows parse errors.
        log.error('Failed to parsing %s: %s',
                  (self.getName(), traceback.format_exc()))
def call(self, call, parameters=None, is_repeat=False, auth=True, *args, **kwargs):
    """Call the downloader API endpoint *call* with *parameters*.

    Logs in first when no session exists, attaches the session id, and on an
    HTTP 403 re-logs-in and retries once. Returns the JSON payload, or {}.

    Fixes: the caller's `parameters` dict is no longer mutated in place
    (the sessionid used to be injected into it), and the bare `except:` is
    narrowed to `except Exception:`.
    """
    # Work on a copy so the caller's dict is never modified.
    parameters = dict(parameters) if parameters else {}

    # Login first when authentication is requested and no session exists.
    if not self.session_id and auth:
        self.login()

    # Always attach the session id when we have one.
    if self.session_id:
        parameters['sessionid'] = self.session_id

    params = try_url_encode(parameters)
    url = clean_host(self.conf('host')) + 'api/' + call

    try:
        data = self.getJsonData('%s%s' % (url, '?' + params if params else ''),
                                *args, cache_timeout=0, show_error=False, **kwargs)
        if data:
            return data
    except HTTPError as e:
        if e.response.status_code == 403 and not is_repeat:
            # Session probably expired: log in again and retry exactly once.
            self.login()
            return self.call(call, parameters=parameters, is_repeat=True, **kwargs)
        log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
    except Exception:
        log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

    return {}
def request(self, command, client):
    """Send an xbmcCmds HTTP command to a Plex client; True on success."""
    url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
        client['address'], client['port'], try_url_encode(command))

    try:
        self.plex.urlopen(url, headers={}, timeout=3, show_error=False)
    except Exception as err:
        log.error("Couldn't sent command to Plex: %s", err)
        return False

    return True
def _searchOnTitle(self, title, movie, quality, results):
    """Scrape the gazelle-style torrent table for '<title> <year>' matches
    and append torrent dicts to `results`.

    Fixes: bare `except:` narrowed to `except Exception:`; the inner
    variable that shadowed the search `url` is renamed for clarity.
    """
    movie_title = try_url_encode(
        '%s %s' % (title.replace(':', ''), movie['info']['year']))
    url = self.urls['search'] % (self.getSceneOnly(), movie_title)
    data = self.getHTMLData(url)

    if not data:
        return

    html = BeautifulSoup(data)
    try:
        result_table = html.find('table', attrs={'id': 'torrent_table'})
        if not result_table:
            return

        entries = result_table.find_all('tr', attrs={'class': 'torrent'})
        for result in entries:
            link = result.find('a', attrs={'dir': 'ltr'})
            # The download anchor is the parent of the "Download" span.
            download = result.find('span', attrs={'title': 'Download'}).parent
            tds = result.find_all('td')
            size = tds[5].contents[0].strip('\n ')

            results.append({
                'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                'name': link.contents[0],
                'url': self.urls['download'] % download['href'],
                'detail_url': self.urls['download'] % link['href'],
                'size': self.parseSize(size),
                # Last two columns hold seeders and leechers.
                'seeders': try_int(tds[len(tds) - 2].string),
                'leechers': try_int(tds[len(tds) - 1].string),
            })
    except Exception:
        # Narrowed from a bare `except:`.
        log.error('Failed to parsing %s: %s',
                  (self.getName(), traceback.format_exc()))
def request(self, call='', params=None, return_key=None):
    """GET a TMDB v3 endpoint and return the JSON (or just *return_key*).

    Returns None when the request fails (logged at debug level).

    Fixes: mutable default argument `params={}` replaced with None (shared
    dict across calls is a classic Python pitfall), and the bare `except:`
    narrowed to `except Exception:`.
    """
    # Drop falsy values; `params or {}` also guards against None.
    params = dict((k, v) for k, v in list((params or {}).items()) if v)
    params = try_url_encode(params)

    try:
        url = 'https://api.themoviedb.org/3/%s?api_key=%s%s' % (
            call, self.getApiKey(), '&%s' % params if params else '')
        data = self.getJsonData(url, show_error=False)
    except Exception:
        log.debug('Movie not found: %s, %s', (call, params))
        data = None

    if data and return_key and return_key in data:
        data = data.get(return_key)

    return data
class Join(Notification):
    """Join (joaoapps) push-notification handler.

    Bug fix: notify() used to append the API key to the class-level `url`
    template in place, so every call after the first stacked another
    `&apikey=...` onto the shared URL. The URL is now built per call.
    """

    # Request URL template: title, message, device id and icon fill the slots.
    url = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush?title=%s&text=%s&deviceId=%s&icon=%s'

    # URL for notification icon
    icon = try_url_encode(
        'https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/icons/android.png'
    )

    def notify(self, message='', data=None, listener=None):
        """Push *message* to every configured device; True when all succeed."""
        if not data:
            data = {}

        # Without an API key, Join only supports a single unnamed device.
        device_default = [None]
        url = self.url

        apikey = self.conf('apikey')
        if apikey is not None:
            # Add the key to this request only -- do not mutate self.url.
            url = url + '&apikey=' + apikey
            # With an API key present, default to broadcasting to all devices.
            device_default = ['group.all']

        devices = self.getDevices() or device_default
        successful = 0
        for device in devices:
            response = self.urlopen(
                url % (self.default_title,
                       try_url_encode(to_unicode(message)), device, self.icon))
            if response:
                successful += 1
            else:
                log.error(
                    'Unable to push notification to Join device with ID %s' % device)

        return successful == len(devices)

    def getDevices(self):
        """Return the configured device ids as a list (empty when unset)."""
        return split_string(self.conf('devices'))
def addToLibrary(self, message=None, group=None):
    """Kick off a background library scan on a Popcorn Hour (NMJ) device.

    Optionally mounts a network share first. Returns True when the device
    reports the scanner started, False on any failure, None when disabled.

    Fixes: `et.findtext` can return None when the `returnValue` element is
    missing, which used to crash `int(result)`; that case is now reported
    as a failure. Bare `except:` clauses narrowed to `except Exception:`.
    """
    if self.is_disabled():
        return
    if not group:
        group = {}

    host = self.conf('host')
    mount = self.conf('mount')
    database = self.conf('database')

    if mount:
        log.debug('Try to mount network drive via url: %s', mount)
        try:
            self.urlopen(mount)
        except Exception:
            # Best effort: without the mount the scan can't see the files.
            return False

    params = try_url_encode({
        'arg0': 'scanner_start',
        'arg1': database,
        'arg2': 'background',
        'arg3': '',
    })
    update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {
        'host': host,
        'params': params,
    }

    try:
        response = self.urlopen(update_url)
    except Exception:
        return False

    try:
        et = etree.fromstring(response)
        result = et.findtext('returnValue')
    except SyntaxError as e:
        log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
        return False

    # findtext returns None when the element is absent; treat as failure
    # instead of crashing on int(None).
    if result is None:
        log.error('Popcorn Hour returned an errorcode: %s', result)
        return False

    if int(result) > 0:
        log.error('Popcorn Hour returned an errorcode: %s', result)
        return False

    log.info('NMJ started background scan')
    return True
def buildUrl(self, media, host):
    """Build a newznab movie-search query for *host*, appending its custom
    tag and category when configured."""
    query = try_url_encode({
        't': 'movie',
        'imdbid': get_identifier(media).replace('tt', ''),
        'apikey': host['api_key'],
        'extended': 1
    })

    if len(host.get('custom_tag', '')) > 0:
        # Extra query parameters, appended verbatim.
        query = '%s&%s' % (query, host.get('custom_tag'))

    if len(host['custom_category']) > 0:
        query = '%s&cat=%s' % (query, host['custom_category'])

    return query
def _searchOnTitle(self, title, movie, quality, results):
    """Scrape the provider's bordered results table for '<title> <year>'
    and append torrent dicts to `results`.

    Fix: bare `except:` narrowed to `except Exception:`.
    """
    url = self.urls['search'] % (
        try_url_encode('%s %s' % (title.replace(':', ''), movie['info']['year'])),
        self.getCatId(quality)[0])
    data = self.getHTMLData(url)

    if not data:
        return

    html = BeautifulSoup(data)
    try:
        result_table = html.find('table', attrs={'border': '1'})
        if not result_table:
            return

        entries = result_table.find_all('tr')
        # Skip the header row.
        for result in entries[1:]:
            cells = result.find_all('td')
            link = cells[1].find('a', attrs={'class': 'index'})
            full_id = link['href'].replace('details.php?id=', '')
            torrent_id = full_id[:7]
            # Titles come back ISO-8859-1 encoded.
            name = to_unicode(
                link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()

            results.append({
                'id': torrent_id,
                'name': name,
                'url': self.urls['download'] % (torrent_id, name),
                'detail_url': self.urls['detail'] % torrent_id,
                # The size text spans two nodes around an inline tag.
                'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                'seeders': try_int(cells[8].find('span').contents[0]),
                'leechers': try_int(cells[9].find('span').contents[0]),
            })
    except Exception:
        # Narrowed from a bare `except:`.
        log.error('Failed to parsing %s: %s',
                  (self.getName(), traceback.format_exc()))
def call(self, request_params, use_json=True, **kwargs):
    """Issue a SABnzbd API call and return its payload.

    With use_json, an error reported by SABnzbd is logged and {} returned;
    otherwise the raw response body is handed back untouched.
    """
    query = try_url_encode(merge_dictionaries(request_params, {
        'apikey': self.conf('api_key'),
        'output': 'json',
    }))
    url = clean_host(self.conf('host'), ssl=self.conf('ssl')) + 'api?' + query

    data = self.urlopen(url, timeout=60, show_error=False,
                        headers={'User-Agent': Env.getIdentifier()}, **kwargs)

    if not use_json:
        return data

    decoded = json.loads(data)
    if decoded.get('error'):
        log.error('Error getting data from SABNZBd: %s', decoded.get('error'))
        return {}

    # Prefer the mode-specific sub-object; fall back to the full response.
    return decoded.get(request_params['mode']) or decoded
def _search(self, media, quality, results):
    """Search PassThePopcorn's JSON API and append scored torrent entries.

    Score bonuses/penalties (golden popcorn, freeleech, scene) follow the
    user's provider preferences; size is converted from bytes to MB.

    Fixes: bare `except:` narrowed to `except Exception:`, the unused
    `movie_title` local removed, and the loop-invariant `extra_check`
    closure hoisted out of the torrent loop.
    """
    quality_id = quality['identifier']

    params = merge_dictionaries(self.quality_search_params[quality_id].copy(), {
        'order_by': 'relevance',
        'order_way': 'descending',
        'searchstr': get_identifier(media)
    })

    url = '%s?json=noredirect&%s' % (self.urls['torrent'], try_url_encode(params))
    res = self.getJsonData(url)

    try:
        if not 'Movies' in res:
            return

        authkey = res['AuthKey']
        passkey = res['PassKey']

        # Depends only on quality_id, so one closure serves every result.
        def extra_check(item):
            return self.torrentMeetsQualitySpec(item, quality_id)

        for ptpmovie in res['Movies']:
            if not 'Torrents' in ptpmovie:
                log.debug('Movie %s (%s) has NO torrents',
                          (ptpmovie['Title'], ptpmovie['Year']))
                continue

            log.debug('Movie %s (%s) has %d torrents',
                      (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))

            for torrent in ptpmovie['Torrents']:
                torrent_id = try_int(torrent['Id'])
                torrentdesc = ''
                torrentscore = 0

                if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                    torrentdesc += ' HQ'
                    if self.conf('prefer_golden'):
                        torrentscore += 5000
                if 'FreeleechType' in torrent:
                    torrentdesc += ' Freeleech'
                    if self.conf('prefer_freeleech'):
                        torrentscore += 7000
                if 'Scene' in torrent and torrent['Scene']:
                    torrentdesc += ' Scene'
                    if self.conf('prefer_scene'):
                        torrentscore += 2000
                    if self.conf('no_scene'):
                        torrentscore -= 2000
                if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                    torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                torrent_name = torrent['ReleaseName'] + ' - %s' % torrentdesc

                results.append({
                    'id': torrent_id,
                    'name': torrent_name,
                    'Source': torrent['Source'],
                    'Checked': 'true' if torrent['Checked'] else 'false',
                    'Resolution': torrent['Resolution'],
                    'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (
                        self.urls['torrent'], torrent_id, authkey, passkey),
                    'detail_url': self.urls['detail'] % torrent_id,
                    'date': try_int(time.mktime(parse(torrent['UploadTime']).timetuple())),
                    'size': try_int(torrent['Size']) / (1024 * 1024),  # bytes -> MB
                    'seeders': try_int(torrent['Seeders']),
                    'leechers': try_int(torrent['Leechers']),
                    'score': torrentscore,
                    'extra_check': extra_check,
                })
    except Exception:
        # Narrowed from a bare `except:`.
        log.error('Failed getting results from %s: %s',
                  (self.getName(), traceback.format_exc()))
def buildUrl(self, title, media, quality):
    """URL-encode the '<title> <year>' search phrase."""
    search_phrase = '%s %s' % (title, media['info']['year'])
    return try_url_encode(search_phrase)
def _searchOnTitle(self, title, movie, quality, results):
    """Walk the provider's paginated download tables for '<title>-<year>'.

    Stops when no next-page link is found, the configured page limit is
    reached, or the application is shutting down.

    Fixes: bare `except:` narrowed to `except Exception:`; the inner
    variable shadowing the search `url` renamed to `detail`.
    """
    movie_title = try_url_encode('%s-%s' % (
        title.replace(':', '').replace(' ', '-'), movie['info']['year']))

    next_page = True
    current_page = 1
    max_page = self.conf('max_pages')

    while next_page and current_page <= max_page and not self.shuttingDown():
        next_page = False
        # Template wants the title's first letter, the slug and the page number.
        url = self.urls['search'] % (movie_title[:1], movie_title, current_page)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)
            try:
                result_table = html.find('table', attrs={'class': 'download'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')
                for result in entries:
                    if result.find('td', attrs={'class': 'n'}):
                        link = result.find('td', attrs={'class': 'n'}).find('a')
                        detail = result.find('td', attrs={'class': 'm'}).find('a')
                        tds = result.find_all('td')
                        size = tds[5].contents[0].strip('\n ')
                        age = tds[2].contents[0].strip('\n ')

                        results.append({
                            'id': link['href'].split('/')[2],
                            'name': link['title'],
                            'url': detail['href'],
                            'detail_url': self.urls['detail'] % link['href'],
                            'size': self.parseSize(size),
                            'age': self.ageToDays(age),
                            # Last two columns hold seeders and leechers.
                            'seeders': try_int(tds[len(tds) - 2].string),
                            'leechers': try_int(tds[len(tds) - 1].string),
                        })
                    elif result.find('td', attrs={'id': 'pages'}):
                        # Pager row: continue only if a next-page link exists.
                        page_td = result.find('td', attrs={'id': 'pages'})
                        next_title = 'Downloads | Page %s' % (current_page + 1)
                        if page_td.find('a', attrs={'title': next_title}):
                            next_page = True
            except Exception:
                # Narrowed from a bare `except:`.
                log.error('Failed to parsing %s: %s',
                          (self.getName(), traceback.format_exc()))

        current_page += 1
def buildUrl(self, media, page, cats):
    """Return the (quoted encoded query, page, comma-joined categories)
    tuple used to fill the provider's search URL template."""
    quoted_query = try_url_encode(
        '"%s"' % fire_event('library.query', media, single=True))
    cat_list = ','.join(str(cat) for cat in cats)
    return (quoted_query, page, cat_list)