def search_newznab(self, url_base, apikey, **params):
    ''' Searches Newznab/Torznab indexer for a movie.

    url_base (str): base url for all requests (https://indexer.com/)
    apikey (str): api key for indexer
    params (dict): parameters to url encode and append to url

    Builds the api url from url_base plus encoded params, fetches it
    (optionally bypassing the proxy for whitelisted hosts) and parses
    the xml response.

    Returns list of dicts of search results
    '''
    encoded = urllib.parse.urlencode(params)
    url = '{}api?apikey={}&{}'.format(url_base, apikey, encoded)

    # Never log the real api key.
    logging.info('SEARCHING: {}api?apikey=APIKEY&{}'.format(url_base, encoded))

    use_proxy = core.CONFIG['Server']['Proxy']['enabled']

    try:
        if use_proxy and proxy.whitelist(url) is True:
            response = Url.open(url, proxy_bypass=True).text
        else:
            response = Url.open(url).text
        return self.parse_newznab_xml(response)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Newz/TorzNab backlog search.', exc_info=True)
        return []
def search(imdbid):
    ''' Searches ThePirateBay for a movie by imdb id.

    imdbid (str): imdb id #

    Returns list of dicts of search results
    '''
    use_proxy = core.CONFIG['Server']['Proxy']['enabled']

    logging.info('Searching ThePirateBay for {}.'.format(imdbid))

    url = 'https://www.thepiratebay.org/search/{}/0/99/200'.format(imdbid)
    headers = {'Cookie': 'lw=s'}

    try:
        bypass = use_proxy and Proxy.whitelist('https://www.thepiratebay.org') is True
        if bypass:
            response = Url.open(url, proxy_bypass=True, headers=headers).text
        else:
            response = Url.open(url, headers=headers).text

        return ThePirateBay.parse(response, imdbid) if response else []
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:  # noqa
        logging.error('ThePirateBay search failed.', exc_info=True)
        return []
def search(imdbid, term):
    ''' Performs backlog search on TorrentDownloads.

    imdbid (str): imdb id #
    term (str): search term

    Returns list of dicts of search results
    '''
    use_proxy = core.CONFIG['Server']['Proxy']['enabled']

    logging.info(
        'Performing backlog search on TorrentDownloads for {}.'.format(imdbid))

    url = 'http://www.torrentdownloads.me/rss.xml?type=search&search={}'.format(
        term)

    try:
        bypass = use_proxy and core.proxy.whitelist(
            'http://www.torrentdownloads.me') is True
        if bypass:
            response = Url.open(url, proxy_bypass=True).text
        else:
            response = Url.open(url).text

        if not response:
            return []
        return _parse(response, imdbid)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('TorrentDownloads search failed.', exc_info=True)
        return []
def search(imdbid, term):
    ''' Performs backlog search on YTS.

    imdbid (str): imdb id # used as the query term
    term (str): search term, passed through to the parser

    Returns list of dicts of search results
    '''
    use_proxy = core.CONFIG['Server']['Proxy']['enabled']

    logging.info('Performing backlog search on YTS for {}.'.format(imdbid))

    url = 'https://yts.ag/api/v2/list_movies.json?limit=1&query_term={}'.format(
        imdbid)

    try:
        if use_proxy and core.proxy.whitelist('https://www.yts.ag') is True:
            response = Url.open(url, proxy_bypass=True).text
        else:
            response = Url.open(url).text

        if not response:
            return []

        r = json.loads(response)
        if r['data']['movie_count'] < 1:
            return []
        return _parse(r['data']['movies'][0], imdbid, term)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('YTS search failed.', exc_info=True)
        return []
def _set_label(torrent, label, url):
    ''' Sets label for download

    torrent: str hash of torrent to apply label
    label: str name of label to apply
    url: str url of deluge web interface

    Queries Deluge for existing labels, creates the label if it does not
    exist yet, then assigns it to the torrent. Each JSON-RPC call consumes
    and increments the module-level command_id counter.

    Returns bool
    '''
    global command_id

    # Normalize: strip disallowed characters (label_fix regex), lowercase,
    # remove spaces -- Deluge label names are restricted.
    label = label_fix.sub('', label.lower()).replace(' ', '')

    logging.info('Applying label {} to torrent {} in Deluge Web UI.'.format(
        label, torrent))

    # Step 1: fetch the list of labels Deluge already knows about.
    command = {'method': 'label.get_labels', 'params': [], 'id': command_id}
    command_id += 1

    try:
        response = Url.open(url, post_data=json.dumps(command),
                            headers=headers).text
        deluge_labels = json.loads(response).get('result') or []
    except Exception as e:
        logging.error('Unable to get labels from Deluge Web UI.',
                      exc_info=True)
        return False

    # Step 2: create the label if it does not exist yet.
    if label not in deluge_labels:
        logging.info('Adding label {} to Deluge.'.format(label))
        command = {'method': 'label.add', 'params': [label], 'id': command_id}
        command_id += 1
        try:
            sc = Url.open(url, post_data=json.dumps(command),
                          headers=headers).status_code
            if sc != 200:
                logging.error('Deluge Web UI response {}.'.format(sc))
                return False
        except Exception as e:
            logging.error('Delugeweb get_labels.', exc_info=True)
            return False

    # Step 3: assign the label to the torrent (Deluge expects the hash
    # lower-cased).
    try:
        command = {
            'method': 'label.set_torrent',
            'params': [torrent.lower(), label],
            'id': command_id
        }
        command_id += 1
        sc = Url.open(url, post_data=json.dumps(command),
                      headers=headers).status_code
        if sc != 200:
            logging.error('Deluge Web UI response {}.'.format(sc))
            return False
    except Exception as e:
        logging.error('Delugeweb set_torrent.', exc_info=True)
        return False

    return True
def _download_magnet(data, path):
    ''' Resolves magnet link to torrent file

    data (dict): release information; 'guid' holds the magnet hash
    path (str): absolute path to FILE in which to save file

    Attempts to use magnet2torrent.com to resolve to torrent file. If that
    fails, iterates through bt_cache sites and attempts to get download.

    The downloaded content is ran through bencode (via core.helpers.Torrent)
    to make sure the hash from the torrent file (or whatever content was
    download) matches the hash submitted.

    Returns bool
    '''
    # Hashes are compared upper-case throughout.
    magnet_hash = data['guid'].upper()
    try:
        logging.info(
            'Attempting to resolve torrent hash through magnet2torrent.com')
        dl_bytes = Url.open('http://magnet2torrent.com/upload/',
                            post_data={
                                'magnet': 'magnet:?xt=urn:btih:{}'.format(magnet_hash)
                            },
                            stream=True).content
        # Only keep the download if its bencoded info-hash matches the hash
        # we asked for (guards against junk/error pages).
        if _verify_torrent(dl_bytes, magnet_hash):
            logging.info('Torrent found on magnet2torrent.com')
            with open(path, 'wb') as f:
                f.write(dl_bytes)
            del dl_bytes  # release the (possibly large) payload immediately
            return True
    except Exception as e:
        logging.warning('Unable to reach magnet2torrent.com', exc_info=True)

    # Fallback: try each public bittorrent cache site in order.
    for i in bt_cache:
        try:
            url = i.format(magnet_hash)
            logging.info(
                'Attempting to resolve torrent hash through {}'.format(url))
            dl_bytes = Url.open(url, stream=True).content
            if _verify_torrent(dl_bytes, magnet_hash):
                logging.info('Torrent found at {}'.format(url))
                with open(path, 'wb') as f:
                    f.write(dl_bytes)
                del dl_bytes
                return True
            else:
                continue
        except Exception as e:
            logging.warning(
                'Unable to resolve magnet hash through {}.'.format(i),
                exc_info=True)
            continue

    logging.warning(
        'Torrent hash {} not found on any torrent cache.'.format(magnet_hash))
    return False
def get_imdbid(self, tmdbid=None, title=None, year=''):
    ''' Gets imdbid from tmdbid or title and year

    tmdbid: str TMDB movie id #
    title: str movie title
    year: str year of movie release

    MUST supply either tmdbid or title. Year is optional with title, but
    results are more reliable with it.

    Returns str imdbid or None on failure
    '''
    if not (tmdbid or title):
        logging.warning(
            'Neither tmdbid or title supplied. Unable to find imdbid.')
        return None

    if not tmdbid:
        # First resolve tmdbid via a title/year search.
        query = Url.normalize(title)
        yr = Url.normalize(year)

        url = 'https://api.themoviedb.org/3/search/movie?api_key={}&language=en-US&query={}&year={}&page=1&include_adult=false'.format(
            _k(b'tmdb'), query, yr)

        # crude rate limiting -- wait until 3 tokens are available
        while self.get_tokens() < 3:
            sleep(0.3)
        self.use_token()

        try:
            matches = json.loads(Url.open(url).text)['results']
            if not matches:
                return None
            tmdbid = matches[0]['id']
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error('Error attempting to get TMDBID from TMDB.',
                          exc_info=True)
            return None

    url = 'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(
        tmdbid, _k(b'tmdb'))

    while self.get_tokens() < 3:
        sleep(0.3)
    self.use_token()

    try:
        return json.loads(Url.open(url).text).get('imdb_id')
    except Exception as e:  # noqa
        logging.error('Error attempting to get IMDBID from TMDB.',
                      exc_info=True)
        return None
def add_torrent(data):
    ''' Adds torrent or magnet to qbittorrent

    data: dict of torrent/magnet information

    Adds torrents to default/path/<category>

    Returns dict {'response': True, 'downloadid': 'id'}
                 {'response': False, 'error': 'exception'}
    '''
    conf = core.CONFIG['Downloader']['Torrent']['QBittorrent']

    host = conf['host']
    port = conf['port']
    base_url = '{}:{}/'.format(host, port)

    user = conf['user']
    password = conf['pass']

    # check cookie validity while getting default download dir
    download_dir = QBittorrent._get_download_dir(base_url)

    if not download_dir:
        if QBittorrent._login(base_url, user, password) is not True:
            # BUG FIX: 'usename' typo corrected
            return {'response': False, 'error': 'Incorrect username or password.'}
        download_dir = QBittorrent._get_download_dir(base_url)

    if not download_dir:
        return {'response': False, 'error': 'Unable to get path information.'}
    # if we got download_dir we can connect.

    post_data = {
        'urls': data['torrentfile'],
        'savepath': '{}{}'.format(download_dir, conf['category']),
        'category': conf['category']
    }

    url = '{}command/download'.format(base_url)
    headers = {'cookie': QBittorrent.cookie}

    try:
        # BUG FIX: was Python-2-only (urllib.urlencode, Url.request,
        # 'except Exception, e'); now matches the py3 Url.open call style.
        # QBit returns an empty string on success, so derive the download id
        # from the torrent file itself.
        Url.open(url, post_data=post_data, headers=headers)
        downloadid = Torrent.get_hash(data['torrentfile'])
        return {'response': True, 'downloadid': downloadid}
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('QBittorrent add torrent failed.', exc_info=True)
        # BUG FIX: str(e.reason) raised AttributeError for exceptions
        # without a .reason attribute
        return {'response': False, 'error': str(e)}
def add_torrent(data):
    ''' Adds torrent or magnet to qbittorrent

    data: dict of torrent/magnet information

    Adds torrents to default/path/<category>

    Returns dict {'response': True, 'downloadid': 'id'}
                 {'response': False, 'error': 'exception'}
    '''
    logging.info('Sending torrent {} to QBittorrent.'.format(data['title']))

    conf = core.CONFIG['Downloader']['Torrent']['QBittorrent']

    host = conf['host']
    port = conf['port']
    base_url = '{}:{}/'.format(host, port)

    if QBittorrent.cookie is None:
        QBittorrent._login(base_url, conf['user'], conf['pass'])

    download_dir = QBittorrent._get_download_dir(base_url)
    if not download_dir:
        return {
            'response': False,
            'error': 'Unable to get path information.'
        }

    # if we got download_dir we can connect.
    post_data = {
        'urls': data['torrentfile'],
        'savepath': '{}{}'.format(download_dir, conf['category']),
        'category': conf['category']
    }

    url = '{}command/download'.format(base_url)

    try:
        Url.open(url, post_data=post_data,
                 headers={'cookie': QBittorrent.cookie})
        # QBit returns an empty string; derive the id from the torrent file.
        return {'response': True,
                'downloadid': Torrent.get_hash(data['torrentfile'])}
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('QBittorrent connection test failed.', exc_info=True)
        return {'response': False, 'error': str(e)}
def get_imdbid(tmdbid=None, title=None, year=''):
    ''' Gets imdbid from tmdbid or title and year

    tmdbid (str): themoviedatabase id #
    title (str): movie title
    year (str/int): year of movie release

    MUST supply either tmdbid or title. Year is optional with title, but
    results are more reliable with it.

    Returns str imdbid
    '''
    if not (tmdbid or title):
        logging.warning(
            'Neither tmdbid or title supplied. Unable to find imdbid.')
        return ''

    if not tmdbid:
        # Resolve tmdbid via a title/year search first.
        query = Url.normalize(title)
        yr = Url.normalize(year)
        adult = 'true' if core.CONFIG['Search']['allowadult'] else 'false'

        url = 'https://api.themoviedb.org/3/search/movie?api_key={}&language=en-US&query={}&year={}&page=1&include_adult={}'.format(
            _k(b'tmdb'), query, yr, adult)

        TheMovieDatabase._use_token()

        try:
            matches = json.loads(Url.open(url).text)['results']
            if not matches:
                return ''
            tmdbid = matches[0]['id']
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            logging.error('Error attempting to get TMDBID from TMDB.',
                          exc_info=True)
            return ''

    url = 'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(
        tmdbid, _k(b'tmdb'))

    TheMovieDatabase._use_token()

    try:
        return json.loads(Url.open(url).text).get('imdb_id')
    except Exception as e:
        logging.error('Error attempting to get IMDBID from TMDB.',
                      exc_info=True)
        return ''
def _send(method, post_data=None):
    ''' Sends API request to QBittorrent

    method (str): name of method to call. *must* include category (ie 'query/preferences')
    post_data (dict): post data to send with request <optional>

    Logs in first if no session cookie is held; on a 403 response the
    session is re-established and the request retried once.

    Returns str text response from QBit, or False if the initial login fails

    Raises APIConnectionError on connection or http errors
    '''
    conf = core.CONFIG['Downloader']['Torrent']['QBittorrent']

    if not QBittorrent.cookie:
        r = QBittorrent._login('{}:{}/'.format(conf['host'], conf['port']),
                               conf['user'], conf['pass'])
        if r is not True:
            logging.error('Unable to connect to QBittorrent: {}'.format(r))
            return False

    url = '{}:{}/{}'.format(conf['host'], conf['port'], method)

    try:
        response = Url.open(url, post_data=post_data,
                            headers={'cookie': QBittorrent.cookie})
    except Exception as e:
        logging.error('Unable to contact QBittorrent API.', exc_info=True)
        # BUG FIX: 'response' was unbound here (Url.open raised before
        # assignment), turning connection errors into NameError. Build the
        # APIConnectionError from the exception instead.
        raise APIConnectionError('500', str(e)) from e

    if response.status_code == 403:
        # Session expired: drop the cookie, log in again and retry once.
        logging.info('QBittorrent request unauthorized.')
        QBittorrent.cookie = None
        u = '{}:{}/'.format(conf['host'], conf['port'])
        if QBittorrent._login(u, conf['user'], conf['pass']) is not True:
            raise APIConnectionError('403', 'Unable to log in to QBittorrent.')
        else:
            try:
                response = Url.open(url, post_data=post_data,
                                    headers={'cookie': QBittorrent.cookie})
            except Exception as e:
                logging.error('Unable to contact QBittorrent API.',
                              exc_info=True)
                # BUG FIX: same unbound-'response' defect as above.
                raise APIConnectionError('500', str(e)) from e
    elif response.status_code != 200:
        logging.error('QBittorrent API call failed: {}'.format(
            response.reason))
        raise APIConnectionError(response.status_code, response.reason)

    return response.text
def _search_imdbid(self, imdbid):
    ''' Search TMDB for a movie by imdb id.

    imdbid (str): imdb id #

    Returns list with one result dict, or [''] on failure / no match
    '''
    url = u'https://api.themoviedb.org/3/find/{}?language=en-US&external_source=imdb_id'.format(imdbid)
    logging.info('Searching TMDB {}'.format(url))
    # Append the key after logging so it never appears in the log.
    url = url + '&api_key={}'.format(_k('tmdb'))

    request = Url.request(url)

    # crude rate limiting -- wait until at least 3 tokens are available
    while self.get_tokens() < 3:
        sleep(0.5)
    self.use_token()

    try:
        response = Url.open(request)
        results = json.loads(response)
        if results['movie_results'] == []:
            return ['']
        else:
            response = results['movie_results'][0]
            response['imdbid'] = imdbid
            return [response]
    except (SystemExit, KeyboardInterrupt):
        raise
    # BUG FIX: was Python-2-only 'except Exception, e:' syntax, a
    # SyntaxError under Python 3 (the rest of this file is py3).
    except Exception as e:  # noqa
        logging.error(u'Error searching for IMDBID on TMDB.', exc_info=True)
        return ['']
def get_trailer(self, title_date):
    ''' Gets trailer embed url from Youtube.

    :param title_date: str movie title and date ("Movie Title 2016")

    Attempts to connect 3 times in case Youtube is down or not responding
    Can fail if no response is received.

    Returns str or None
    '''
    search_term = Url.encode((title_date + '+trailer'))

    search_string = u"https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&maxResults=1&key={}".format(search_term, _k('youtube'))

    request = Url.request(search_string)

    tries = 0
    while tries < 3:
        try:
            response = Url.open(request)
            results = json.loads(response)
            return results['items'][0]['id']['videoId']
        except (SystemExit, KeyboardInterrupt):
            raise
        # BUG FIX: was Python-2-only 'except Exception, e:' syntax, a
        # SyntaxError under Python 3 (the rest of this file is py3).
        except Exception as e:  # noqa
            # Only log on the final failed attempt; function then falls
            # through and implicitly returns None.
            if tries == 2:
                logging.error(u'Unable to get trailer from Youtube.',
                              exc_info=True)
            tries += 1
def _search_tmdbid(self, tmdbid):
    ''' Search TMDB for a movie by tmdb id.

    tmdbid (str): themoviedatabase id #

    Returns list with one result dict, or [''] on failure
    '''
    url = u'https://api.themoviedb.org/3/movie/{}?language=en-US&append_to_response=alternative_titles,external_ids,release_dates'.format(tmdbid)
    logging.info('Searching TMDB {}'.format(url))
    # Append the key after logging so it never appears in the log.
    url = url + '&api_key={}'.format(_k('tmdb'))

    # BUG FIX: the request object was built twice; once is enough.
    request = Url.request(url)

    # crude rate limiting -- wait until at least 3 tokens are available
    while self.get_tokens() < 3:
        sleep(0.3)
    self.use_token()

    try:
        response = Url.open(request)
        results = json.loads(response)
        # TMDB error payloads carry a 'status_code' field.
        if results.get('status_code'):
            logging.warning(results.get('status_code'))
            return ['']
        else:
            return [results]
    except (SystemExit, KeyboardInterrupt):
        raise
    # BUG FIX: was Python-2-only 'except Exception, e:' syntax, a
    # SyntaxError under Python 3 (the rest of this file is py3).
    except Exception as e:  # noqa
        logging.error(u'Error searching for TMDBID on TMDB.', exc_info=True)
        return ['']
def sync_feed():
    ''' Gets feed from popular-movies (https://github.com/sjlu/popular-movies)

    Gets raw feed (JSON) and adds all new movies with _sync_new_movies

    Returns bool
    '''
    logging.info('Syncing Steven Lu\'s popular movie feed.')

    try:
        feed = Url.open(
            'https://s3.amazonaws.com/popular-movies/movies.json').text
        movies = json.loads(feed)
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Popular feed request failed.', exc_info=True)
        return False

    if not movies:
        return False

    logging.info('Found {} movies in popular movies.'.format(len(movies)))
    _sync_new_movies(movies)
    logging.info('Popular movies sync complete.')
    return True
def trailer(title_date):
    ''' Gets trailer embed ID from Youtube.

    title_date (str): movie title and date ('Movie Title 2016')

    Attempts to connect 3 times in case Youtube is down or not responding
    Can fail if no response is received.

    Returns str (empty string on failure)
    '''
    logging.info('Getting trailer url from YouTube for {}'.format(title_date))

    search_term = Url.normalize((title_date + '+trailer'))

    url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&maxResults=1&key={}'.format(search_term, _k(b'youtube'))

    for attempt in range(3):
        try:
            data = json.loads(Url.open(url).text)
            return data['items'][0]['id']['videoId']
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            # Only log once, on the final failed attempt.
            if attempt == 2:
                logging.error('Unable to get trailer from Youtube.',
                              exc_info=True)
    return ''
def get_libraries(url, token):
    ''' Gets list of libraries in server url

    url: url and port of plex server
    token: plex auth token for server

    Returns dict {'response': True, 'libraries': [...]} on success,
            dict {'response': False, 'error': '...'} on failure
    '''
    headers = {'X-Plex-Token': token}
    url = '{}/library/sections'.format(url)

    try:
        r = Url.open(url, headers=headers)
        xml = r.text
    except Exception as e:  # noqa
        logging.error('Unable to contact Plex server.', exc_info=True)
        # BUG FIX: was a SET literal {'response', False, ...}, not a dict,
        # so callers reading ['response'] would crash.
        return {'response': False, 'error': 'Unable to contact Plex server.'}

    libs = []
    try:
        root = ET.fromstring(xml)
        for directory in root.findall('Directory'):
            lib = directory.attrib
            # First <Location> child holds the on-disk path of the library.
            lib['path'] = directory.find('Location').attrib['path']
            libs.append(lib)
    except Exception as e:  # noqa
        logging.error('Unable to parse Plex xml.', exc_info=True)
        # BUG FIX: same set-instead-of-dict defect as above.
        return {'response': False, 'error': 'Unable to parse Plex xml.'}

    return {'response': True, 'libraries': libs}
def _get_caps(self, url, apikey):
    ''' Gets caps for indexer

    url (str): url of torznab indexer
    apikey (str): api key for indexer

    Fetches the indexer's caps xml and stores the supported movie-search
    params in the CAPS table.

    Returns list of caps, or None on failure
    '''
    logging.info('Getting caps for {}'.format(url))

    url = '{}api?apikey={}&t=caps'.format(url, apikey)
    try:
        xml = Url.open(url).text
        root = ET.fromstring(xml)
        caps = root[0].find('movie-search').attrib['supportedParams']
        # NOTE(review): this stores the full api url (including the apikey)
        # as the CAPS key; the sibling helper stores the bare base url --
        # confirm which the CAPS table expects.
        core.sql.write('CAPS', {'url': url, 'caps': caps})
    except Exception as e:
        # BUG FIX: failures were silently swallowed; log them like the
        # sibling _get_caps helper does.
        logging.warning('Unable to get caps for {}'.format(url),
                        exc_info=True)
        return None
    return caps.split(',')
def _search_imdbid(self, imdbid):
    ''' Search TMDB for imdb id #

    imdbid (str): imdb id #

    Returns list of results
    '''
    logging.info('Searching TheMovieDB for IMDB ID: {}.'.format(imdbid))

    url = 'https://api.themoviedb.org/3/find/{}?language=en-US&external_source=imdb_id&append_to_response=alternative_titles,external_ids,release_dates'.format(imdbid)
    logging.info('Searching TMDB {}'.format(url))
    # Append the key after logging so it never appears in the log.
    url += '&api_key={}'.format(_k(b'tmdb'))

    self.use_token()

    try:
        found = json.loads(Url.open(url).text)['movie_results']
        if not found:
            return []
        movie = found[0]
        movie['imdbid'] = imdbid
        return [movie]
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Error searching for IMDBID on TMDB.', exc_info=True)
        return []
def _login(url, username, password):
    ''' Logs in to QBittorrent web ui and caches the session cookie.

    url (str): base url of the qbittorrent web ui (with trailing slash)
    username (str): web ui username
    password (str): web ui password

    Stores the Set-Cookie header on QBittorrent.cookie for later requests.

    Returns True on success, or str error message on failure
    '''
    logging.info('Attempting to log in to QBittorrent.')

    post_data = {'username': username, 'password': password}

    url = '{}login'.format(url)

    try:
        response = Url.open(url, post_data=post_data)
        QBittorrent.cookie = response.headers.get('Set-Cookie')

        # QBittorrent answers with a literal 'Ok.' / 'Fails.' body.
        if response.text == 'Ok.':
            logging.info('Successfully connected to QBittorrent.')
            return True
        elif response.text == 'Fails.':
            # BUG FIX: 'usename' typo in both the log and the returned
            # error message.
            logging.warning('Incorrect username or password QBittorrent.')
            return 'Incorrect username or password'
        else:
            logging.warning(response.text)
            return response.text
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('qbittorrent test_connection', exc_info=True)
        return '{}.'.format(str(e))
def cancel_download(downloadid):
    ''' Cancels (removes) a torrent in Deluge Web UI

    downloadid (str): hash of torrent to remove

    Logs in first if no session cookie is held, then issues a
    core.remove_torrent json-rpc call (second param True also removes
    downloaded data). Consumes/increments the module-level command_id.

    Returns bool result from Deluge on success,
            dict {'response': False, 'error': str} on failure
    '''
    global command_id

    logging.info('Cancelling download {} in Deluge Web UI'.format(downloadid))

    conf = core.CONFIG['Downloader']['Torrent']['DelugeWeb']

    host = conf['host']
    port = conf['port']
    url = '{}:{}/json'.format(host, port)

    if cookie is None:
        _login(url, conf['pass'])

    command = {
        'method': 'core.remove_torrent',
        'params': [downloadid.lower(), True],
        'id': command_id
    }
    command_id += 1

    post_data = json.dumps(command)

    # NOTE: mutates the module-level headers dict to attach the session cookie.
    headers['cookie'] = cookie

    try:
        response = Url.open(url, post_data=post_data, headers=headers)
        response = json.loads(response.text)
        return response['result']
    except Exception as e:
        # BUG FIX: error message said 'get_download_dir' (copy-paste from
        # a different function).
        logging.error('delugeweb cancel_download', exc_info=True)
        return {'response': False, 'error': str(e)}
def _login(url, password):
    ''' Logs in to Deluge Web UI and caches the session cookie.

    url: str full json-rpc url of the Deluge Web UI
    password: str web ui password

    Sends an auth.login json-rpc call and stores the Set-Cookie header in
    the module-level `cookie`. Consumes/increments the module-level
    command_id counter.

    Returns True on success, or str error message on failure
    '''
    global command_id
    global cookie

    logging.info('Logging in to Deluge Web UI.')

    command = {'method': 'auth.login', 'params': [password], 'id': command_id}
    command_id += 1

    post_data = json.dumps(command)

    try:
        response = Url.open(url, post_data=post_data, headers=headers)
        # Deluge only sets a session cookie when authentication succeeds.
        cookie = response.headers.get('Set-Cookie')

        if cookie is None:
            return 'Incorrect password.'

        body = json.loads(response.text)
        if body['error'] is None:
            return True
        else:
            # NOTE(review): 'response.msg' looks suspect -- requests-style
            # response objects have no .msg attribute, so this branch may
            # raise; body['error'] may be the intended value. Confirm
            # against the Url wrapper.
            return response.msg
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('DelugeWeb test_connection', exc_info=True)
        return '{}.'.format(e)
def get_movies(server, libid, token):
    ''' Gets movie entries from a Plex library section

    server: url and port of plex server
    libid: id # of the library section
    token: plex auth token for server

    Returns dict {'response': True, 'movies': [...]} on success,
            dict {'response': False, 'error': '...'} on failure
    '''
    headers = {'X-Plex-Token': token}
    url = '{}/library/sections/{}/all'.format(server, libid)

    try:
        r = Url.open(url, headers=headers)
        xml = r.text
    except Exception as e:  # noqa
        logging.error('Unable to contact Plex server.', exc_info=True)
        # BUG FIX: was a SET literal {'response', False, ...}, not a dict,
        # so callers reading ['response'] would crash.
        return {'response': False, 'error': 'Unable to contact Plex server.'}

    movies = []
    try:
        root = ET.fromstring(xml)
        for i in root:
            # NOTE(review): no attributes are copied from the xml node, so
            # every entry is an empty dict -- looks unfinished; confirm
            # which fields callers expect here.
            movie = {}
            movies.append(movie)
    except Exception as e:  # noqa
        logging.error('Unable to parse Plex xml.', exc_info=True)
        # BUG FIX: same set-instead-of-dict defect as above.
        return {'response': False, 'error': 'Unable to parse Plex xml.'}

    return {'response': True, 'movies': movies}
def _search_rss(self, movies):
    ''' Search rss feed for applicable releases

    movies: list of dicts of movies

    If found, marks movie in database as predb:'found' and status:'Wanted'

    Does not return
    '''
    db_update = {'predb': 'found', 'status': 'Wanted', 'predb_backlog': 1}

    logging.info('Checking predb rss for {}'.format(', '.join(
        m['title'] for m in movies)))

    try:
        feed = Url.open('https://predb.me/?cats=movies&rss=1').text
        items = self._parse_predb_xml(feed)

        for movie in movies:
            title = movie['title']
            year = str(movie['year'])
            if not self._fuzzy_match(items, title, year):
                continue
            logging.info('{} {} found on predb.me RSS.'.format(title, year))
            core.sql.update_multiple_values('MOVIES', db_update,
                                            imdbid=movie['imdbid'])
    except Exception as e:
        logging.error('Unable to read predb rss.', exc_info=True)
def test_connection(indexer, apikey):
    ''' Tests connection to NewzNab API

    indexer (str): url of the indexer
    apikey (str): api key for the indexer

    Returns dict {'response': False, ...} on failure
    '''
    if not indexer:
        return {'response': False, 'error': 'Indexer field is blank.'}

    # Strip trailing slashes so the /api path is built correctly.
    while indexer[-1] == '/':
        indexer = indexer[:-1]

    response = {}

    logging.info(u'Testing connection to {}.'.format(indexer))

    # Fixed imdb id (Monty Python and the Holy Grail) as a known-good query.
    url = u'{}/api?apikey={}&t=search&id=tt0063350'.format(indexer, apikey)

    request = Url.request(url)
    try:
        response = Url.open(request)
    except (SystemExit, KeyboardInterrupt):
        raise
    # BUG FIX: was Python-2-only 'except Exception, e:' syntax, a
    # SyntaxError under Python 3 (the rest of this file is py3).
    except Exception as e:  # noqa
        logging.error(u'Newz/TorzNab connection check.', exc_info=True)
        return {
            'response': False,
            'message': 'No connection could be made because the target machine actively refused it.'
        }
    # NOTE(review): no explicit return on success -- the caller receives
    # None. Confirm whether a success dict should be returned here.
def _search_tmdbid(self, tmdbid):
    ''' Search TMDB for tmdbid

    tmdbid (str): themoviedatabase id #

    Returns list of results
    '''
    logging.info('Searching TheMovieDB for TMDB ID: {}.'.format(tmdbid))

    url = 'https://api.themoviedb.org/3/movie/{}?language=en-US&append_to_response=alternative_titles,external_ids,release_dates'.format(tmdbid)
    logging.info('Searching TMDB {}'.format(url))
    # Append the key after logging so it never appears in the log.
    url += '&api_key={}'.format(_k(b'tmdb'))

    self.use_token()

    try:
        response = Url.open(url)
        if response.status_code != 200:
            logging.warning('Unable to reach TMDB, error {}'.format(
                response.status_code))
            return []

        movie = json.loads(response.text)
        # Rename to the project-wide 'imdbid' key.
        movie['imdbid'] = movie.pop('imdb_id')
        return [movie]
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Error searching for TMDBID on TMDB.', exc_info=True)
        return []
def get_category(self, cat, tmdbid=None):
    ''' Gets popular movies from TMDB

    cat (str): category of movies to retrieve
    tmdbid (str): tmdb id# to use for suggestions or similar

    tmdbid required for cat='similar', otherwise can be ignored.

    Returns list of results
    '''
    if cat == 'similar':
        if tmdbid is None:
            return []
        url = 'https://api.themoviedb.org/3/movie/{}/similar?&language=en-US&page=1'.format(tmdbid)
    else:
        # BUG FIX: url contained a leftover 'api_key=APIKEY' placeholder,
        # which produced a duplicate api_key query param once the real key
        # was appended below.
        url = 'https://api.themoviedb.org/3/movie/{}?language=en-US&page=1'.format(cat)
    # Append the real key after building the loggable url.
    url += '&api_key={}'.format(_k(b'tmdb'))

    self.use_token()

    try:
        results = json.loads(Url.open(url).text)
        # NOTE(review): TMDB reports errors with a boolean false 'success'
        # field, not the string 'false' -- confirm this check ever fires.
        if results.get('success') == 'false':
            logging.warning('Bad request to TheMovieDatabase.')
            return []
        else:
            return results['results']
    except Exception as e:
        logging.error('Unable to read {} movies from TheMovieDB.'.format(cat),
                      exc_info=True)
        return []
def test_connection(data):
    ''' Tests connectivity to Put.IO

    data: dict of Put.IO server information

    Return True on success or str error message on failure
    '''
    logging.info('Testing connection to Put.IO.')

    token = core.CONFIG['Downloader']['Torrent']['PutIO']['oauthtoken']
    if not token:
        logging.debug(
            'Cannot execute Put.IO method -- no OAuth Token in config.')
        return 'No Application Token. Create Application token and enter in settings.'

    try:
        response = Url.open(url_base.format('account/info', token))
    except Exception as e:
        # BUG FIX: the network call was not wrapped, so connection errors
        # propagated out of a test method instead of returning an error
        # string like every other test_connection in this codebase.
        logging.error('Put.IO connection test failed.', exc_info=True)
        return '{}.'.format(e)

    if response.status_code != 200:
        return '{}: {}'.format(response.status_code, response.reason)

    response = json.loads(response.text)
    if response['status'] != 'OK':
        logging.debug('Cannot connect to Put.IO: {}'.format(
            response['error_message']))
        return response['error_message']
    else:
        return True
def test_connection(data):
    ''' Tests connectivity to Sabnzbd

    data: dict of Sab server information

    Tests if we can get Sab's stats using server info in 'data'

    Return True on success or str error message on failure
    '''
    logging.info('Testing connection to SABnzbd.')

    url = 'http://{}:{}/sabnzbd/api?apikey={}&mode=server_stats'.format(
        data['host'], data['port'], data['api'])

    try:
        body = Url.open(url).text
        # Sab reports problems inline in the response body.
        return body if 'error' in body else True
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Sabnzbd connection test failed.', exc_info=True)
        return '{}.'.format(e)
def _get_caps(self, url_base, apikey):
    ''' Gets caps for indexer

    url_base (str): url of torznab indexer
    apikey (str): api key for indexer

    Fetches the caps xml and records the supported movie-search params in
    the CAPS table, keyed by the indexer's base url.

    Returns list of caps, or None on failure
    '''
    logging.info('Getting caps for {}'.format(url_base))

    url = '{}api?apikey={}&t=caps'.format(url_base, apikey)

    try:
        xml = Url.open(url).text
        parsed = gdata.data(fromstring(xml))
        caps = parsed['caps']['searching']['movie-search']['supportedParams']
        core.sql.write('CAPS', {'url': url_base, 'caps': caps})
    except Exception as e:
        logging.warning('', exc_info=True)
        return None
    return caps.split(',')