def __init__(self, name):
    """Set up the default state shared by every search provider."""
    self.name = name
    self.anime_only = False
    # Mirrors that can turn an info-hash into a .torrent download; the list
    # is shuffled below so no single mirror takes all the traffic.
    self.bt_cache_urls = [
        # 'http://torcache.net/torrent/{torrent_hash}.torrent',
        'http://torrentproject.se/torrent/{torrent_hash}.torrent',
        'http://thetorrent.org/torrent/{torrent_hash}.torrent',
        'http://btdig.com/torrent/{torrent_hash}.torrent',
        # 'http://torrage.com/torrent/{torrent_hash}.torrent',
        'http://itorrents.org/torrent/{torrent_hash}.torrent',
    ]
    self.cache = TVCache(self)
    # Capability/feature switches -- all off until a concrete provider opts in.
    for flag in ('enable_backlog', 'enable_daily', 'enabled', 'public',
                 'search_fallback', 'supports_absolute_numbering'):
        setattr(self, flag, False)
    self.headers = {'User-Agent': UA_POOL.random}
    self.proper_strings = ['PROPER|REPACK|REAL']
    self.provider_type = None
    self.search_mode = None
    self.session = make_session()
    self.show = None
    self.supports_backlog = True
    self.url = ''
    self.urls = {}
    # Use and configure the attribute enable_cookies to show or hide the
    # cookies input field per provider.
    self.enable_cookies = False
    self.cookies = ''
    self.rss_cookies = ''
    shuffle(self.bt_cache_urls)
def __init__(self, show_id, title, indexer, indexer_id, cache_subfolder='recommended',
             rating=None, votes=None, image_href=None, image_src=None):
    """Create a show recommendation.

    :param show_id: as provided by the list provider
    :param title: of the show as displayed in the recommended show page
    :param indexer: used to map the show to
    :param indexer_id: a mapped indexer_id for indexer
    :param cache_subfolder: to store images
    :param rating: of the show in percent
    :param votes: number of votes
    :param image_href: the href when clicked on the show image (poster)
    :param image_src: the url to the "cached" image (poster)
    """
    for attr, value in (('show_id', show_id), ('title', title),
                        ('indexer', indexer), ('indexer_id', indexer_id),
                        ('cache_subfolder', cache_subfolder), ('rating', rating),
                        ('votes', votes), ('image_href', image_href),
                        ('image_src', image_src)):
        setattr(self, attr, value)

    # Flag the recommendation when the show is already in the local database.
    known_ids = {show.indexerid for show in sickbeard.showList if show.indexerid}
    self.show_in_list = self.indexer_id in known_ids

    self.session = helpers.make_session()
def __init__(self, name):
    """Initialize provider defaults; concrete providers override what they need."""
    self.name = name
    self.anime_only = False
    # Hash-to-.torrent cache mirrors, shuffled below to spread requests.
    self.bt_cache_urls = [
        'http://torcache.net/torrent/{torrent_hash}.torrent',
        'http://thetorrent.org/torrent/{torrent_hash}.torrent',
        'http://btdig.com/torrent/{torrent_hash}.torrent',
        # 'http://torrage.com/torrent/{torrent_hash}.torrent',
        # 'http://itorrents.org/torrent/{torrent_hash}.torrent',
    ]
    self.cache = TVCache(self)
    # Feature switches, all disabled by default.
    for flag in ('enable_backlog', 'enable_daily', 'enabled', 'public',
                 'search_fallback', 'supports_absolute_numbering'):
        setattr(self, flag, False)
    self.headers = {'User-Agent': UA_POOL.random}
    self.proper_strings = ['PROPER|REPACK|REAL']
    self.provider_type = None
    self.search_mode = None
    self.session = make_session()
    self.show = None
    self.supports_backlog = True
    self.url = ''
    self.urls = {}
    shuffle(self.bt_cache_urls)
def index(self, *args, **kwargs):
    """Render the changelog page from the remote CHANGES.md markdown.

    Falls back to a link-only message when the download fails.
    """
    # noinspection PyBroadException
    try:
        changes = helpers.getURL(
            'http://sickchill.github.io/sickchill-news/CHANGES.md',
            session=helpers.make_session(), returns='text')
    except Exception:
        logger.log('Could not load changes from repo, giving a link!', logger.DEBUG)
        changes = _(
            'Could not load changes from the repo. [Click here for CHANGES.md]({changes_url})'
        ).format(changes_url='http://sickchill.github.io/sickchill-news/CHANGES.md')

    t = PageTemplate(rh=self, filename="markdown.mako")
    # Typo fix in the user-facing fallback message: "The was" -> "There was".
    data = markdown2.markdown(
        changes if changes else _(
            "There was a problem connecting to github, please refresh and try again"
        ), extras=['header-ids'])

    return t.render(title=_("Changelog"), header=_("Changelog"), topmenu="system",
                    data=data, controller="changes", action="index")
def update_network_dict():
    """Update timezone information from SR repositories.

    Downloads the network -> timezone mapping and syncs it into cache.db
    (insert new, update changed, purge removed).

    Fix: ``rsplit(u':', 1)`` raises ValueError on a line without a separator;
    the old ``except (IOError, OSError)`` never fired for string parsing, so
    a single malformed line aborted the whole parse. Malformed lines are now
    skipped individually.
    """
    url = 'http://sickrage.github.io/sb_network_timezones/network_timezones.txt'
    data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not data:
        logger.log(
            u'Updating network timezones failed, this can happen from time to time. URL: {0}'
            .format(url), logger.WARNING)
        load_network_dict()
        return

    d = {}
    for line in data.splitlines():
        try:
            (key, val) = line.strip().rsplit(u':', 1)
        except ValueError:
            # No ':' separator on this line -- skip it, keep the rest.
            continue
        if key and val:
            d[key] = val

    if not d:
        logger.log(
            u'Parsing network timezones failed, not going to touch the db',
            logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')
    network_list = dict(
        cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])
        if existing:
            del network_list[network]

    # Whatever is left in network_list no longer exists upstream -- purge it.
    if network_list:
        purged = [x for x in network_list]
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN ({0});'.
            format(','.join(['?'] * len(purged))), purged
        ])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
def __init__(self):
    """Create the shared HTTP session and the identifying Plex headers."""
    self.session = make_session()
    # Headers Plex uses to identify the client making the request.
    self.headers = dict([
        ('X-Plex-Device-Name', 'SickRage'),
        ('X-Plex-Product', 'SickRage Notifier'),
        ('X-Plex-Client-Identifier', sickbeard.common.USER_AGENT),
        ('X-Plex-Version', '2016.02.10'),
    ])
def __init__(self):
    """Record the current git state: branch, commit hash, commits behind."""
    branch = sickbeard.BRANCH
    if branch == '':
        # Branch not configured -- detect it from the checkout itself.
        branch = self._find_installed_branch()
    self.branch = branch
    self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
    self._newest_commit_hash = None
    self._num_commits_behind = 0
    self.session = helpers.make_session()
def __init__(self):
    """Pick the updater implementation matching the install type."""
    self.updater = None
    self.install_type = None
    self.amActive = False
    if sickbeard.gh:
        self.install_type = self.find_install_type()
        # Dispatch table instead of an if/elif chain.
        updater_cls = {'git': GitUpdateManager,
                       'source': SourceUpdateManager}.get(self.install_type)
        if updater_cls is not None:
            self.updater = updater_cls()
    self.session = helpers.make_session()
def __init__(self):
    """Get a list of most popular TV series from imdb."""
    # Use akas.imdb.com, just like the imdb lib.
    self.url = 'http://akas.imdb.com/search/title'
    this_year = date.today().year
    self.params = dict(
        at=0,
        sort='moviemeter',
        title_type='tv_series',
        # A year window from last year through next year.
        year='{0},{1}'.format(this_year - 1, this_year + 1),
    )
    self.session = helpers.make_session()
def __init__(self):
    """Gets a list of most popular TV series from imdb"""
    # Query akas.imdb.com, matching what the imdb library does.
    self.url = 'http://akas.imdb.com/search/title'
    year = date.today().year
    self.params = {
        'at': 0,
        'sort': 'moviemeter',
        'title_type': 'tv_series',
        # From last year through next year.
        'year': '%s,%s' % (year - 1, year + 1),
    }
    self.session = helpers.make_session()
def __init__(self, name, host=None, username=None, password=None):
    """Store connection settings, falling back to the configured defaults.

    Explicit ``is None`` checks: empty strings passed by the caller are
    kept as-is rather than replaced by the global settings.
    """
    self.name = name
    self.username = username if username is not None else sickbeard.TORRENT_USERNAME
    self.password = password if password is not None else sickbeard.TORRENT_PASSWORD
    self.host = host if host is not None else sickbeard.TORRENT_HOST
    self.rpcurl = sickbeard.TORRENT_RPCURL
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()
    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
    self.session.cookies = cookielib.CookieJar()
def __init__(self):
    """Gets a list of most popular TV series from imdb"""
    self.base_url = 'https://imdb.com'
    self.url = urljoin(self.base_url, 'search/title')
    year = date.today().year
    self.params = {
        'at': 0,
        'sort': 'moviemeter',
        'title_type': 'tv_series',
        # From last year through next year.
        'year': '%s,%s' % (year - 1, year + 1),
    }
    self.session = helpers.make_session()
def update_network_dict():
    """Update timezone information from SR repositories.

    Fix: ``rsplit(u':', 1)`` raises ValueError on lines without a separator;
    the old ``except (IOError, OSError)`` around string parsing was dead code,
    so one malformed line aborted the whole parse. Bad lines are now skipped.
    """
    url = 'http://sickrage.github.io/sb_network_timezones/network_timezones.txt'
    data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not data:
        logger.log(u'Updating network timezones failed, this can happen from time to time. URL: {0}'.format(url), logger.WARNING)
        load_network_dict()
        return

    d = {}
    for line in data.splitlines():
        try:
            (key, val) = line.strip().rsplit(u':', 1)
        except ValueError:
            continue  # no ':' separator -- skip this line only
        if key and val:
            d[key] = val

    if not d:
        logger.log(u'Parsing network timezones failed, not going to touch the db', logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')
    network_list = dict(cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append(['INSERT OR IGNORE INTO network_timezones VALUES (?,?);', [network, timezone]])
        elif network_list[network] != timezone:
            queries.append(['UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;', [timezone, network]])
        if existing:
            del network_list[network]

    # Entries left in network_list no longer exist upstream -- purge them.
    if network_list:
        purged = [x for x in network_list]
        queries.append(['DELETE FROM network_timezones WHERE network_name IN ({0});'.format(','.join(['?'] * len(purged))), purged])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
def __init__(self):
    """Set up the TVDB update checker: endpoints, session, and timeout."""
    self.lock = threading.Lock()
    self.amActive = False
    self.apikey = json.dumps(
        {'apikey': sickbeard.indexerApi(INDEXER_TVDB).api_params['apikey']})
    self.base = 'https://api.thetvdb.com'
    self.login = '{0}/login'.format(self.base)
    self.update = '{0}/updated/query?fromTime='.format(self.base)
    self.session = helpers.make_session()
    # The tvdb v2 API speaks JSON on both request and response.
    self.session.headers.update(
        {'Accept': 'application/json', 'Content-Type': 'application/json'})
    # General timeout for requests calls, to prevent a hang if tvdb does not respond.
    self.timeout = 12.1
def test_search(): # pylint: disable=too-many-locals """ Test searching """ url = 'http://kickass.to/' search_url = 'http://kickass.to/usearch/American%20Dad%21%20S08%20-S08E%20category%3Atv/?field=seeders&sorder=desc' html = getURL(search_url, session=make_session(), returns='text') if not html: return soup = BeautifulSoup(html, 'html5lib') torrent_table = soup.find('table', attrs={'class': 'data'}) torrent_rows = torrent_table.find_all('tr') if torrent_table else [] # cleanup memory soup.clear(True) # Continue only if one Release is found if len(torrent_rows) < 2: print "The data returned does not contain any torrents" return for row in torrent_rows[1:]: try: link = urlparse.urljoin(url, (row.find('div', { 'class': 'torrentname' }).find_all('a')[1])['href']) _id = row.get('id')[-7:] title = (row.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \ or (row.find('div', {'class': 'torrentname'}).find_all('a')[2]).text url = row.find('a', 'imagnet')['href'] verified = True if row.find('a', 'iverify') else False trusted = True if row.find('img', {'alt': 'verified'}) else False seeders = int(row.find_all('td')[-2].text) leechers = int(row.find_all('td')[-1].text) _ = link, _id, verified, trusted, seeders, leechers except (AttributeError, TypeError): continue print title
def update_network_dict():
    """Update timezone information from Medusa repositories.

    Fixes:
    - ``rsplit`` never returns None, so the old ``key is None`` checks were
      dead code; it *raises* ValueError on lines without a ':' separator,
      which previously aborted parsing. Bad/empty lines are now skipped.
    - An empty parse result no longer falls through and purges every
      existing timezone row; the db is left untouched instead.
    """
    url = 'https://cdn.pymedusa.com/sb_network_timezones/network_timezones.txt'
    url_data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not url_data:
        logger.log(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url, logger.WARNING)
        load_network_dict()
        return

    d = {}
    for line in url_data.splitlines():
        try:
            (key, val) = line.strip().rsplit(u':', 1)
        except ValueError:
            continue  # no ':' separator on this line
        if not key or not val:
            continue
        d[key] = val

    if not d:
        # Nothing parsed: leave existing rows alone instead of purging them all.
        logger.log(u'Parsing network timezones failed, not going to touch the db', logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')
    network_list = dict(cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append(['INSERT OR IGNORE INTO network_timezones VALUES (?,?);', [network, timezone]])
        elif network_list[network] != timezone:
            queries.append(['UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;', [timezone, network]])
        if existing:
            del network_list[network]

    # Entries left in network_list no longer exist upstream -- purge them.
    if network_list:
        purged = [x for x in network_list]
        queries.append(['DELETE FROM network_timezones WHERE network_name IN (%s);' % ','.join(['?'] * len(purged)), purged])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
def __init__(self, name, host=None, username=None, password=None):
    """
    Initializes the client
    :name: str:name of the client
    :host: str:url or ip of the client
    :username: str: username for authenticating with the client
    :password: str: password for authentication with the client
    """
    self.name = name
    # Fall back to the globally configured credentials when none are given.
    self.username = sickbeard.TORRENT_USERNAME if not username else username
    self.password = sickbeard.TORRENT_PASSWORD if not password else password
    self.host = sickbeard.TORRENT_HOST if not host else host
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()
    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
def __init__(self, name, host=None, username=None, password=None):
    """
    Initializes the client
    :name: str:name of the client
    :host: str:url or ip of the client
    :username: str: username for authenticating with the client
    :password: str: password for authentication with the client
    """
    self.name = name
    # `or` falls back to the global settings for any falsy value,
    # matching the old `X if not arg else arg` form exactly.
    self.username = username or sickbeard.TORRENT_USERNAME
    self.password = password or sickbeard.TORRENT_PASSWORD
    self.host = host or sickbeard.TORRENT_HOST
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()
    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
def __init__(self, name, host=None, username=None, password=None):
    """
    Initializes the client
    :name: str:name of the client
    :host: str:url or ip of the client
    :username: str: username for authenticating with the client
    :password: str: password for authentication with the client
    """
    self.name = name
    # Any falsy argument falls back to the global torrent settings,
    # equivalent to the old `X if not arg else arg` form.
    self.username = username or sickbeard.TORRENT_USERNAME
    self.password = password or sickbeard.TORRENT_PASSWORD
    self.host = host or sickbeard.TORRENT_HOST
    self.rpcurl = sickbeard.TORRENT_RPCURL
    self.url = None
    self.response = None
    self.auth = None
    self.last_time = time.time()
    self.session = helpers.make_session()
    self.session.auth = (self.username, self.password)
def test_search():  # pylint: disable=too-many-locals
    """
    Test searching

    Fix: the magnet href was stored in ``url``, clobbering the base url that
    ``urljoin`` needs for every subsequent row of the table.
    """
    url = 'http://kickass.to/'
    search_url = 'http://kickass.to/usearch/American%20Dad%21%20S08%20-S08E%20category%3Atv/?field=seeders&sorder=desc'

    html = getURL(search_url, session=make_session(), returns='text')
    if not html:
        return

    soup = BeautifulSoup(html, 'html5lib')
    torrent_table = soup.find('table', attrs={'class': 'data'})
    torrent_rows = torrent_table('tr') if torrent_table else []

    # cleanup memory
    soup.clear(True)

    # Continue only if one Release is found
    if len(torrent_rows) < 2:
        print("The data returned does not contain any torrents")
        return

    for row in torrent_rows[1:]:
        try:
            name_cell = row.find('div', {'class': 'torrentname'})
            link = urllib.parse.urljoin(url, name_cell('a')[1]['href'])
            _id = row.get('id')[-7:]
            title = name_cell('a')[1].text \
                or name_cell('a')[2].text
            # Renamed from `url` so the base url for urljoin stays intact.
            magnet = row.find('a', 'imagnet')['href']
            verified = bool(row.find('a', 'iverify'))
            trusted = bool(row.find('img', {'alt': 'verified'}))
            seeders = int(row('td')[-2].text)
            leechers = int(row('td')[-1].text)
            _ = link, _id, magnet, verified, trusted, seeders, leechers
        except (AttributeError, TypeError):
            continue

        print(title)
def __init__(self, name):
    """Initialize provider defaults.

    Fix: a missing comma in ``bt_cache_urls`` implicitly concatenated a
    stray hard-coded debug URL (with a literal hash baked in) onto the
    itorrents entry, producing one bogus mirror URL instead of a usable
    one. The debug literal is removed and the itorrents entry restored.
    """
    self.name = name
    self.anime_only = False
    # Mirrors used to fetch a .torrent by hash. An entry may be a single
    # URL template or a tuple of related URLs; shuffled below to spread load.
    self.bt_cache_urls = [
        # 'http://torcache.net/torrent/{torrent_hash}.torrent',
        'http://torrentproject.se/torrent/{torrent_hash}.torrent',
        'http://thetorrent.org/torrent/{torrent_hash}.torrent',
        'http://btdig.com/torrent/{torrent_hash}.torrent',
        # 'http://torrage.com/torrent/{torrent_hash}.torrent',
        ('https://t.torrage.info/download?h={torrent_hash}',
         'https://torrage.info/torrent.php?h={torrent_hash}'),
        'http://itorrents.org/torrent/{torrent_hash}.torrent',
    ]
    self.cache = TVCache(self)
    self.enable_backlog = False
    self.enable_daily = False
    self.enabled = False
    self.headers = {'User-Agent': UA_POOL.random}
    self.proper_strings = ['PROPER|REPACK|REAL']
    self.provider_type = None
    self.public = False
    self.search_fallback = False
    self.search_mode = None
    self.session = make_session()
    self.show = None
    self.supports_absolute_numbering = False
    self.supports_backlog = True
    self.url = ''
    self.urls = {}
    # Use and configure the attribute enable_cookies to show or hide the
    # cookies input field per provider.
    self.enable_cookies = False
    self.cookies = ''
    self.rss_cookies = ''
    self.ability_status = self.PROVIDER_OK
    shuffle(self.bt_cache_urls)
def change_unrar_tool(unrar_tool, alt_unrar_tool):
    """Validate/locate an unrar executable and publish it to sickbeard and rarfile.

    Tries the configured tool first; on failure falls back to rarfile's own
    detection, then (Windows only) to installed WinRAR locations, then to
    downloading unrar.exe. Always sets sickbeard.UNRAR_TOOL and
    sickbeard.ALT_UNRAR_TOOL before returning.

    :param unrar_tool: path of the preferred unrar executable
    :param alt_unrar_tool: path of the fallback unrar executable
    :return: result of rarfile._check_unrar_tool() -- truthy when a working
             tool is configured, False otherwise
    """
    # Check for failed unrar attempt, and remove it
    # Must be done before unrar is ever called or the self-extractor opens and locks startup
    bad_unrar = os.path.join(sickbeard.DATA_DIR, 'unrar.exe')
    # 447440 bytes: size of the known-bad self-extracting download artifact.
    if os.path.exists(bad_unrar) and os.path.getsize(bad_unrar) == 447440:
        try:
            os.remove(bad_unrar)
        except OSError as e:
            logger.log("Unable to delete bad unrar.exe file {0}: {1}. You should delete it manually".format(bad_unrar, e.strerror), logger.WARNING)

    try:
        rarfile.custom_check(unrar_tool)
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        # The configured tool does not work; try alternatives.
        # Let's just return right now if the defaults work
        try:
            # noinspection PyProtectedMember
            test = rarfile._check_unrar_tool()
            if test:
                # These must always be set to something before returning
                sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL
                sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL
                return True
        except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
            pass

        if platform.system() == 'Windows':
            # Look for WinRAR installations
            found = False
            winrar_path = 'WinRAR\\UnRAR.exe'
            # Make a set of unique paths to check from existing environment variables
            check_locations = {
                os.path.join(location, winrar_path) for location in (
                    os.environ.get("ProgramW6432"),
                    os.environ.get("ProgramFiles(x86)"),
                    os.environ.get("ProgramFiles"),
                    re.sub(r'\s?\(x86\)', '', os.environ["ProgramFiles"])
                ) if location
            }
            check_locations.add(os.path.join(sickbeard.PROG_DIR, 'unrar\\unrar.exe'))

            for check in check_locations:
                if ek(os.path.isfile, check):
                    # Can use it?
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        found = True
                        break
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        found = False

            # Download
            if not found:
                logger.log('Trying to download unrar.exe and set the path')
                unrar_store = ek(os.path.join, sickbeard.PROG_DIR, 'unrar')  # ./unrar (folder)
                unrar_zip = ek(os.path.join, sickbeard.PROG_DIR, 'unrar_win.zip')  # file download

                if (helpers.download_file(
                        "http://sickrage.github.io/unrar/unrar_win.zip",
                        filename=unrar_zip,
                        session=helpers.make_session()
                ) and helpers.extractZip(archive=unrar_zip, targetDir=unrar_store)):
                    try:
                        ek(os.remove, unrar_zip)
                    except OSError as e:
                        logger.log("Unable to delete downloaded file {0}: {1}. You may delete it manually".format(unrar_zip, e.strerror))

                    check = os.path.join(unrar_store, "unrar.exe")
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        logger.log('Successfully downloaded unrar.exe and set as unrar tool', )
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        logger.log('Sorry, unrar was not set up correctly. Try installing WinRAR and make sure it is on the system PATH')
                else:
                    logger.log('Unable to download unrar.exe')

    # These must always be set to something before returning
    sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL = rarfile.ORIG_UNRAR_TOOL = unrar_tool
    sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL = alt_unrar_tool

    try:
        # noinspection PyProtectedMember
        test = rarfile._check_unrar_tool()
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        # No working unrar at all -- turn the unpack feature off.
        if sickbeard.UNPACK == 1:
            logger.log('Disabling UNPACK setting because no unrar is installed.')
            sickbeard.UNPACK = 0
        test = False

    return test
def split_result(obj):
    """
    Split obj into separate episodes.

    :param obj: to search for results
    :return: a list of episode objects or an empty list
    """
    # NOTE(review): obj.extraInfo[0] is used below as the show object
    # (wantEpisode/getEpisode) -- confirm against the SearchResult definition.
    url_data = helpers.getURL(obj.url, session=helpers.make_session(), returns='content')
    if url_data is None:
        logger.log(
            "Unable to load url " + obj.url + ", can't download season NZB",
            logger.ERROR)
        return []

    # parse the season ep name
    try:
        parsed_obj = NameParser(False, showObj=obj.show).parse(obj.name)
    except (InvalidNameException, InvalidShowException) as error:
        logger.log("{0}".format(error), logger.DEBUG)
        return []

    # bust it up
    season = 1 if parsed_obj.season_number is None else parsed_obj.season_number

    separate_nzbs, xmlns = get_season_nzbs(obj.name, url_data, season)

    result_list = []

    # TODO: Re-evaluate this whole section
    #   If we have valid results and hit an exception, we ignore the results found so far.
    #   Maybe we should return the results found or possibly continue with the next iteration of the loop
    #   Also maybe turn this into a function and generate the results_list with a list comprehension instead
    for new_nzb in separate_nzbs:
        logger.log("Split out " + new_nzb + " from " + obj.name, logger.DEBUG)  # pylint: disable=no-member

        # parse the name
        try:
            parsed_obj = NameParser(False, showObj=obj.show).parse(new_nzb)
        except (InvalidNameException, InvalidShowException) as error:
            logger.log("{0}".format(error), logger.DEBUG)
            return []

        # make sure the result is sane
        if (parsed_obj.season_number != season) or (parsed_obj.season_number is None and season != 1):  # pylint: disable=no-member
            logger.log(
                "Found " + new_nzb + " inside " + obj.name +
                " but it doesn't seem to belong to the same season, ignoring it",
                logger.WARNING)
            continue
        elif not parsed_obj.episode_numbers:  # pylint: disable=no-member
            logger.log(
                "Found " + new_nzb + " inside " + obj.name +
                " but it doesn't seem to be a valid episode NZB, ignoring it",
                logger.WARNING)
            continue

        # Skip this NZB entirely if any contained episode is unwanted.
        want_ep = True
        for ep_num in parsed_obj.episode_numbers:
            if not obj.extraInfo[0].wantEpisode(season, ep_num, obj.quality):
                logger.log("Ignoring result: " + new_nzb, logger.DEBUG)
                want_ep = False
                break
        if not want_ep:
            continue

        # get all the associated episode objects
        ep_obj_list = [
            obj.extraInfo[0].getEpisode(season, ep)
            for ep in parsed_obj.episode_numbers
        ]

        # make a result
        cur_obj = classes.NZBDataSearchResult(ep_obj_list)
        cur_obj.name = new_nzb
        cur_obj.provider = obj.provider
        cur_obj.quality = obj.quality
        cur_obj.extraInfo = [create_nzb_string(separate_nzbs[new_nzb], xmlns)]

        result_list.append(cur_obj)

    return result_list
# (at your option) any later version. # # SickChill is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickChill. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function, unicode_literals import sickbeard from sickbeard import helpers, logger meta_session = helpers.make_session() def getShowImage(url, imgNum=None): if url is None: return None # if they provided a fanart number try to use it instead if imgNum is not None: tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg" else: tempURL = url logger.log("Fetching image from " + tempURL, logger.DEBUG) image_data = helpers.getURL(tempURL,
def index(self):
    """Render the changelog page from the remote CHANGES.md markdown.

    Falls back to a link-only message when the download fails.
    """
    try:
        changes = helpers.getURL('https://cdn.pymedusa.com/sickrage-news/CHANGES.md',
                                 session=helpers.make_session(), returns='text')
    except Exception:
        logger.log('Could not load changes from repo, giving a link!', logger.DEBUG)
        changes = 'Could not load changes from the repo. [Click here for CHANGES.md](https://cdn.pymedusa.com/sickrage-news/CHANGES.md)'

    t = PageTemplate(rh=self, filename='markdown.mako')
    # Typo fix in the user-facing fallback message: "The was" -> "There was".
    data = markdown2.markdown(
        changes if changes else 'There was a problem connecting to github, please refresh and try again',
        extras=['header-ids'])

    return t.render(title='Changelog', header='Changelog', topmenu='system',
                    data=data, controller='changes', action='index')
def __init__(self):
    """Initialize scheduler state and a shared HTTP session."""
    self.amActive = False
    self.lock = threading.Lock()
    self.session = helpers.make_session()
class Notifier(object):
    """Pushbullet notifier: pushes SickRage events via the Pushbullet v2 API."""

    # Single requests session shared by all Notifier instances.
    session = helpers.make_session()
    TEST_EVENT = 'Test'

    def __init__(self):
        pass

    def test_notify(self, pushbullet_api):
        """Send a test push using the supplied API key."""
        logger.log(u"Sending a test Pushbullet notification.", logger.DEBUG)
        return self._sendPushbullet(
            pushbullet_api,
            event=self.TEST_EVENT,
            message="Testing Pushbullet settings from SickRage")

    def get_devices(self, pushbullet_api):
        """Return the raw device-list response text (GET request, no event)."""
        logger.log(
            u"Testing Pushbullet authentication and retrieving the device list.",
            logger.DEBUG)
        return self._sendPushbullet(pushbullet_api)

    def notify_snatch(self, ep_name):
        # Only fires when the user enabled snatch notifications.
        if sickbeard.PUSHBULLET_NOTIFY_ONSNATCH:
            self._sendPushbullet(
                pushbullet_api=None,
                event=notifyStrings[NOTIFY_SNATCH] + " : " + ep_name,
                message=ep_name)

    def notify_download(self, ep_name):
        if sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD:
            self._sendPushbullet(
                pushbullet_api=None,
                event=notifyStrings[NOTIFY_DOWNLOAD] + " : " + ep_name,
                message=ep_name)

    def notify_subtitle_download(self, ep_name, lang):
        if sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD:
            self._sendPushbullet(
                pushbullet_api=None,
                event=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD] + " : " + ep_name + " : " + lang,
                message=ep_name + ": " + lang)

    def notify_git_update(self, new_version="??"):
        if sickbeard.USE_PUSHBULLET:
            self._sendPushbullet(
                pushbullet_api=None,
                event=notifyStrings[NOTIFY_GIT_UPDATE],
                message=notifyStrings[NOTIFY_GIT_UPDATE_TEXT] + new_version)

    def notify_login(self, ipaddress=""):
        if sickbeard.USE_PUSHBULLET:
            self._sendPushbullet(
                pushbullet_api=None,
                event=notifyStrings[NOTIFY_LOGIN],
                message=notifyStrings[NOTIFY_LOGIN_TEXT].format(ipaddress))

    def _sendPushbullet(self, pushbullet_api=None, pushbullet_device=None, event=None, message=None):
        """Issue the actual API call.

        event None -> GET /devices (device listing); otherwise POST /pushes
        with a 'note'. Returns False on any failure; on success returns the
        response text (or True for a test/device-list call -- see the
        final return expression).
        """
        # Allow the call when notifications are enabled, or for the
        # explicit test event / device listing.
        if not (sickbeard.USE_PUSHBULLET or event == 'Test' or event is None):
            return False

        pushbullet_api = pushbullet_api or sickbeard.PUSHBULLET_API
        pushbullet_device = pushbullet_device or sickbeard.PUSHBULLET_DEVICE

        logger.log(u"Pushbullet event: %r" % event, logger.DEBUG)
        logger.log(u"Pushbullet message: %r" % message, logger.DEBUG)
        logger.log(u"Pushbullet api: %r" % pushbullet_api, logger.DEBUG)
        logger.log(u"Pushbullet devices: %r" % pushbullet_device, logger.DEBUG)
        logger.log(
            u"Pushbullet notification type: %r" % ('note' if event else 'None'),
            logger.DEBUG)

        # Index with a bool: False -> 'devices', True -> 'pushes'.
        url = 'https://api.pushbullet.com/v2/%s' % ('devices', 'pushes')[event is not None]

        data = None if not event else json.dumps({
            'title': event.encode('utf-8'),
            'body': message.encode('utf-8'),
            'device_iden': pushbullet_device.encode('utf-8'),
            'type': 'note'
        })

        method = 'GET' if data is None else 'POST'
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer %s' % pushbullet_api
        }

        try:
            response = self.session.request(method, url, data=data, headers=headers)
        except Exception:
            logger.log(
                u'Pushbullet authorization failed with exception: %r' % traceback.format_exc(),
                logger.DEBUG)
            return False

        if response.status_code == 410:
            logger.log(u'Pushbullet authorization failed', logger.DEBUG)
            return False

        if response.status_code != 200:
            logger.log(
                u'Pushbullet call failed with error code %r' % response.status_code,
                logger.DEBUG)
            return False

        logger.log(u"Pushbullet response: %r" % response.text, logger.DEBUG)

        if not response.text:
            logger.log(u"Pushbullet notification failed.", logger.ERROR)
            return False

        logger.log(u"Pushbullet notifications sent.", logger.DEBUG)
        # NOTE(review): identity (`is`) comparison on a string -- this works
        # for test_notify (which passes self.TEST_EVENT itself) but relies on
        # interning for any other caller; `==` would be safer. Confirm.
        return (True, response.text)[event is self.TEST_EVENT or event is None]
name=show.name, tvdbid=show.indexerid, autoCorrectName=True) except Exception: continue else: if anime.name and anime.name != show.name: anidb_exception_dict[show.indexerid] = [{ anime.name: -1 }] setLastRefresh('anidb') return anidb_exception_dict xem_session = helpers.make_session() def _xem_exceptions_fetcher(): if shouldRefresh('xem'): for indexer in sickbeard.indexerApi().indexers: logger.log( "Checking for XEM scene exception updates for {0}".format( sickbeard.indexerApi(indexer).name)) url = "http://thexem.de/map/allNames?origin={0}&seasonNumbers=1".format( sickbeard.indexerApi(indexer).config['xem_origin']) parsedJSON = helpers.getURL(url, session=xem_session, timeout=90,
def __init__(self):
    """Bind a requests session to the Pushbullet v2 API root."""
    self.url = 'https://api.pushbullet.com/v2/'
    self.session = helpers.make_session()
# SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from requests.compat import urljoin import datetime import sickbeard from sickbeard import helpers, logger session = helpers.make_session() def sendNZB(nzb): # pylint:disable=too-many-return-statements, too-many-branches, too-many-statements ''' Sends an NZB to SABnzbd via the API. :param nzb: The NZBSearchResult object to send to SAB ''' category = sickbeard.SAB_CATEGORY if nzb.show.is_anime: category = sickbeard.SAB_CATEGORY_ANIME # if it aired more than 7 days ago, override with the backlog category IDs for curEp in nzb.episodes:
def split_result(obj):
    """
    Split obj into separate episodes.

    :param obj: to search for results
    :return: a list of episode objects or an empty list
    """
    # NOTE(review): obj.extraInfo[0] is used below as the show object
    # (wantEpisode/getEpisode) -- confirm against the SearchResult definition.
    url_data = helpers.getURL(obj.url, session=helpers.make_session(), returns='content')
    if url_data is None:
        logger.log(u"Unable to load url " + obj.url + ", can't download season NZB", logger.ERROR)
        return []

    # parse the season ep name
    try:
        parsed_obj = NameParser(False, showObj=obj.show).parse(obj.name)
    except (InvalidNameException, InvalidShowException) as error:
        logger.log(u"{}".format(error), logger.DEBUG)
        return []

    # bust it up
    season = 1 if parsed_obj.season_number is None else parsed_obj.season_number

    separate_nzbs, xmlns = get_season_nzbs(obj.name, url_data, season)

    result_list = []

    # TODO: Re-evaluate this whole section
    #   If we have valid results and hit an exception, we ignore the results found so far.
    #   Maybe we should return the results found or possibly continue with the next iteration of the loop
    #   Also maybe turn this into a function and generate the results_list with a list comprehension instead
    for new_nzb in separate_nzbs:
        logger.log(u"Split out " + new_nzb + " from " + obj.name, logger.DEBUG)  # pylint: disable=no-member

        # parse the name
        try:
            parsed_obj = NameParser(False, showObj=obj.show).parse(new_nzb)
        except (InvalidNameException, InvalidShowException) as error:
            logger.log(u"{}".format(error), logger.DEBUG)
            return []

        # make sure the result is sane
        if (parsed_obj.season_number != season) or (parsed_obj.season_number is None and season != 1):  # pylint: disable=no-member
            logger.log(u"Found " + new_nzb + " inside " + obj.name + " but it doesn't seem to belong to the same season, ignoring it",
                       logger.WARNING)
            continue
        elif len(parsed_obj.episode_numbers) == 0:  # pylint: disable=no-member
            logger.log(u"Found " + new_nzb + " inside " + obj.name + " but it doesn't seem to be a valid episode NZB, ignoring it",
                       logger.WARNING)
            continue

        # Skip this NZB entirely if any contained episode is unwanted.
        want_ep = True
        for ep_num in parsed_obj.episode_numbers:
            if not obj.extraInfo[0].wantEpisode(season, ep_num, obj.quality):
                logger.log(u"Ignoring result: " + new_nzb, logger.DEBUG)
                want_ep = False
                break
        if not want_ep:
            continue

        # get all the associated episode objects
        ep_obj_list = [obj.extraInfo[0].getEpisode(season, ep) for ep in parsed_obj.episode_numbers]

        # make a result
        cur_obj = classes.NZBDataSearchResult(ep_obj_list)
        cur_obj.name = new_nzb
        cur_obj.provider = obj.provider
        cur_obj.quality = obj.quality
        cur_obj.extraInfo = [create_nzb_string(separate_nzbs[new_nzb], xmlns)]

        result_list.append(cur_obj)

    return result_list
def __init__(self):
    """Gets a list of most popular TV series from imdb"""
    # Shared HTTP session for all requests made by this list provider.
    self.session = helpers.make_session()
def __init__(self):
    """Cache images under a subfolder named after this module."""
    # rpartition('.') yields the last dotted component, or the whole name
    # when the module is not inside a package -- identical to the old
    # conditional split.
    self.cache_subfolder = __name__.rpartition('.')[2]
    self.session = helpers.make_session()
def __init__(self):
    """Cache images under a subfolder named after this module."""
    # Short module name: the part after the last dot, or the full name
    # when the module is not inside a package.
    short_name = __name__
    if '.' in short_name:
        short_name = short_name.split('.')[-1]
    self.cache_subfolder = short_name
    self.session = helpers.make_session()
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30 } } INDEXER_TVDB = 1 INDEXER_TVRAGE = 2 # Must keep indexerConfig = { INDEXER_TVDB: { 'id': INDEXER_TVDB, 'name': 'theTVDB', 'module': Tvdb, 'api_params': { 'apikey': 'F9C450E78D99172E', 'language': 'en', 'useZip': True, }, 'session': helpers.make_session(), 'trakt_id': 'tvdb_id', 'xem_origin': 'tvdb', 'icon': 'thetvdb16.png', 'scene_loc': 'https://cdn.pymedusa.com/scene_exceptions/scene_exceptions.json', 'show_url': 'http://thetvdb.com/?tab=series&id=', 'base_url': 'http://thetvdb.com/api/%(apikey)s/series/' } } indexerConfig[INDEXER_TVDB]['base_url'] %= indexerConfig[INDEXER_TVDB]['api_params'] # insert API key into base url