def __init__(self):
    super(Trakt, self).__init__()

    addApiView('automation.trakt.auth_url', self.getAuthorizationUrl)
    addApiView('automation.trakt.credentials', self.getCredentials)

    fireEvent('schedule.interval', 'updater.check', self.refreshToken, hours = 24)

    addEvent('app.load', self.refreshToken)

def getChartList(self):
    # Nearly identical to 'getIMDBids', but we don't care about minimalMovie
    # and return all movie data (not just id)
    movie_list = {
        'name': 'Blu-ray.com - New Releases',
        'url': self.display_url,
        'order': self.chart_order,
        'list': []
    }

    movie_ids = []
    max_items = int(self.conf('max_items', section='charts', default=5))
    rss_movies = self.getRSSData(self.rss_url)

    for movie in rss_movies:
        name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
        year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

        if not name.find('/') == -1:  # make sure it is not a double movie release
            continue

        movie = self.search(name, year)

        if movie:
            if movie.get('imdb') in movie_ids:
                continue

            is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
            if not is_movie:
                continue

            movie_ids.append(movie.get('imdb'))
            movie_list['list'].append(movie)
            if len(movie_list['list']) >= max_items:
                break

    if not movie_list['list']:
        return

    return [movie_list]

def getMovie(self, url):
    name = splitString(url, '/')[-1]

    if name.startswith('ijw_'):
        name = name[4:]

    year_name = fireEvent('scanner.name_year', name, single = True)

    return self.search(year_name.get('name'), year_name.get('year'))

def getChartList(self):
    cache_key = 'bluray.charts.rss'

    movie_list = {
        'name': 'Blu-ray.com - New Releases',
        'url': self.display_url,
        'order': self.chart_order,
        'list': []
    }

    rss_movies = self.getCache(cache_key)
    if not rss_movies:
        rss_movies = self.getRSSData(self.rss_url)

        # Put back in cache with a fresh 'reload' value in case it changed
        self.setCache(cache_key, rss_movies, timeout=self.conf('reload') * 24 * 60 * 60)

    movie_ids = []
    max_items = self.conf('items')

    for movie in rss_movies:
        name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
        year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

        if not name.find('/') == -1:  # make sure it is not a double movie release
            continue

        movie = self.search(name, year)

        if movie:
            imdb = movie.get('imdb')
            if imdb in movie_ids:
                continue

            is_movie = fireEvent('movie.is_movie', identifier=imdb, single=True)
            if not is_movie:
                continue

            if movie.get('year') < self.conf('year'):
                continue

            movie_ids.append(imdb)
            movie_list['list'].append(movie)
            if len(movie_list['list']) >= max_items:
                break

    if not movie_list['list']:
        return

    return [movie_list]

def getIMDBids(self):
    movies = []

    retrieved_movies = self.getJsonData(self.url)

    if retrieved_movies:
        for movie in retrieved_movies.get('movies'):
            imdb_id = movie.get('imdb_id')
            info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
            if self.isMinimalMovie(info):
                movies.append(imdb_id)

    return movies

def getChartList(self):
    cache_key = 'bluray.charts'

    movie_list = {
        'name': 'Blu-ray.com - New Releases',
        'url': self.display_url,
        'order': self.chart_order,
        'list': self.getCache(cache_key) or []
    }

    if not movie_list['list']:
        movie_ids = []
        max_items = int(self.conf('max_items', section='charts', default=5))
        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:
            name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
            year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

            if not name.find('/') == -1:  # make sure it is not a double movie release
                continue

            movie = self.search(name, year)

            if movie:
                if movie.get('imdb') in movie_ids:
                    continue

                is_movie = fireEvent('movie.is_movie', identifier=movie.get('imdb'), single=True)
                if not is_movie:
                    continue

                movie_ids.append(movie.get('imdb'))
                movie_list['list'].append(movie)
                if len(movie_list['list']) >= max_items:
                    break

        if not movie_list['list']:
            return

        self.setCache(cache_key, movie_list['list'], timeout=259200)

    return [movie_list]

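# The chart providers above all share the same get-or-build idiom around
# getCache/setCache. A minimal standalone sketch of that pattern, assuming a
# simple dict-backed cache; get_or_build and _chart_cache are illustrative
# names, not CouchPotato APIs.
import time

_chart_cache = {}  # cache_key -> (expires_at, value)

def get_or_build(cache_key, build, timeout = 259200):  # 259200 s = 3 days
    entry = _chart_cache.get(cache_key)
    if entry and entry[0] > time.time():
        return entry[1]

    value = build()
    if value:  # like the providers above, only cache non-empty results
        _chart_cache[cache_key] = (time.time() + timeout, value)
    return value

# Example usage: get_or_build('bluray.charts', lambda: ['tt0111161'])
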
def _search(self, movie, quality, results):
    # Cookie login
    #if not self.last_login_check and not self.login():
    #    pass
    #    return

    # Build 'Title quality', then collapse leftover double spaces (repeated
    # passes handle longer runs of spaces)
    TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])) \
        .replace('-', ' ').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').encode("utf8")

    self._searchOnTitle(TitleStringReal, movie, quality, results)

    if not results:
        media_title = fireEvent('library.query', movie, include_year = False, single = True)
        for title in possibleTitles(media_title):
            self._searchOnTitle(title, movie, quality, results)

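# The chained replace('  ', ' ') calls above only collapse a bounded number
# of repeated spaces. A hedged alternative sketch (not part of the original
# provider): collapse any whitespace run with a regex; collapse_spaces is an
# illustrative helper name.
import re

def collapse_spaces(text):
    # Replace every run of whitespace with a single space and trim the ends.
    return re.sub(r'\s+', ' ', text).strip()

# Example: collapse_spaces('Movie -  Title   720p') -> 'Movie - Title 720p'
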
def getMovie(self, url):
    cookie = {'Cookie': 'welcomeScreen=welcome_screen'}

    try:
        data = self.urlopen(url, headers = cookie)
    except:
        return

    html = BeautifulSoup(data)
    name = html.find('meta', {'name': 'title'})['content'][:-9].strip()

    name_year = fireEvent('scanner.name_year', name, single = True)
    name = name_year.get('name')
    year = name_year.get('year')

    return self.search(name, year)

def getChartList(self):
    # Nearly identical to 'getIMDBids', but we don't care about minimalMovie
    # and return all movie data (not just id)
    movie_lists = []
    max_items = 10

    for name in self.charts:
        chart = self.charts[name].copy()
        cache_key = 'imdb.chart_display_%s' % name

        if self.conf('chart_display_%s' % name):

            cached = self.getCache(cache_key)
            if cached:
                chart['list'] = cached
                movie_lists.append(chart)
                continue

            url = chart.get('url')

            chart['list'] = []
            imdb_ids = self.getFromURL(url)

            try:
                for imdb_id in imdb_ids[0:max_items]:
                    is_movie = fireEvent('movie.is_movie', identifier = imdb_id, adding = False, single = True)
                    if not is_movie:
                        continue

                    info = self.getInfo(imdb_id)
                    chart['list'].append(info)

                    if self.shuttingDown():
                        break
            except:
                log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))

            self.setCache(cache_key, chart['list'], timeout = 259200)

            if chart['list']:
                movie_lists.append(chart)

    return movie_lists

def getChartList(self):
    cache_key = 'bluray.charts'

    movie_list = {
        'name': 'Blu-ray.com - New Releases',
        'url': self.display_url,
        'order': self.chart_order,
        'list': self.getCache(cache_key) or []
    }

    if not movie_list['list']:
        movie_ids = []
        max_items = 10
        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:
            name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
            year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

            if not name.find('/') == -1:  # make sure it is not a double movie release
                continue

            movie = self.search(name, year)

            if movie:
                if movie.get('imdb') in movie_ids:
                    continue

                is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
                if not is_movie:
                    continue

                movie_ids.append(movie.get('imdb'))
                movie_list['list'].append(movie)
                if len(movie_list['list']) >= max_items:
                    break

        if not movie_list['list']:
            return

        self.setCache(cache_key, movie_list['list'], timeout = 259200)

    return [movie_list]

def getChartList(self):
    cache_key = 'yts.charts'

    movie_list = {
        'name': 'YTS - Popular Downloads',
        'url': self.display_url,
        'order': self.chart_order,
        'list': self.getCache(cache_key) or []
    }

    if not movie_list['list']:
        movie_ids = []
        max_items = 10
        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:
            name = self.getTextElement(movie, 'title').lower()[9:].split("(", 1)[0].rstrip()
            year = self.getTextElement(movie, 'title').split("(")[1].split(")")[0].rstrip()

            if not name.find('/') == -1:  # make sure it is not a double movie release
                continue

            movie = self.search(name, year)

            if movie:
                if movie.get('imdb') in movie_ids:
                    continue

                is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
                if not is_movie:
                    continue

                movie_ids.append(movie.get('imdb'))
                movie_list['list'].append(movie)
                if len(movie_list['list']) >= max_items:
                    break

        if not movie_list['list']:
            return

        self.setCache(cache_key, movie_list['list'], timeout = 259200)

    return [movie_list]

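# The split chains above assume the RSS title always looks like
# '<prefix>Name (Year) ...' and raise IndexError otherwise. A hedged regex
# sketch that fails soft instead; TITLE_YEAR and parse_title_year are
# illustrative names, not part of the codebase.
import re

TITLE_YEAR = re.compile(r'^(?P<name>.+?)\s*\((?P<year>\d{4})\)')

def parse_title_year(raw_title):
    match = TITLE_YEAR.search(raw_title)
    if not match:
        return None, None
    return match.group('name').strip(), match.group('year')

# Example: parse_title_year('Inception (2010) 1080p') -> ('Inception', '2010')
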
def getMovie(self, url):
    try:
        data = self.getUrl(url)
    except:
        return

    try:
        title = re.findall("<title>(.*)</title>", data)
        # Collapse doubled spaces left in the page title before parsing
        title = title[0].split(' - Rotten')[0].replace('  ', ' ').decode('unicode_escape')
        name_year = fireEvent('scanner.name_year', title, single = True)

        name = name_year.get('name')
        year = name_year.get('year')

        if name and year:
            return self.search(name, year)
    except:
        log.error('Failed parsing page for title and year: %s', traceback.format_exc())

def getChartList(self):
    # Nearly identical to 'getIMDBids', but we don't care about minimalMovie
    # and return all movie data (not just id)
    movie_lists = []
    max_items = int(self.conf('max_items', section='charts', default=5))

    for name in self.charts:
        chart = self.charts[name].copy()
        url = chart.get('url')

        if self.conf('chart_display_%s' % name):

            chart['list'] = []
            imdb_ids = self.getFromURL(url)

            try:
                for imdb_id in imdb_ids[0:max_items]:
                    is_movie = fireEvent('movie.is_movie', identifier=imdb_id, single=True)
                    if not is_movie:
                        continue

                    info = self.getInfo(imdb_id)
                    chart['list'].append(info)

                    if self.shuttingDown():
                        break
            except:
                log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))

            if chart['list']:
                movie_lists.append(chart)

    return movie_lists

def refreshToken(self):
    token = self.conf('automation_oauth_token')
    refresh_token = self.conf('automation_oauth_refresh')

    if token and refresh_token:
        prop_name = 'last_trakt_refresh'
        last_refresh = int(Env.prop(prop_name, default = 0))

        if last_refresh < time.time() - 4838400:  # refresh every 8 weeks
            log.debug('Refreshing trakt token')

            url = self.urls['refresh_token'] + '?token=' + self.conf('automation_oauth_refresh')
            data = fireEvent('cp.api_call', url, cache_timeout = 0, single = True)

            if data and 'oauth' in data and 'refresh' in data:
                log.debug('Oauth refresh: %s', data)
                self.conf('automation_oauth_token', value = data.get('oauth'))
                self.conf('automation_oauth_refresh', value = data.get('refresh'))
                Env.prop(prop_name, value = int(time.time()))
            else:
                log.error('Failed refreshing Trakt token, please re-register in settings')

    elif token and not refresh_token:
        log.error('Refresh token is missing, please re-register Trakt for autorefresh of the token in the future')

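# The magic number 4838400 in refreshToken above is eight weeks in seconds.
# A minimal sketch showing the gate as a pure function, for clarity only;
# needs_refresh and EIGHT_WEEKS are illustrative names, not from the codebase.
import time

EIGHT_WEEKS = 8 * 7 * 24 * 60 * 60  # = 4838400 seconds, the constant used above

def needs_refresh(last_refresh, now = None):
    # True when the stored timestamp is more than eight weeks old.
    now = time.time() if now is None else now
    return last_refresh < now - EIGHT_WEEKS

# Example: needs_refresh(0) -> True, so a fresh install refreshes immediately.
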
def getInfo(self, imdb_id):
    return fireEvent('movie.info', identifier = imdb_id, merge = True)

def getRequestHeaders(self):
    return {'User-Agent': fireEvent('app.version', single=True)}

def _search(self, movie, quality, results):
    # Cookie login
    if not self.last_login_check and not self.login():
        return

    searchStrings = self.getSearchParams(movie, quality)
    lastsearch = 0
    searcher = Searcher()

    for searchString in searchStrings:

        # Throttle: keep at least 10 seconds between consecutive searches
        actualtime = int(time.time())
        if actualtime - lastsearch < 10:
            timetosleep = 10 - (actualtime - lastsearch)
            time.sleep(timetosleep)
        lastsearch = int(time.time())  # record the request time, otherwise the throttle never engages

        URL = self.urls['search'] + searchString
        r = self.opener.open(URL)
        soup = BeautifulSoup(r, "html.parser")

        if soup.find('table', attrs = {'class': 'results'}):
            resultdiv = soup.find('table', attrs = {'class': 'results'}).find('tbody')
        else:
            continue

        if resultdiv:
            try:
                for result in resultdiv.findAll('tr'):
                    try:
                        # Category id is everything from '=' onward in the first link's href
                        href = result.findAll('td')[0].findAll('a')[0]['href']
                        categorie = href[href.find('='):]

                        if categorie in ('=631', '=455', '=634'):
                            new = {}

                            idt = result.findAll('td')[2].findAll('a')[0]['href'][1:].replace('torrents/nfo/?id=', '')
                            name = result.findAll('td')[1].findAll('a')[0]['title']

                            testname = searcher.correctName(name, movie['title'])
                            if not testname:
                                continue

                            url = (self.urls['download'] % idt)
                            detail_url = (self.urls['detail'] % idt)
                            leecher = result.findAll('td')[8].text
                            size = result.findAll('td')[5].text
                            age = result.findAll('td')[4].text
                            seeder = result.findAll('td')[7].text

                            def extra_check(item):
                                return True

                            new['id'] = idt
                            new['name'] = name + ' french'
                            new['url'] = url
                            new['detail_url'] = detail_url
                            new['size'] = self.parseSize(str(size))
                            new['age'] = self.ageToDays(str(age))
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            new['download'] = self.download

                            log.debug("url='%s'" % str(url))
                            results.append(new)
                    except:
                        log.error('Failed parsing T411: %s', traceback.format_exc())
            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')

    if not results:
        media_title = fireEvent('library.query', movie, include_year = False, single = True)
        for title in possibleTitles(media_title):
            self._searchOnTitle(title, movie, quality, results)

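# The inline 10-second delay above is a recurring pattern across these
# providers. A minimal reusable sketch, assuming a single-threaded caller;
# RateGate is an illustrative name, not from the codebase.
import time

class RateGate(object):

    def __init__(self, min_interval = 10):
        self.min_interval = min_interval
        self.last = 0

    def wait(self):
        # Sleep just long enough to keep calls at least min_interval apart.
        elapsed = time.time() - self.last
        if elapsed < self.min_interval:
            time.sleep(self.min_interval - elapsed)
        self.last = time.time()

# Example usage:
#     gate = RateGate(10)
#     for searchString in searchStrings:
#         gate.wait()
#         # ...perform the request...
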
def getInfo(self, imdb_id):
    return fireEvent('movie.info', identifier = imdb_id, extended = False, adding = False, merge = True)

def buildUrl(self, media, quality):
    return (
        tryUrlencode(fireEvent('library.query', media, single = True)),
        self.getCatId(quality)[0]
    )