def _post(self, url, data={}):
    if self.token == '' and 'token' not in url: return None
    headers = {'User-Agent': self.user_agent, 'Authorization': 'Bearer %s' % self.token}
    if 'token' not in url: url = self.base_url + url
    response = requests.post(url, data=data, headers=headers).text
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def cache_trakt_object(function, string, url, expiration=None):
    expires = expiration if expiration else settings.trakt_cache_duration()
    _cache = TraktCache()
    cache = _cache.get(string)
    if cache: return to_utf8(cache)
    result = function(url)
    _cache.set(string, result, expiration=datetime.timedelta(hours=expires))
    return to_utf8(result)

def cache_function(function, string, url, expiration=96, json=True):
    data = metacache.get_function(string)
    if data: return to_utf8(data)
    if json: result = function(url).json()
    else: result = function(url)
    metacache.set_function(string, result, expiration=timedelta(hours=expiration))
    return to_utf8(result)

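# Illustrative only: a hedged sketch of how cache_function (above) might be used to
# memoise a simple HTTP lookup. The fetch helper, endpoint URL and cache key below are
# assumptions for demonstration, not names taken from the addon itself.
def example_cached_lookup():
    import requests
    def fetch(url):
        # cache_function calls .json() on this return value when json=True
        return requests.get(url, timeout=20)
    url = 'https://api.example.com/v1/popular'  # hypothetical endpoint
    string = 'example_popular_results'          # hypothetical cache key
    return cache_function(fetch, string, url, expiration=24, json=True)
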
def _get_ep_name(self):
    ep_name = None
    if self.vid_type == 'episode':
        ep_name = self.meta.get('ep_name')
        try: ep_name = to_utf8(safe_string(remove_accents(ep_name)))
        except: ep_name = to_utf8(safe_string(ep_name))
    return ep_name

def _get(self, url, params={}):
    headers = {'Authorization': self.auth}
    response = requests.get(url, params=params, headers=headers, timeout=self.timeout).text
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def _process():
    for listing in to_utf8(all_reviews):
        try: spoiler = listing['spoiler']
        except: spoiler = False
        try: listing = listing['content']
        except: continue
        try:
            try:
                title = parseDOM(listing, 'a', attrs={'class': 'title'})[0]
                title = to_utf8(remove_accents(title))
            except: title = ''
            try:
                date = parseDOM(listing, 'span', attrs={'class': 'review-date'})[0]
                date = to_utf8(remove_accents(date))
            except: date = ''
            try:
                rating = parseDOM(listing, 'span', attrs={'class': 'rating-other-user-rating'})
                rating = parseDOM(rating, 'span')
                rating = rating[0] + rating[1]
                rating = to_utf8(remove_accents(rating))
            except: rating = ''
            try:
                content = parseDOM(listing, 'div', attrs={'class': 'text show-more__control'})[0]
                content = replace_html_codes(content)
                content = to_utf8(remove_accents(content))
                content = content.replace('<br/><br/>', '\n')
            except: continue
            review = {'spoiler': spoiler, 'title': title, 'date': date, 'rating': rating, 'content': content}
            yield review
        except: pass

def cache_object(function, string, url, json=True, expiration=24):
    from modules.utils import to_utf8
    _cache = FenCache()
    cache = _cache.get(string)
    if cache: return to_utf8(cache)
    if isinstance(url, list): args = tuple(url)
    else: args = (url,)
    if json: result = function(*args).json()
    else: result = function(*args)
    _cache.set(string, result, expiration=datetime.timedelta(hours=expiration))
    return to_utf8(result)

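# Illustrative only: a hedged sketch of calling cache_object (above). Passing a list as
# 'url' spreads its items as positional arguments to 'function'; a plain string is passed
# through as a single argument. The fetcher, endpoint and cache key here are assumptions.
def example_cache_object_call():
    import requests
    def fetch(url, page):
        # cache_object calls .json() on this return value when json=True
        return requests.get(url, params={'page': page}, timeout=20)
    string = 'example_list_page_2'  # hypothetical cache key
    return cache_object(fetch, string, ['https://api.example.com/v1/list', 2], json=True, expiration=2)
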
def _get_v3(self, url, params={}):
    headers = {'Authorization': self.auth}
    response = requests.get(url, params=params, headers=headers, timeout=self.timeout).content
    # self.regex captures a JSON object that the match truncates before its final brace,
    # so the closing '}' is re-appended before parsing
    response = re.compile(self.regex, re.DOTALL).findall(response)[0]
    response = response + '}'
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def account_info():
    from datetime import datetime
    import time
    try:
        account_html, usage_html = EasyNews.account()
        if not account_html or not usage_html:
            return dialog.ok('Fen', 'Error Getting Easynews Info.', 'Account or Usage html not available.')
        account_info = {
            'account_username': to_utf8(account_html[0].find_all('td', recursive=False)[1].getText()),
            'account_type': to_utf8(account_html[1].find_all('td', recursive=False)[2].getText()),
            'account_status': to_utf8(account_html[3].find_all('td', recursive=False)[2].getText()),
            'account_expiration': to_utf8(account_html[2].find_all('td', recursive=False)[2].getText()),
            'usage_total': to_utf8(usage_html[0].find_all('td', recursive=False)[1].getText()),
            'usage_web': to_utf8(usage_html[1].find_all('td', recursive=False)[2].getText()),
            'usage_NNTP': to_utf8(usage_html[2].find_all('td', recursive=False)[2].getText()),
            'usage_remaining': to_utf8(usage_html[4].find_all('td', recursive=False)[2].getText()),
            'usage_loyalty': to_utf8(usage_html[5].find_all('td', recursive=False)[2].getText())
        }
        resformat = '%Y-%m-%d'
        try: expires = datetime.strptime(account_info['account_expiration'], resformat)
        except TypeError: expires = datetime(*(time.strptime(account_info['account_expiration'], resformat)[0:6]))
        days_remaining = (expires - datetime.today()).days
        heading = 'EASYNEWS'
        body = []
        body.append('[B]Account:[/B] %s' % account_info['account_type'])
        body.append('[B]Username:[/B] %s' % account_info['account_username'])
        body.append('[B]Status:[/B] %s' % account_info['account_status'])
        body.append('[B]Expires:[/B] %s' % expires)
        body.append('[B]Days Remaining:[/B] %s' % days_remaining)
        body.append('[B]Data Used:[/B] %s' % account_info['usage_total'].replace('Gigs', 'GB'))
        body.append('[B]Data Remaining:[/B] %s' % account_info['usage_remaining'].replace('Gigs', 'GB'))
        return dialog.select(heading, body)
    except Exception as e:
        return dialog.ok('Fen', 'Error Getting Easynews Info.', e)

def playAudioAlbum(self, t_files=None, name=None, from_seperate=False):
    import os
    import xbmcaddon
    from modules.utils import clean_file_name, batch_replace, to_utf8
    from modules.nav_utils import setView
    icon_directory = settings.get_theme()
    default_furk_icon = os.path.join(icon_directory, 'furk.png')
    formats = (('.3gp', ''), ('.aac', ''), ('.flac', ''), ('.m4a', ''), ('.mp3', ''), ('.ogg', ''),
               ('.raw', ''), ('.wav', ''), ('.wma', ''), ('.webm', ''), ('.ra', ''), ('.rm', ''))
    params = dict(parse_qsl(sys.argv[2].replace('?', '')))
    furk_files_list = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
    playlist.clear()
    if from_seperate:
        t_files = [i for i in t_files if clean_file_name(i['path']) == params.get('item_path')]
    for item in t_files:
        try:
            name = item['path'] if not name else name
            if 'audio' not in item['ct']: continue
            url = item['url_dl']
            track_name = clean_file_name(batch_replace(to_utf8(item['name']), formats))
            listitem = xbmcgui.ListItem(track_name)
            listitem.setThumbnailImage(default_furk_icon)
            listitem.setInfo(type='music', infoLabels={'title': track_name, 'size': int(item['size']),
                'album': clean_file_name(batch_replace(to_utf8(name), formats)), 'duration': item['length']})
            listitem.setProperty('mimetype', 'audio/mpeg')
            playlist.add(url, listitem)
            if from_seperate: furk_files_list.append((url, listitem, False))
        except: pass
    self.play(playlist)
    if from_seperate:
        xbmcplugin.addDirectoryItems(__handle__, furk_files_list, len(furk_files_list))
        setView('view.furk_files')
        xbmcplugin.endOfDirectory(__handle__)

def _post(self, url, post_data):
    original_url = url
    url = self.base_url + url
    if self.token == '': return None
    if '?' not in url: url += '?auth_token=%s' % self.token
    else: url += '&auth_token=%s' % self.token
    response = requests.post(url, data=post_data, timeout=10).text
    # a rejected or expired token triggers a refresh and a recursive retry of the call
    if 'bad_token' in response or 'Bad Request' in response:
        self.refreshToken()
        response = self._post(original_url, post_data)
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def _get(self, url, data={}):
    if self.token == '': return None
    headers = {'User-Agent': self.user_agent, 'Authorization': 'Bearer %s' % self.token}
    url = base_url + url
    response = requests.get(url, data=data, headers=headers, timeout=self.timeout).text
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def retrieve_kodi_library(db_type, page_no, letter):
    from modules.utils import title_key, to_utf8
    from modules.nav_utils import paginate_list
    from settings import paginate, page_limit
    paginate = paginate()
    limit = page_limit()
    if db_type in ('movie', 'movies'):
        db_type = 'movies'
        JSON_req = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"properties": ["title", "imdbnumber"]}, "id": "1"}'
    elif db_type in ('tvshow', 'tvshows'):
        db_type = 'tvshows'
        JSON_req = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTvShows", "params": {"properties": ["title", "imdbnumber", "uniqueid"]}, "id": "1"}'
    r = xbmc.executeJSONRPC(JSON_req)
    r = to_utf8(json.loads(r)['result'][db_type])
    r = sorted(r, key=lambda k: title_key(k['title']))
    if db_type == 'movies':
        original_list = [{'title': i.get('title', None), 'media_id': i.get('imdbnumber', None)} for i in r]
    else:
        original_list = [{'title': i.get('title', None),
                          'media_id': i['uniqueid']['tvdb'] if 'uniqueid' in i and 'tvdb' in i['uniqueid']
                          else i['imdbnumber'] if i['imdbnumber'].startswith('tt') else None} for i in r]
    if paginate: final_list, total_pages = paginate_list(original_list, page_no, letter, limit)
    else: final_list, total_pages = original_list, 1
    return final_list, total_pages

def build_url(query):
    try: from urllib import urlencode  # Python 2
    except ImportError: from urllib.parse import urlencode  # Python 3
    from modules.utils import to_utf8
    return sys.argv[0] + '?' + urlencode(to_utf8(query))

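# Illustrative only: a hedged sketch of build_url (above) in use. Under Kodi, sys.argv[0]
# is the invoking plugin's base URL, so a dict of route parameters becomes a plugin:// URL
# that can be attached to a ListItem. The mode and parameter names are assumptions.
def example_build_url():
    url_params = {'mode': 'media_play', 'url': 'http://example.com/track.mp3', 'media_type': 'music'}
    return build_url(url_params)  # e.g. plugin://plugin.video.fen/?mode=media_play&url=...
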
def build_list_object():
    try:
        cm = []
        url_params = {'mode': 'media_play', 'url': item['url_dl'], 'media_type': 'music'}
        url = build_url(url_params)
        track_name = clean_file_name(batch_replace(to_utf8(item['name']), formats)).upper()
        listitem = make_listitem()
        listitem.setLabel(track_name)
        down_file_params = {'mode': 'downloader', 'name': item['name'], 'url': item['url_dl'], 'action': 'audio', 'image': default_furk_icon}
        cm.append((ls(32747), 'RunPlugin(%s)' % build_url(down_file_params)))
        listitem.addContextMenuItems(cm)
        listitem.setArt({'icon': default_furk_icon, 'poster': default_furk_icon, 'thumb': default_furk_icon, 'fanart': fanart, 'banner': default_furk_icon})
        kodi_utils.add_item(__handle__, url, listitem, True)
    except: pass

def cast(self):
    key = 'cast'
    if self._action(key) in ('clear', None): return
    from apis.tmdb_api import get_tmdb
    from caches.main_cache import cache_object
    result = None
    actor_id = None
    search_name = kodi_utils.dialog.input(self.heading_base % ls(32664))
    if not search_name: return
    string = '%s_%s' % ('tmdb_movies_people_search_actor_data', search_name)
    url = 'https://api.themoviedb.org/3/search/person?api_key=%s&language=en-US&query=%s' % (self.tmdb_api, search_name)
    result = cache_object(get_tmdb, string, url, 4)
    result = result['results']
    if not result: return
    actor_list = []
    append = actor_list.append
    if len(result) > 1:
        for item in result:
            name = item['name']
            known_for_list = [i.get('title', 'NA') for i in item['known_for']]
            known_for_list = [i for i in known_for_list if not i == 'NA']
            known_for = ', '.join(known_for_list) if known_for_list else ''
            if item.get('profile_path'): icon = 'https://image.tmdb.org/t/p/h632/%s' % item['profile_path']
            else: icon = kodi_utils.translate_path('special://home/addons/script.tikiart/resources/media/genre_family.png')
            append({'line1': name, 'line2': known_for, 'icon': icon, 'name': name, 'id': item['id']})
        heading = self.heading_base % ls(32664)
        kwargs = {'items': json.dumps(actor_list), 'heading': heading, 'enumerate': 'false', 'multi_choice': 'false', 'multi_line': 'true'}
        choice = kodi_utils.select_dialog(actor_list, **kwargs)
        if choice is None: return self._set_property()
        actor_id = choice['id']
        actor_name = choice['name']
    else:
        actor_id = [item['id'] for item in result][0]
        actor_name = [item['name'] for item in result][0]
    if actor_id:
        values = ('&with_cast=%s' % str(actor_id), to_utf8(safe_string(remove_accents(actor_name))))
        self._process(key, values)

def _process():
    for item in to_utf8(items):
        try:
            keyword = re.findall(r'" >(.+?)</a>', item, re.DOTALL)[0]
            yield keyword
        except: pass

def search(self, query, imdb_id, language, season=None, episode=None):
    url = '/imdbid-%s/query-%s' % (imdb_id, quote(query))
    if season: url += '/season-%d/episode-%d' % (season, episode)
    url += '/sublanguageid-%s' % language
    url = self.base_url + url
    response = self._get(url, retry=True)
    response = to_utf8(json.loads(response.text))
    return response

def get_favourites(self, db_type):
    dbcon = database.connect(self.fav_database)
    dbcur = dbcon.cursor()
    dbcur.execute('''SELECT tmdb_id, title FROM favourites WHERE db_type=?''', (db_type,))
    result = dbcur.fetchall()
    dbcon.close()
    result = [{'tmdb_id': str(i[0]), 'title': str(to_utf8(i[1]))} for i in result]
    return result

def account_info(params):
    from datetime import datetime
    from modules.utils import jsondate_to_datetime
    try:
        account_info, usage_info = EasyNews.account()
        if not account_info or not usage_info:
            return kodi_utils.ok_dialog(text=32574, top_space=True)
        body = []
        append = body.append
        expires = jsondate_to_datetime(to_utf8(account_info[2]), '%Y-%m-%d')
        days_remaining = (expires - datetime.today()).days
        append(ls(32758) % to_utf8(account_info[1]))
        append(ls(32755) % to_utf8(account_info[0]))
        append(ls(32757) % to_utf8(account_info[3]))
        append(ls(32750) % expires)
        append(ls(32751) % days_remaining)
        append('%s %s' % (ls(32772), to_utf8(usage_info[2]).replace('years', ls(32472))))
        append(ls(32761) % to_utf8(usage_info[0]).replace('Gigs', 'GB'))
        append(ls(32762) % to_utf8(usage_info[1]).replace('Gigs', 'GB'))
        return kodi_utils.show_text(ls(32070).upper(), '\n\n'.join(body), font_size='large')
    except: pass

def _get(self, url):
    original_url = url
    url = base_url + url
    if self.token == '': return None
    if '?' not in url: url += '?auth_token=%s' % self.token
    else: url += '&auth_token=%s' % self.token
    response = requests.get(url, timeout=self.timeout).text
    if 'bad_token' in response or 'Bad Request' in response:
        if self.refreshToken(): response = self._get(original_url)
        else: return None
    try: return to_utf8(json.loads(response))
    except: return to_utf8(response)

def _process():
    for item in to_utf8(items):
        try:
            title = parseDOM(item, 'a')[0]
            title = replace_html_codes(title)
            url = parseDOM(item, 'a', ret='href')[0]
            list_id = url.split('/list/', 1)[-1].strip('/')
            yield {'title': title, 'list_id': list_id}
        except: pass

def _process():
    for item in to_utf8(items):
        try:
            content = re.sub(r'<a href="\S+">', '', item).replace('</a>', '')
            content = replace_html_codes(content)
            content = content.replace('<br/><br/>', '\n')
            yield content
        except: pass

def search(self, query):
    self.search_url, self.params = self._translate_search(query)
    cache_name = 'fen_EASYNEWS_SEARCH_' + urlencode(self.params)
    cache = _cache.get(cache_name)
    if cache:
        files = cache
    else:
        results = self._get(self.search_url, self.params)
        files = to_utf8(self._process_files(results))
        _cache.set(cache_name, files, expiration=datetime.timedelta(hours=2))
    return files

def build_list_object():
    try:
        cm = []
        url_params = {'mode': 'media_play', 'url': item['url_dl'], 'rootname': 'music'}
        url = build_url(url_params)
        track_name = clean_file_name(batch_replace(to_utf8(item['name']), formats)).upper()
        listitem = xbmcgui.ListItem(track_name)
        down_file_params = {'mode': 'download_file', 'name': item['name'], 'url': item['url_dl'], 'image': default_furk_icon, 'db_type': 'audio'}
        cm.append(('[B]Download File[/B]', 'XBMC.RunPlugin(%s)' % build_url(down_file_params)))
        listitem.addContextMenuItems(cm)
        listitem.setArt({'icon': default_furk_icon, 'poster': default_furk_icon, 'thumb': default_furk_icon, 'fanart': fanart, 'banner': default_furk_icon})
        xbmcplugin.addDirectoryItem(__handle__, url, listitem, isFolder=True)
    except: pass

def add_trakt_subscription_listitem(self, db_type, ids, count, total, path, dialog):
    try:
        get_ids = get_trakt_movie_id if db_type in ('movie', 'movies') else get_trakt_tvshow_id
        meta_action = tikimeta.movie_meta if db_type in ('movie', 'movies') else tikimeta.tvshow_meta
        tmdb_id = get_ids(ids)
        address_insert = 'movie' if db_type in ('movie', 'movies') else 'tv'
        meta = meta_action('tmdb_id', tmdb_id, self.meta_user_info)
        title = clean_file_name(meta['title'])
        year = meta['year'] if 'year' in meta else '0'
        rootname = '{0} ({1})'.format(title, year) if year != '0' else title
        folder = os.path.join(path, rootname + '/')
        nfo_filename = rootname + '.nfo' if db_type in ('movie', 'movies') else 'tvshow.nfo'
        nfo_filepath = os.path.join(folder, nfo_filename)
        nfo_content = 'https://www.themoviedb.org/%s/%s-%s' % (address_insert, str(meta['tmdb_id']), title.lower().replace(' ', '-'))
        self.make_folder(folder)
        self.make_nfo(nfo_filepath, nfo_content)
        if db_type in ('movie', 'movies'):
            in_library = get_library_video('movie', title, year) if settings.skip_duplicates() else False
            if in_library: return
            stream_file = self.create_movie_strm_files(folder, rootname)
            params = to_utf8({'mode': 'play_media', 'library': 'True', 'query': rootname, 'poster': meta['poster'],
                              'year': year, 'plot': meta['plot'], 'title': title, 'tmdb_id': meta['tmdb_id'], 'vid_type': 'movie'})
            self.make_stream(stream_file, params)
        else:
            self.create_tvshow_strm_files(meta, folder, tmdb_id, title, year)
        dialog.update(int(float(count) / float(total) * 100), '', 'Adding: [B]%s[/B]' % rootname)
    except Exception as e:
        logger('add_trakt_subscription_listitem Exception', e)

def batch_mark_episodes_as_watched_unwatched_kodi_library(show_info, list_object):
    action = list_object['action']
    episode_list = list_object['season_ep_list']
    tvshowid = list_object['tvshowid']
    playcount = 1 if action == 'mark_as_watched' else 0
    tvshowid = str(show_info['tvshowid'])
    ep_ids = []
    action_list = []
    bg_dialog = xbmcgui.DialogProgressBG()
    bg_dialog.create('Please Wait', '')
    try:
        for item in episode_list:
            try:
                season = item.split('<>')[0]
                episode = item.split('<>')[1]
                r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter": {"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file", "playcount"], "tvshowid": %s}, "id": 1}' % (str(season), str(episode), str(tvshowid)))
                r = to_utf8(r)
                r = json.loads(r)['result']['episodes'][0]
                ep_ids.append('%s<>%s' % (r['episodeid'], r['playcount']))
            except: pass
        for count, item in enumerate(ep_ids, 1):
            try:
                ep_id = item.split('<>')[0]
                current_playcount = item.split('<>')[1]
                if int(current_playcount) != playcount:
                    xbmc.sleep(50)
                    display = 'Syncing Kodi Library Watched Status'
                    bg_dialog.update(int(float(count) / float(len(ep_ids)) * 100), 'Please Wait', '%s' % display)
                    t = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid": %d, "playcount": %d}, "id": 1}' % (int(ep_id), playcount)
                    t = json.loads(t)
                    action_list.append(t)
            except: pass
        bg_dialog.update(100, 'Please Wait', 'Finalizing Sync with Kodi Library')
        r = xbmc.executeJSONRPC(json.dumps(action_list))
        bg_dialog.close()
        return r
    except: pass

def play_trailer(url, all_trailers=[]):
    if all_trailers:
        import xbmcgui
        import json
        from modules.utils import clean_file_name, to_utf8
        all_trailers = to_utf8(json.loads(all_trailers))
        video_choice = xbmcgui.Dialog().select('Youtube Videos...', [clean_file_name(i['name']) for i in all_trailers])
        if video_choice < 0: return
        url = 'plugin://plugin.video.youtube/play/?video_id=%s' % all_trailers[video_choice].get('key')
    try: xbmc.executebuiltin('RunPlugin(%s)' % url)
    except: notification('Error Playing Trailer')

def _search_name(self):
    search_title = clean_file_name(to_utf8(self.info.get("title")))
    search_title = search_title.replace(' ', '+')
    db_type = self.info.get("db_type")
    if db_type == 'movie':
        year = self.info.get("year")
        years = '%s+|+%s+|+%s' % (str(int(year - 1)), year, str(int(year + 1)))
        search_name = '@name+%s+%s' % (search_title, years)
    else:
        season = self.info.get("season")
        episode = self.info.get("episode")
        queries = self._seas_ep_query_list(season, episode)
        search_name = '@name+%s+@files+%s+|+%s+|+%s+|+%s+|+%s' % (search_title, queries[0], queries[1], queries[2], queries[3], queries[4])
    return search_name

def _process():
    for item in to_utf8(items):
        try:
            title = parseDOM(item, 'a')[1]
            year = parseDOM(item, 'span', attrs={'class': 'lister-item-year.+?'})
            year = re.findall(r'(\d{4})', year[0])[0]
            imdb_id = parseDOM(item, 'a', ret='href')[0]
            imdb_id = re.findall(r'(tt\d*)', imdb_id)[0]
            yield {'title': str(title), 'year': str(year), 'imdb_id': str(imdb_id)}
        except: pass