def playAudioAlbum(self, t_files=None, name=None, from_seperate=False):
    """Build and start a Kodi music playlist from Furk audio file entries.

    t_files: list of Furk file dicts — the loop reads the keys 'path', 'ct',
        'url_dl', 'name', 'size' and 'length'.
    name: album label; defaults to the first item's 'path' when not given.
    from_seperate: when True, only items whose cleaned 'path' matches the
        'item_path' URL parameter are queued, and matching directory items
        are also emitted on the plugin handle.
        (sic: 'seperate' spelling kept — callers pass it by this name)
    """
    import os
    import xbmcaddon
    from resources.lib.modules.utils import clean_file_name, batch_replace, to_utf8
    from resources.lib.modules.nav_utils import setView
    __addon_id__ = 'plugin.video.fen'
    __addon__ = xbmcaddon.Addon(id=__addon_id__)
    # plugin handle supplied by Kodi as argv[1]
    __handle__ = int(sys.argv[1])
    addon_dir = xbmc.translatePath(__addon__.getAddonInfo('path'))
    icon_directory = settings.get_theme()
    default_furk_icon = os.path.join(icon_directory, 'furk.png')
    # (suffix, replacement) pairs used by batch_replace to strip audio
    # file extensions out of the displayed track/album labels
    formats = ('.3gp', ''), ('.aac', ''), ('.flac', ''), ('.m4a', ''), ('.mp3', ''), \
        ('.ogg', ''), ('.raw', ''), ('.wav', ''), ('.wma', ''), ('.webm', ''), ('.ra', ''), ('.rm', '')
    params = dict(parse_qsl(sys.argv[2].replace('?', '')))
    furk_files_list = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
    playlist.clear()
    if from_seperate:
        # narrow to the single album the user picked from a separate listing
        t_files = [
            i for i in t_files
            if clean_file_name(i['path']) == params.get('item_path')
        ]
    for item in t_files:
        try:
            # first item's path becomes the album name unless one was passed in
            name = item['path'] if not name else name
            if not 'audio' in item['ct']:
                continue  # skip non-audio entries
            url = item['url_dl']
            track_name = clean_file_name(
                batch_replace(to_utf8(item['name']), formats))
            listitem = xbmcgui.ListItem(track_name)
            listitem.setThumbnailImage(default_furk_icon)
            listitem.setInfo(type='music', infoLabels={
                'title': track_name,
                'size': int(item['size']),
                'album': clean_file_name(
                    batch_replace(to_utf8(name), formats)),
                'duration': item['length']
            })
            listitem.setProperty('mimetype', 'audio/mpeg')
            playlist.add(url, listitem)
            if from_seperate:
                furk_files_list.append((url, listitem, False))
        except:  # NOTE(review): bare except silently drops malformed items
            pass
    self.play(playlist)
    if from_seperate:
        # also render the queued tracks as a directory listing
        xbmcplugin.addDirectoryItems(__handle__, furk_files_list,
                                     len(furk_files_list))
        setView('view.furk_files')
        xbmcplugin.endOfDirectory(__handle__)
def cache_trakt_object(function, string, url):
    """Fetch a Trakt result through the cache.

    Looks `string` up in the Trakt cache first; on a miss, calls
    `function(url)`, stores the result for the configured duration,
    and returns it.  Results pass through to_utf8 either way.
    """
    trakt_cache = TraktCache()
    cached = trakt_cache.get(string)
    if cached:
        return to_utf8(cached)
    fresh = function(url)
    duration = datetime.timedelta(hours=settings.trakt_cache_duration())
    trakt_cache.set(string, fresh, expiration=duration)
    return to_utf8(fresh)
def account(self):
    """Scrape account and usage details from the provider's HTML pages.

    Returns a dict of utf-8 strings: account_username/type/status/expiration
    and usage_total/web/NNTP/remaining/loyalty.
    Raises Exception when either page cannot be fetched.

    NOTE(review): the row indices below (e.g. status from row 3, expiration
    from row 2) mirror the provider's current table layout — verify against
    the live pages if parsing breaks.
    """
    from bs4 import BeautifulSoup
    account_html = self._http_get(self.account_link)
    # was `== None or == ''`; truthiness covers both None and empty string
    if not account_html:
        raise Exception()
    account_html = BeautifulSoup(account_html, "html.parser")
    account_html = account_html.find_all('form', id='accountForm')[0]
    account_html = account_html.find_all('table', recursive=False)[0]
    account_html = account_html.find_all('tr', recursive=False)
    usage_html = self._http_get(self.usage_link)
    if not usage_html:
        raise Exception()
    usage_html = BeautifulSoup(usage_html, "html.parser")
    usage_html = usage_html.find_all('div', class_='table-responsive')[0]
    usage_html = usage_html.find_all('table', recursive=False)[0]
    usage_html = usage_html.find_all('tr', recursive=False)
    account_info = {
        'account_username': to_utf8(account_html[0].find_all('td', recursive=False)[1].getText()),
        'account_type': to_utf8(account_html[1].find_all('td', recursive=False)[2].getText()),
        'account_status': to_utf8(account_html[3].find_all('td', recursive=False)[2].getText()),
        'account_expiration': to_utf8(account_html[2].find_all('td', recursive=False)[2].getText()),
        'usage_total': to_utf8(usage_html[0].find_all('td', recursive=False)[1].getText()),
        'usage_web': to_utf8(usage_html[1].find_all('td', recursive=False)[2].getText()),
        'usage_NNTP': to_utf8(usage_html[2].find_all('td', recursive=False)[2].getText()),
        'usage_remaining': to_utf8(usage_html[4].find_all('td', recursive=False)[2].getText()),
        'usage_loyalty': to_utf8(usage_html[5].find_all('td', recursive=False)[2].getText())
    }
    return account_info
def cache_object(function, string, url, json=True, expiration=24):
    """Generic cache-through helper backed by FenCache.

    On a cache hit for `string`, returns the stored value.  Otherwise calls
    `function(url)` (decoding the response as JSON when `json` is True),
    caches it for `expiration` hours, and returns it.  All return paths go
    through to_utf8.
    """
    from datetime import timedelta
    from resources.lib.modules import fen_cache
    from resources.lib.modules.utils import to_utf8
    store = fen_cache.FenCache()
    hit = store.get(string)
    if hit:
        return to_utf8(hit)
    response = function(url)
    fresh = response.json() if json else response
    store.set(string, fresh, expiration=timedelta(hours=expiration))
    return to_utf8(fresh)
def add_remove_movie(self, silent=False):
    """Add this movie to, or remove it from, the local Kodi library.

    Driven by self.action ('add' builds folder/nfo/strm files, skipping
    titles already in the library when the skip-duplicates setting is on;
    'remove' deletes the folder).  Shows a notification unless `silent`.
    """
    from resources.lib.modules.utils import to_utf8
    from resources.lib.modules.kodi_library import get_library_video
    self.add_remove_database()
    if self.action == 'add':
        duplicate = False
        if settings.skip_duplicates():
            duplicate = get_library_video('movie', self.title, self.year)
        if duplicate:
            return
        self.make_folder()
        self.make_nfo()
        strm_path = self.create_movie_strm_files()
        play_params = to_utf8({
            'mode': 'play_media',
            'library': 'True',
            'query': self.rootname,
            'poster': self.meta['poster'],
            'year': self.year,
            'plot': self.meta['plot'],
            'title': self.title,
            'tmdb_id': self.meta['tmdb_id'],
            'vid_type': 'movie'
        })
        self.make_stream(strm_path, play_params)
    elif self.action == 'remove':
        self.remove_folder()
    if not silent:
        notification('{}'.format(self.notify % self.rootname), 4500)
def search(self, query):
    """Run an Easynews search for `query`, caching results for two hours.

    Returns the processed file list (utf-8), either from the cache or
    freshly fetched from the search endpoint.
    """
    cache_key = "fen_%s_%s" % ('EASYNEWS_SEARCH', query)
    cached = _cache.get(cache_key)
    if cached:
        return cached
    search_url = '/search?query=%s' % (urllib.quote_plus(query))
    links = self._get_links(search_url)
    files = to_utf8(self._process_files(links))
    _cache.set(cache_key, files, expiration=datetime.timedelta(hours=2))
    return files
def get_subscriptions(self):
    """Return subscription rows for self.db_type from the subscriptions DB.

    Returns a list of {'tmdb_id': str, 'title': str} dicts.
    """
    dbcon = database.connect(self.sub_database)
    try:
        dbcur = dbcon.cursor()
        dbcur.execute(
            '''SELECT tmdb_id, title FROM subscriptions WHERE db_type=?''',
            (self.db_type, ))
        rows = dbcur.fetchall()
    finally:
        # fix: connection was previously leaked; close it like the other
        # DB helpers in this file do
        dbcon.close()
    return [{
        'tmdb_id': str(i[0]),
        'title': str(to_utf8(i[1]))
    } for i in rows]
def create_tvshow_strm_files(self):
    """Create per-episode .strm files for every aired episode of this show.

    Walks self.meta['season_data'] (specials, season 0, are excluded),
    pulls episode metadata per season, and writes
    '<folder>/Season N/<Title> SxxEyy.strm' for each episode that has
    already aired.  Episodes already in the Kodi library are skipped when
    the skip-duplicates setting is enabled.
    """
    from datetime import date
    from resources.lib.modules.utils import to_utf8
    from resources.lib.modules.kodi_library import get_library_video
    skip_duplicates = settings.skip_duplicates()
    # season_number 0 = specials; excluded from the library build
    season_data = [
        i for i in self.meta['season_data'] if int(i['season_number']) > 0
    ]
    for i in season_data:
        infoLabels = tikimeta.season_episodes_meta(self.tmdb_id,
                                                   i['season_number'])
        ep_data = infoLabels['episodes']
        for item in ep_data:
            in_library = get_library_video(
                'episode', self.title, self.year, item['season_number'],
                item['episode_number']) if skip_duplicates else None
            if not in_library:
                first_aired = item[
                    'air_date'] if 'air_date' in item else None
                try:
                    # 'air_date' is expected as 'YYYY-MM-DD'
                    d = first_aired.split('-')
                    episode_date = date(int(d[0]), int(d[1]), int(d[2]))
                except:
                    # unparsable/missing date: push far into the future so
                    # the episode is treated as not-yet-aired
                    episode_date = date(2100, 10, 24)
                if date.today() > episode_date:
                    display = "%s S%.2dE%.2d" % (
                        self.title, int(item['season_number']),
                        int(item['episode_number']))
                    season_path = os.path.join(
                        self.folder,
                        'Season ' + str(item['season_number']))
                    self.make_folder(season_path)
                    stream_file = os.path.join(season_path,
                                               str(display) + '.strm')
                    # playback parameters serialized into the .strm file
                    params = to_utf8({
                        'mode': 'play_media',
                        'library': 'True',
                        'query': self.title,
                        'year': self.year,
                        'plot': item['overview'],
                        'poster': self.meta['poster'],
                        'season': item['season_number'],
                        'episode': item['episode_number'],
                        'ep_name': item['name'],
                        'premiered': item['air_date'],
                        'tmdb_id': self.tmdb_id,
                        'vid_type': 'episode'
                    })
                    self.make_stream(stream_file, params)
def play_trailer(url, all_trailers=None):
    """Play a trailer via the YouTube plugin.

    url: plugin URL to play directly when no trailer list is given.
    all_trailers: optional JSON-encoded list of trailer dicts ('name',
        'key'); when provided, the user picks one from a dialog and its
        YouTube key replaces `url`.  (Fixed: previously a mutable default
        argument `[]`; `None` is behavior-identical since only truthiness
        is checked.)
    """
    if all_trailers:
        import xbmcgui
        import json
        from resources.lib.modules.utils import clean_file_name, to_utf8
        all_trailers = to_utf8(json.loads(all_trailers))
        choice = xbmcgui.Dialog().select(
            "Youtube Videos...",
            [clean_file_name(i['name']) for i in all_trailers])
        if choice < 0:
            return  # user cancelled the dialog
        url = 'plugin://plugin.video.youtube/play/?video_id=%s' % all_trailers[
            choice].get('key')
    try:
        xbmc.executebuiltin('RunPlugin(%s)' % url)
    except Exception:  # narrowed from bare except
        notification('Error Playing Trailer')
def build_list_object():
    """Render one Furk file as a playable Kodi directory entry.

    NOTE(review): this closure reads `item`, `formats`, `default_furk_icon`,
    `fanart`, `__handle__`, `build_url`, etc. from its enclosing scope, which
    is not visible here — confirm against the surrounding function.
    """
    try:
        cm = []
        url_params = {
            'mode': 'media_play',
            'url': item['url_dl'],
            'rootname': 'nill'
        }
        url = build_url(url_params)
        track_name = clean_file_name(
            batch_replace(to_utf8(item['name']), formats)).upper()
        listitem = xbmcgui.ListItem(track_name)
        # context-menu action: download this file instead of playing it
        down_file_params = {
            'mode': 'download_file',
            'name': item['name'],
            'url': item['url_dl'],
            'image': default_furk_icon,
            'db_type': 'furk_file'
        }
        cm.append(("[B]Download File[/B]",
                   'XBMC.RunPlugin(%s)' % build_url(down_file_params)))
        listitem.addContextMenuItems(cm)
        listitem.setThumbnailImage(default_furk_icon)
        listitem.setArt({'fanart': fanart})
        listitem.setInfo(type='music',
                         infoLabels={
                             'title': track_name,
                             'size': int(item['size']),
                             'album': item['path'],
                             'duration': item['length']
                         })
        listitem.setProperty("IsPlayable", "true")
        xbmcplugin.addDirectoryItem(__handle__, url, listitem, isFolder=False)
    except:  # NOTE(review): bare except — a malformed item is skipped silently
        pass
def _get_library_video(self, db_type, title, year, season=None, episode=None):
    """Find a matching non-.strm file already in the Kodi library.

    Queries Kodi's JSON-RPC VideoLibrary for a movie (db_type == 'movie')
    or a show episode matching title/year (year is matched +/- 1), then
    builds a result from the item's stream details.

    Returns {'name', 'file_id', 'quality', 'details'} or None when no match
    is found; any unexpected error is swallowed and None returned
    (best-effort, as in the rest of this module).
    """
    try:
        name = None
        # tolerate off-by-one release years between metadata sources
        years = (str(year), str(int(year) + 1), str(int(year) - 1))
        if db_type == 'movie':
            r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}' % years)
            r = json.loads(to_utf8(r))['result']['movies']
            try:
                r = [i for i in r if clean_file_name(title).lower() in clean_file_name(to_utf8(i['title'])).lower()]
                # .strm entries point back into this addon; only real files count
                r = [i for i in r if not to_utf8(i['file']).endswith('.strm')][0]
            except Exception:
                return None
            r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}' % str(r['movieid']))
            r = json.loads(to_utf8(r))['result']['moviedetails']
        else:
            r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["title"]}, "id": 1}' % years)
            r = json.loads(to_utf8(r))['result']['tvshows']
            try:
                # titles like "Show (2019)" are compared without the suffix
                r = [i for i in r if clean_file_name(title).lower() in (clean_file_name(to_utf8(i['title'])).lower() if not ' (' in to_utf8(i['title']) else clean_file_name(to_utf8(i['title'])).lower().split(' (')[0])][0]
            except Exception:
                return None
            r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}' % (str(season), str(episode), str(r['tvshowid'])))
            r = json.loads(to_utf8(r))['result']['episodes']
            try:
                r = [i for i in r if not to_utf8(i['file']).endswith('.strm')][0]
            except Exception:
                return None
            r = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}' % str(r['episodeid']))
            r = json.loads(to_utf8(r))['result']['episodedetails']
        url = r['file'].encode('utf-8')
        # derive a display name from the path: posix then windows separators
        try:
            name = url.split('/')[-1:][0]
        except Exception:
            name = None
        if not name:
            try:
                name = url.split('\\')[-1:][0]
            except Exception:
                name = None
        if not name:
            name = title
        try:
            width = int(r['streamdetails']['video'][0]['width'])
        except Exception:
            width = -1
        # BUGFIX: the original used independent `if`s on a variable that was
        # rebound to a string — '4K' was immediately clobbered by
        # `'4K' >= 1920` (truthy under Py2 str/int ordering; TypeError under
        # Py3), and widths 1900-1919 leaked through as a raw int.  Classify
        # once from the integer width with an elif chain instead.
        if width > 1920:
            quality = '4K'
        elif width >= 1920:
            quality = '1080p'
        elif width >= 1280:
            quality = '720p'
        else:
            quality = 'SD'
        # assemble "size | video codec | audio codec | channels", each part
        # best-effort
        release_details = []
        try:
            f = xbmcvfs.File(url)
            s = f.size()
            f.close()
            release_details.append('%.2f GB' % (float(s) / 1024 / 1024 / 1024))
        except Exception:
            pass
        try:
            c = r['streamdetails']['video'][0]['codec']
            if c == 'avc1':
                c = 'h264'
            release_details.append(c)
        except Exception:
            pass
        try:
            ac = r['streamdetails']['audio'][0]['codec']
            if ac == 'dca':
                ac = 'dts'
            if ac == 'dtshd_ma':
                ac = 'dts-hd ma'
            release_details.append(ac)
        except Exception:
            pass
        try:
            ach = r['streamdetails']['audio'][0]['channels']
            ach = {1: 'mono', 2: '2.0', 6: '5.1', 8: '7.1'}.get(ach, ach)
            release_details.append(ach)
        except Exception:
            pass
        release_details = ' | '.join(release_details).encode('utf-8')
        return {'name': name, 'file_id': url, 'quality': quality, 'details': release_details}
    except Exception:
        pass
def get_watched_items(db_type, page_no, letter, passed_list=[]):
    """Return a paginated list of watched movies or fully-watched shows.

    db_type: 'tvshow' or a movie db_type; selects the Trakt or local-DB
        indicator source depending on the watched-indicators setting.
    page_no/letter: forwarded to paginate_list for paging/filtering.
    passed_list: optional stringified list (parsed with ast.literal_eval)
        that bypasses the lookup entirely.
        NOTE(review): mutable default argument — harmless here since it is
        only read, never mutated.

    Returns (paginated_list, original_list, total_pages, limit) where
    original_list is [{'media_id', 'title'}, ...] sorted by title_key.
    """
    import ast
    from resources.lib.modules.nav_utils import paginate_list
    from resources.lib.modules.utils import title_key, to_utf8
    watched_indicators = settings.watched_indicators()
    limit = 40  # items per page
    if db_type == 'tvshow':
        from resources.lib.indexers.tvshows import aired_episode_number_tvshow
        if watched_indicators in (1, 2):
            # Trakt-backed indicators
            if not passed_list:
                from resources.lib.modules.trakt import trakt_indicators_tv
                data = trakt_indicators_tv()
                data = sorted(data, key=lambda tup: title_key(tup[3]))
                # keep only shows where every episode is watched
                # (tup[1] == watched count, tup[2] == episode list)
                original_list = [{
                    'media_id': i[0],
                    'title': i[3]
                } for i in data if i[1] == len(i[2])]
            else:
                original_list = ast.literal_eval(passed_list)
        else:
            # local watched-status database
            if not passed_list:
                from resources.lib.indexers.tvshows import make_fresh_tvshow_meta
                settings.check_database(WATCHED_DB)
                dbcon = database.connect(WATCHED_DB)
                dbcur = dbcon.cursor()
                dbcur.execute(
                    "SELECT media_id, title FROM watched_status WHERE db_type = ?",
                    ('episode', ))
                rows = dbcur.fetchall()
                dbcon.close()
                # de-duplicate (media_id, title) pairs across episodes
                watched_list = list(set(to_utf8([(i[0], i[1]) for i in rows])))
                data = []
                for item in watched_list:
                    # keep only shows flagged fully watched
                    watched = get_watched_status_tvshow(
                        item[0],
                        aired_episode_number_tvshow(
                            make_fresh_tvshow_meta('tmdb_id', item[0])))
                    if watched[0] == 1:
                        data.append(item)
                    else:
                        pass
                data = sorted(data, key=lambda tup: title_key(tup[1]))
                original_list = [{
                    'media_id': i[0],
                    'title': i[1]
                } for i in data]
            else:
                original_list = ast.literal_eval(passed_list)
    else:
        if watched_indicators in (1, 2):
            # Trakt-backed movie indicators
            if not passed_list:
                from resources.lib.modules.trakt import trakt_indicators_movies
                data = trakt_indicators_movies()
                data = sorted(data, key=lambda tup: title_key(tup[1]))
                original_list = [{
                    'media_id': i[0],
                    'title': i[1]
                } for i in data]
            else:
                original_list = ast.literal_eval(passed_list)
        else:
            # local watched-status database
            if not passed_list:
                settings.check_database(WATCHED_DB)
                dbcon = database.connect(WATCHED_DB)
                dbcur = dbcon.cursor()
                dbcur.execute(
                    "SELECT media_id, title FROM watched_status WHERE db_type = ?",
                    (db_type, ))
                rows = dbcur.fetchall()
                dbcon.close()
                data = to_utf8([(i[0], i[1]) for i in rows])
                data = sorted(data, key=lambda tup: title_key(tup[1]))
                original_list = [{
                    'media_id': i[0],
                    'title': i[1]
                } for i in data]
            else:
                original_list = ast.literal_eval(passed_list)
    paginated_list, total_pages = paginate_list(original_list, page_no,
                                                letter, limit)
    return paginated_list, original_list, total_pages, limit
def build_url(query):
    """Encode `query` into a callback URL for this plugin.

    Returns the module-level base URL with the utf-8 encoded query string
    appended after '?'.
    """
    import urllib
    from resources.lib.modules.utils import to_utf8
    encoded = urllib.urlencode(to_utf8(query))
    return '?'.join((__url__, encoded))
def _get(self, link):
    """GET `link` and return its body parsed as JSON, passed through to_utf8."""
    session = requests.Session()
    response = session.get(link)
    payload = json.loads(response.text)
    return to_utf8(payload)