def GET_SUBTILES(url, subtitle_completion1, subtitle_completion2):
    """Download the French and English Viki subtitle tracks to Kodi's temp dir.

    Each .srt file is fetched only when its completion percentage is high
    enough and the module-level flag ``se`` equals 'true' (``se`` is defined
    elsewhere in this module — presumably a "subtitles enabled" setting).

    Args:
        url: base stream URL, passed to SIGN() to build the signed subtitle URL.
        subtitle_completion1: completion percentage of the French track.
        subtitle_completion2: completion percentage of the English track.

    Returns:
        tuple(str, str): (french_path, english_path) — always returned, even
        when nothing was downloaded or an error occurred (best-effort).
    """
    srtsubs_path1 = VSPath('special://temp/vstream_viki_SubFrench.srt')
    srtsubs_path2 = VSPath('special://temp/vstream_viki_SubEnglish.srt')
    try:
        # French track: only fetched when at least 80% complete.
        if (int(subtitle_completion1) > 79 and se == 'true'):
            urlreq = SIGN(url, '/subtitles/fr.srt')
            oRequestHandler = cRequestHandler(urlreq)
            oRequestHandler.addHeaderEntry('User-Agent', UA)
            data = oRequestHandler.request()
            with open(srtsubs_path1, "w") as subfile:
                subfile.write(data)
        # English track: any non-zero completion is accepted.
        if (int(subtitle_completion2) > 0 and se == 'true'):
            urlreq = SIGN(url, '/subtitles/en.srt')
            oRequestHandler = cRequestHandler(urlreq)
            oRequestHandler.addHeaderEntry('User-Agent', UA)
            data = oRequestHandler.request()
            with open(srtsubs_path2, "w") as subfile:
                subfile.write(data)
        else:
            # NOTE(review): this else is bound to the *English* check only;
            # a skipped French track is silently ignored — confirm intended.
            # VSlog('GET_SUBTILES:erreur completion')
            pass
    except Exception:
        # Fix: was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrowed to Exception (still best-effort).
        # VSlog('GET_SUBTILES:erreur exception')
        pass
    return srtsubs_path1, srtsubs_path2
def service():
    """Background recording scheduler.

    Polls the recording folder every ``heure_verification`` seconds; when a
    script named after the current 'day-hour-minute' exists, runs it with an
    external python interpreter and stops polling.
    """
    ADDON = addon()
    recordIsActivate = ADDON.getSetting('enregistrement_activer')
    if recordIsActivate == 'false':
        return
    pathRecording = 'special://userdata/addon_data/plugin.video.vstream/Enregistrement'
    path = ''.join([pathRecording])
    # Create the scheduling folder on first run.
    if not xbmcvfs.exists(path):
        xbmcvfs.mkdir(path)
    # listdir returns a (dirs, files) tuple; matched below via str() lookup.
    recordList = xbmcvfs.listdir(path)
    interval = ADDON.getSetting('heure_verification')
    ADDON.setSetting('path_enregistrement_programmation', path)
    recordInProgress = False
    monitor = xbmc.Monitor()
    # Release the addon handle before the long-lived wait loop.
    del ADDON
    while not monitor.abortRequested() and not recordInProgress == True:
        # waitForAbort returns True when Kodi is shutting down.
        if monitor.waitForAbort(int(interval)):
            break
        # Scheduled scripts are named '<day>-<hour>-<minute>.py'.
        hour = datetime.now().strftime('%d-%H-%M') + '.py'
        if hour in str(recordList):
            hour = path + '/' + hour
            hour = VSPath(hour)
            recordInProgress = True
            VSlog('python ' + hour)
            # NOTE(review): requires a 'python' binary on PATH; not true on
            # all Kodi platforms (e.g. Android) — confirm.
            command = 'python ' + hour
            proc = subprocess.Popen(command, stdout=subprocess.PIPE)
            p_status = proc.wait()
    # server_thread is a module-level thread started elsewhere in this file;
    # wait for it before the service exits.
    server_thread.join()
def new_getaddrinfo(self, *args):
    """DNS-over-custom-resolver replacement for socket.getaddrinfo.

    Resolves ``args[0]`` via open DNS resolvers (bypassing ISP DNS blocking)
    and mimics the socket.getaddrinfo return shape. Falls back to the saved
    original getaddrinfo on any failure.
    """
    try:
        import sys
        import dns.resolver
        path = VSPath('special://home/addons/script.module.dnspython/lib/').decode('utf-8')
        if path not in sys.path:
            sys.path.append(path)
        host = args[0]
        port = args[1]
        # Keep the domain only: http://example.com/foo/bar => example.com
        if "//" in host:
            # Fix: was host[host.find("//"):], which kept the leading '//'
            # so the next strip reduced host to '' — skip past the '//'.
            host = host[host.find("//") + 2:]
        if "/" in host:
            host = host[:host.find("/")]
        resolver = dns.resolver.Resolver(configure=False)
        # Open DNS resolvers: https://www.fdn.fr/actions/dns/
        resolver.nameservers = ['80.67.169.12', '2001:910:800::12', '80.67.169.40', '2001:910:800::40']
        answer = resolver.query(host, 'a')
        host_found = str(answer[0])
        VSlog("new_getaddrinfo found host %s" % host_found)
        # Keep same return schema as socket.getaddrinfo (family, type, proto, canonname, sockaddr)
        return [(2, 1, 0, '', (host_found, port)), (2, 1, 0, '', (host_found, port))]
    except Exception as e:
        VSlog("new_getaddrinfo ERROR: {0}".format(e))
        # self.save_getaddrinfo holds the original socket.getaddrinfo.
        return self.save_getaddrinfo(*args)
def AddDownload(self, meta):
    """Register a new download in the database after asking the user for a
    file name and a destination folder.

    Args:
        meta: dict with at least 'title' and 'url'; 'title' and 'path' are
              overwritten in place before insertion.

    Returns:
        True on success, False when the user cancels or the insert fails.
    """
    sTitle = meta['title']
    sUrl = meta['url']
    # Build the target file name from the URL/title.
    sTitle = self.__createTitle(sUrl, sTitle)
    sTitle = self.__createDownloadFilename(sTitle)
    # Let the user edit the proposed name; returns False on cancel.
    sTitle = cGui().showKeyBoard(sTitle)
    if (sTitle != False and len(sTitle) > 0):
        # Destination folder (defaults to the saved setting).
        sPath2 = VSPath(self.ADDON.getSetting('download_folder'))
        dialog = xbmcgui.Dialog()
        sPath = dialog.browse(3, 'Downloadfolder', 'files', '', False, False, sPath2)
        if (sPath != ''):
            self.ADDON.setSetting('download_folder', sPath)
            # NOTE(review): assumes sPath ends with a separator (Kodi's
            # browse() normally returns one) — confirm.
            sDownloadPath = VSPath(sPath + '%s' % (sTitle))
            if xbmcvfs.exists(sDownloadPath):
                # Name collision: warn and restart the whole dialog flow.
                self.DIALOG.VSinfo(self.ADDON.VSlang(30082), sTitle)
                return self.AddDownload(meta)
            else:
                # Pre-create the file so the path is reserved.
                xbmcvfs.File(sDownloadPath, 'w')
                try:
                    VSlog(self.ADDON.VSlang(30083) + ' ' + str(sUrl))
                    meta['title'] = sTitle
                    meta['path'] = sDownloadPath
                    cDb().insert_download(meta)
                    return True
                except:
                    # print_exc()
                    self.DIALOG.VSinfo(self.ADDON.VSlang(30084), sTitle)
                    VSlog('Unable to download')
    return False
def programmation_enregistrement(self, sUrl):
    """Schedule a stream recording: generate a standalone Python script that
    runs ffmpeg at the chosen time (picked up later by service()).

    Args:
        sUrl: stream URL to record (.m3u8 gets a lighter ffmpeg input spec).
    """
    oGui = cGui()
    ADDON = addon()
    # ffmpeg input options depend on the stream type.
    if '.m3u8' in sUrl:
        header = '-fflags +genpts+igndts -y -i "' + sUrl + '"'
    else:
        header = '-re -reconnect 1 -reconnect_at_eof 1 -reconnect_streamed 1 -reconnect_delay_max 4294 -timeout 2000000000 -f mpegts -re -flags +global_header -fflags +genpts+igndts -y -i "' + sUrl + '" -headers "User-Agent: Mozilla/5.0+(X11;+Linux+i686)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Ubuntu+Chromium/48.0.2564.116+Chrome/48.0.2564.116+Safari/537.36" -sn -c:v libx264 -c:a copy -map 0 -segment_format mpegts -segment_time -1'
    pathEnregistrement = ADDON.getSetting(
        'path_enregistrement_programmation')
    currentPath = ADDON.getSetting('path_enregistrement').replace(
        '\\', '/')
    ffmpeg = ADDON.getSetting('path_ffmpeg').replace('\\', '/')
    # Ask start time, end time and title (quotes escaped for the shell cmd).
    heureFichier = oGui.showKeyBoard(
        heading=
        "Heure du début d'enregistrement au format Date-Heure-Minute")
    heureFin = oGui.showKeyBoard(
        heading="Heure de fin d'enregistrement au format Heure-Minute")
    titre = oGui.showKeyBoard(heading="Titre de l'enregistrement").replace(
        "'", "\\'")
    # NOTE(review): start uses '%d-%H-%M' but end only '%H-%M'; the
    # subtraction looks wrong across day boundaries — confirm.
    heureDebut = GetTimeObject(heureFichier, '%d-%H-%M')
    heureFin = GetTimeObject(heureFin, '%H-%M')
    durer = heureFin - heureDebut
    # Add the configured safety margin (minutes) to the duration.
    marge = ADDON.getSetting('marge_auto')
    timedelta = datetime.timedelta(minutes=int(marge))
    durer = durer + timedelta
    # The generated script is named after the start time so service() can
    # match it against the current clock.
    realPath = VSPath(pathEnregistrement + '/' + str(heureFichier) +
                      '.py').replace('\\', '\\\\')
    f = xbmcvfs.File(realPath, 'w')
    # str(durer) yields 'H:MM:SS', a format ffmpeg's -t accepts.
    read = f.write('''#-*- coding: utf-8 -*-
import os,subprocess
command = '"''' + ffmpeg + '''" ''' + header + ''' -t ''' + str(durer) + ''' "''' + currentPath + titre + '''.mkv"'
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
p_status = proc.wait()
f = open("''' + currentPath + '''/test.txt",'w')
f.write('Finit avec code erreur ' + str(p_status))
f.close()''')
    f.close()
    oDialog = dialog().VSinfo(
        'Redémarrer Kodi pour prendre en compte la planification',
        'matrix', 10)
    oGui.setEndOfDirectory()
def openLibrary(self):
    """Browse a local library folder: subfolders become navigable dirs,
    .strm files become playable video items."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sFile = oInputParameterHandler.getValue('filePath')
    # listdir returns (dirs, files).
    listDir = xbmcvfs.listdir(sFile)
    # NOTE(review): this shows dirs OR files, never both at the same level —
    # confirm that mixed folders are not expected.
    if listDir[0]:
        data = listDir[0]
    else:
        data = listDir[1]
    addon_handle = None
    for i in data:
        path = VSPath(sFile + '/' + i)  # strip the special:// for later use
        sTitle = os.path.basename(path)  # title of the .strm file
        if '.strm' in i:
            # Playable entry: register it on the plugin handle.
            sHosterUrl = sFile + '/' + i
            addon_handle = int(sys.argv[1])
            xbmcplugin.setContent(addon_handle, 'video')
            li = xbmcgui.ListItem(sTitle)
            xbmcplugin.addDirectoryItem(handle=addon_handle,
                                        url=sHosterUrl,
                                        listitem=li)
        else:
            # Sub-folder: recurse through this same entry point.
            oOutputParameterHandler = cOutputParameterHandler()
            oOutputParameterHandler.addParameter('filePath', sFile + '/' + i)
            oGui.addDir(SITE_IDENTIFIER, 'openLibrary', sTitle, 'annees.png',
                        oOutputParameterHandler)
    # Close the directory on whichever handle was actually used.
    if addon_handle:
        xbmcplugin.endOfDirectory(addon_handle)
    else:
        oGui.setEndOfDirectory()
def main(self, env):
    """Settings/maintenance dispatcher.

    ``env`` selects one maintenance action: open sub-addon settings,
    show changelogs, clear caches/databases, upload the log, pick search
    sites, clear thumbnails, or import/export the vStream database.
    """
    if (env == 'urlresolver'):
        addon('script.module.urlresolver').openSettings()
        return
    elif (env == 'metahandler'):
        addon('script.module.metahandler').openSettings()
        return
    elif (env == 'changelog_old'):
        try:
            sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/changelog.txt'
            oRequest = urllib2.Request(sUrl)
            oResponse = urllib2.urlopen(oRequest)
            # Under Python 3 (Kodi 19+) the response must be decoded.
            if xbmc.getInfoLabel('system.buildversion')[0:2] >= '19':
                sContent = oResponse.read().decode('utf-8')
            else:
                sContent = oResponse.read()
            self.TextBoxes('vStream Changelog', sContent)
        except:
            self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
        return
    elif (env == 'changelog'):
        # Custom dialog listing the latest GitHub commits.
        class XMLDialog(xbmcgui.WindowXMLDialog):
            def __init__(self, *args, **kwargs):
                xbmcgui.WindowXMLDialog.__init__(self)
                pass

            def onInit(self):
                self.container = self.getControl(6)
                self.button = self.getControl(5)
                self.getControl(3).setVisible(False)
                self.getControl(1).setLabel('ChangeLog')
                self.button.setLabel('OK')
                sUrl = 'https://api.github.com/repos/Kodi-vStream/venom-xbmc-addons/commits'
                oRequest = urllib2.Request(sUrl)
                oResponse = urllib2.urlopen(oRequest)
                # Under Python 3 the response must be decoded.
                if xbmc.getInfoLabel('system.buildversion')[0:2] >= '19':
                    sContent = oResponse.read().decode('utf-8')
                else:
                    sContent = oResponse.read()
                result = json.loads(sContent)
                listitems = []
                for item in result:
                    # author
                    icon = item['author']['avatar_url']
                    login = item['author']['login']
                    # commit message
                    try:
                        desc = item['commit']['message'].encode("utf-8")
                    except:
                        desc = 'None'
                    listitem = xbmcgui.ListItem(label=login, label2=desc)
                    listitem.setArt({'icon': icon, 'thumb': icon})
                    listitems.append(listitem)
                self.container.addItems(listitems)
                self.setFocus(self.container)

            def onClick(self, controlId):
                self.close()
                return

            def onFocus(self, controlId):
                self.controlId = controlId

            def _close_dialog(self):
                self.close()

        path = "special://home/addons/plugin.video.vstream"
        wd = XMLDialog('DialogSelect.xml', path, "Default")
        wd.doModal()
        del wd
        return
    elif (env == 'soutient'):
        try:
            sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/soutient.txt'
            oRequest = urllib2.Request(sUrl)
            oResponse = urllib2.urlopen(oRequest)
            # Under Python 3 the response must be decoded.
            if xbmc.getInfoLabel('system.buildversion')[0:2] >= '19':
                sContent = oResponse.read().decode('utf-8')
            else:
                sContent = oResponse.read()
            self.TextBoxes('vStream Soutient', sContent)
        except:
            self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
        return
    elif (env == 'addon'):
        # Clear the metadata cache (TMDb tables).
        if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
            cached_Cache = "special://home/userdata/addon_data/plugin.video.vstream/video_cache.db"
            # important: only xbmcvfs can read special:// paths
            try:
                cached_Cache = VSPath(cached_Cache).decode("utf-8")
            except AttributeError:
                cached_Cache = VSPath(cached_Cache)
            try:
                db = sqlite.connect(cached_Cache)
                dbcur = db.cursor()
                dbcur.execute('DELETE FROM movie')
                dbcur.execute('DELETE FROM tvshow')
                dbcur.execute('DELETE FROM season')
                dbcur.execute('DELETE FROM episode')
                db.commit()
                dbcur.close()
                db.close()
                self.DIALOG.VSinfo(self.ADDON.VSlang(30090))
            except:
                self.DIALOG.VSerror(self.ADDON.VSlang(30091))
        return
    elif (env == 'clean'):
        # Let the user pick which vstream.db table to purge.
        liste = [
            'Historiques', 'Lecture en cours', 'Marqués vues',
            'Marque-Pages', 'Téléchargements'
        ]
        ret = self.DIALOG.VSselect(liste, self.ADDON.VSlang(30110))
        cached_DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
        # important: only xbmcvfs can read special:// paths
        try:
            cached_DB = VSPath(cached_DB).decode("utf-8")
        except AttributeError:
            cached_DB = VSPath(cached_DB)
        sql_drop = ""
        if ret > -1:  # -1 means the selection dialog was cancelled
            if ret == 0:
                sql_drop = 'DELETE FROM history'
            elif ret == 1:
                sql_drop = 'DELETE FROM resume'
            elif ret == 2:
                sql_drop = 'DELETE FROM watched'
            elif ret == 3:
                sql_drop = 'DELETE FROM favorite'
            elif ret == 4:
                sql_drop = 'DELETE FROM download'
            try:
                db = sqlite.connect(cached_DB)
                dbcur = db.cursor()
                dbcur.execute(sql_drop)
                db.commit()
                dbcur.close()
                db.close()
                self.DIALOG.VSok(self.ADDON.VSlang(30090))
            except:
                self.DIALOG.VSerror(self.ADDON.VSlang(30091))
        return
    elif (env == 'xbmc'):
        # Clear Kodi's whole temp folder.
        if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
            path = "special://temp/"
            try:
                xbmcvfs.rmdir(path, True)
                self.DIALOG.VSok(self.ADDON.VSlang(30092))
            except:
                self.DIALOG.VSerror(self.ADDON.VSlang(30093))
        return
    elif (env == 'fi'):
        # Clear the archive cache only.
        if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
            path = "special://temp/archive_cache/"
            try:
                xbmcvfs.rmdir(path, True)
                self.DIALOG.VSok(self.ADDON.VSlang(30095))
            except:
                self.DIALOG.VSerror(self.ADDON.VSlang(30096))
        return
    elif (env == 'uplog'):
        # Upload kodi.log to a paste service and store the returned code.
        if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
            path = "special://logpath/kodi.log"
            UA = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'
            headers = {'User-Agent': UA}  # NOTE(review): unused, kept as-is
            if xbmcvfs.exists(path):
                post_data = {}  # NOTE(review): unused, kept as-is
                cUrl = 'http://slexy.org/index.php/submit'
                logop = xbmcvfs.File(path, 'rb')
                result = logop.read()
                logop.close()
                oRequestHandler = cRequestHandler(cUrl)
                oRequestHandler.setRequestType(1)
                oRequestHandler.addHeaderEntry('User-Agent', UA)
                oRequestHandler.addParameters('raw_paste', result)
                oRequestHandler.addParameters('author', "kodi.log")
                oRequestHandler.addParameters('language', "text")
                oRequestHandler.addParameters('permissions', 1)  # private
                oRequestHandler.addParameters('expire', 259200)  # 3 days
                oRequestHandler.addParameters('submit', 'Submit+Paste')
                sHtmlContent = oRequestHandler.request()
                # The paste id is the tail of the redirect URL.
                code = oRequestHandler.getRealUrl().replace(
                    'http://slexy.org/view/', '')
                self.ADDON.setSetting('service_log', code)
                self.DIALOG.VSok(self.ADDON.VSlang(30097) + ' ' + code)
        return
    elif (env == 'search'):
        from resources.lib.handler.pluginHandler import cPluginHandler
        valid = '[COLOR green][x][/COLOR]'

        # Dialog toggling which site plugins participate in global search.
        class XMLDialog(xbmcgui.WindowXMLDialog):
            ADDON = addon()

            def __init__(self, *args, **kwargs):
                xbmcgui.WindowXMLDialog.__init__(self)
                pass

            def onInit(self):
                self.container = self.getControl(6)
                self.button = self.getControl(5)
                self.getControl(3).setVisible(False)
                self.getControl(1).setLabel(self.ADDON.VSlang(30094))
                self.button.setLabel('OK')
                listitems = []
                oPluginHandler = cPluginHandler()
                aPlugins = oPluginHandler.getAllPlugins()
                for aPlugin in aPlugins:
                    # check whether the plugin is already enabled
                    sPluginSettingsName = 'plugin_' + aPlugin[1]
                    bPlugin = self.ADDON.getSetting(sPluginSettingsName)
                    icon = "special://home/addons/plugin.video.vstream/resources/art/sites/%s.png" % aPlugin[
                        1]
                    # Strip the color markup from the display title.
                    stitle = aPlugin[0].replace('[COLOR violet]', '').replace('[COLOR orange]', '')\
                        .replace('[/COLOR]', '').replace('[COLOR dodgerblue]', '')\
                        .replace('[COLOR coral]', '')
                    if (bPlugin == 'true'):
                        stitle = ('%s %s') % (stitle, valid)
                    listitem = xbmcgui.ListItem(label=stitle,
                                                label2=aPlugin[2])
                    listitem.setArt({'icon': icon, 'thumb': icon})
                    listitem.setProperty('Addon.Summary', aPlugin[2])
                    listitem.setProperty('sitename', aPlugin[1])
                    if (bPlugin == 'true'):
                        listitem.select(True)
                    listitems.append(listitem)
                self.container.addItems(listitems)
                self.setFocus(self.container)

            def onClick(self, controlId):
                if controlId == 5:
                    self.close()
                    return
                elif controlId == 99:
                    window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
                    del window
                    self.close()
                    return
                elif controlId == 7:
                    window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
                    del window
                    self.close()
                    return
                elif controlId == 6:
                    # Toggle the selected plugin on/off and persist it.
                    item = self.container.getSelectedItem()
                    if item.isSelected() == True:
                        label = item.getLabel().replace(valid, '')
                        item.setLabel(label)
                        item.select(False)
                        sPluginSettingsName = ('plugin_%s') % (
                            item.getProperty('sitename'))
                        self.ADDON.setSetting(sPluginSettingsName,
                                              str('false'))
                    else:
                        label = ('%s %s') % (item.getLabel(), valid)
                        item.setLabel(label)
                        item.select(True)
                        sPluginSettingsName = ('plugin_%s') % (
                            item.getProperty('sitename'))
                        self.ADDON.setSetting(sPluginSettingsName,
                                              str('true'))
                    return

            def onFocus(self, controlId):
                self.controlId = controlId

            def _close_dialog(self):
                self.close()

            # def onAction(self, action):
            #     if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448):
            #         self.close()

        path = "special://home/addons/plugin.video.vstream"
        wd = XMLDialog('DialogSelect.xml', path, "Default")
        wd.doModal()
        del wd
        return
    elif (env == 'thumb'):
        # Clear the thumbnail folder and the Textures*.db databases.
        if self.DIALOG.VSyesno(self.ADDON.VSlang(30098)):
            text = False
            path = "special://userdata/Thumbnails/"
            path_DB = "special://userdata/Database"
            try:
                xbmcvfs.rmdir(path, True)
                text = 'Clear Thumbnail Folder, Successful[CR]'
            except:
                text = 'Clear Thumbnail Folder, Error[CR]'
            folder, items = xbmcvfs.listdir(path_DB)
            items.sort()
            for sItemName in items:
                # Matches 'Textures13.db' etc. regardless of case of 'T'.
                if "extures" in sItemName:
                    cached_Cache = "/".join([path_DB, sItemName])
                    try:
                        xbmcvfs.delete(cached_Cache)
                        text += 'Clear Thumbnail DB, Successful[CR]'
                    except:
                        text += 'Clear Thumbnail DB, Error[CR]'
            if text:
                text = "%s (Important relancer Kodi)" % text
                self.DIALOG.VSok(text)
        return
    elif (env == 'sauv'):
        # Import/export the vstream.db database.
        select = self.DIALOG.VSselect(['Import', 'Export'])
        DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
        if select >= 0:
            new = self.DIALOG.browse(3, 'vStream', "files")
            if new:
                try:
                    if select == 0:
                        xbmcvfs.delete(DB)
                        # copy(source, destination)--copy file to destination, returns true/false.
                        xbmcvfs.copy(new + 'vstream.db', DB)
                    elif select == 1:
                        # copy(source, destination)--copy file to destination, returns true/false.
                        xbmcvfs.copy(DB, new + 'vstream.db')
                    self.DIALOG.VSinfo(self.ADDON.VSlang(30099))
                except:
                    self.DIALOG.VSerror(self.ADDON.VSlang(30100))
        return
    else:
        return
    return
class cDb:
    """SQLite persistence layer for vStream: history, watched flags, resume
    points, bookmarks and downloads, stored in vstream.db.

    Fix in this revision: the UNIQUE-constraint fallbacks in insert_history /
    insert_bookmark tested ``e.message``, which does not exist on Python 3
    exceptions (it raised AttributeError inside the handler); both now use
    ``str(e)``, which works on Python 2 and 3.
    """
    # important: only xbmcvfs can read the special:// path
    DB = 'special://home/userdata/addon_data/plugin.video.vstream/vstream.db'
    # .decode exists on Py2 str only; on Py3 VSPath already returns str.
    try:
        REALDB = VSPath(DB).decode('utf-8')
    except AttributeError:
        REALDB = VSPath(DB)

    def __init__(self):
        """Open the database, creating the schema on first run."""
        VSlog('DB engine for db : ' + sqlite.__name__)
        try:
            if not xbmcvfs.exists(self.DB):
                self.db = sqlite.connect(self.REALDB)
                self.db.row_factory = sqlite.Row
                self.dbcur = self.db.cursor()
                self._create_tables()
                return
        except Exception:
            VSlog('Error: Unable to write to %s' % self.REALDB)
            pass
        try:
            self.db = sqlite.connect(self.REALDB)
            self.db.row_factory = sqlite.Row
            self.dbcur = self.db.cursor()
        except Exception:
            VSlog('Error: Unable to access to %s' % self.REALDB)
            pass

    def __del__(self):
        """Cleanup db when object destroyed."""
        try:
            self.dbcur.close()
            self.db.close()
        except Exception:
            pass

    def _create_tables(self):
        """Create all tables (history, resume, watched, favorite, download)."""
        # sql_create2 = 'DROP TABLE history'
        sql_create = "CREATE TABLE IF NOT EXISTS history ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "disp TEXT, "\
            "icone TEXT, "\
            "isfolder TEXT, "\
            "level TEXT, "\
            "lastwatched TIMESTAMP, "\
            "UNIQUE(title)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS resume ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "hoster TEXT, "\
            "point TEXT, "\
            "UNIQUE(title, hoster)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS watched ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "site TEXT, "\
            "UNIQUE(title, site)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS favorite ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "siteurl TEXT, "\
            "site TEXT, "\
            "fav TEXT, "\
            "cat TEXT, "\
            "icon TEXT, "\
            "fanart TEXT, "\
            "UNIQUE(title, site)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS download ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "url TEXT, "\
            "path TEXT, "\
            "cat TEXT, "\
            "icon TEXT, "\
            "size TEXT,"\
            "totalsize TEXT, "\
            "status TEXT, "\
            "UNIQUE(title, path)"\
            ");"
        self.dbcur.execute(sql_create)
        VSlog('Table initialized')

    # Do not use this function on filesystem paths.
    def str_conv(self, data):
        """Normalize a title to plain ASCII for stable DB keys."""
        if isinstance(data, str):
            # Must be encoded in UTF-8 (no-op on Python 3).
            try:
                data = data.decode('utf8')
            except AttributeError:
                pass
            import unicodedata
            data = unicodedata.normalize('NFKD', data).encode('ascii',
                                                              'ignore')
            try:
                data = data.decode(
                    'string-escape'
                )  # WARNING: breaks on paths because of the '/' character
            except:
                pass
        return data

    # ***********************************
    # History functions
    # ***********************************

    def insert_history(self, meta):
        """Insert a search entry; on duplicate title, update it in place."""
        # title = Unquote(meta['title']).decode('ascii', 'ignore')
        title = self.str_conv(Unquote(meta['title']))
        disp = meta['disp']
        icon = 'icon.png'
        try:
            ex = 'INSERT INTO history (title, disp, icone) VALUES (?, ?, ?)'
            self.dbcur.execute(ex, (title, disp, icon))
            self.db.commit()
            VSlog('SQL INSERT history Successfully')
        except Exception as e:
            # Fix: was `e.message` (Python 2 only); str(e) works on both.
            if 'UNIQUE constraint failed' in str(e):
                # NOTE(review): string-formatted SQL; a title containing a
                # quote will break this statement.
                ex = "UPDATE history set title = '%s', disp = '%s', icone= '%s' WHERE title = '%s'" % (
                    title, disp, icon, title)
                self.dbcur.execute(ex)
                self.db.commit()
                VSlog('SQL UPDATE history Successfully')
            VSlog('SQL ERROR INSERT, title = %s, %s' % (title, e))
            pass

    def get_history(self):
        """Return all history rows, or None on error."""
        sql_select = 'SELECT * FROM history'
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception as e:
            VSlog('SQL ERROR EXECUTE, %s' % e)
            return None

    def del_history(self):
        """Delete one history entry (param 'searchtext') or the whole table."""
        from resources.lib.gui.gui import cGui
        oGui = cGui()
        oInputParameterHandler = cInputParameterHandler()
        if oInputParameterHandler.exist('searchtext'):
            sql_delete = "DELETE FROM history WHERE title = '%s'" % (
                oInputParameterHandler.getValue('searchtext'))
        else:
            sql_delete = 'DELETE FROM history;'
        try:
            self.dbcur.execute(sql_delete)
            self.db.commit()
            dialog().VSinfo(addon().VSlang(30041))
            oGui.updateDirectory()
            return False, False
        except Exception:
            VSlog('SQL ERROR DELETE : %s' % sql_delete)
            return False, False

    # ***********************************
    # Watched functions
    # ***********************************

    def insert_watched(self, meta):
        """Mark a title as watched for a given site."""
        title = meta['title']
        if not title:
            return
        site = QuotePlus(meta['site'])
        ex = 'INSERT INTO watched (title, site) VALUES (?, ?)'
        self.dbcur.execute(ex, (title, site))
        try:
            self.db.commit()
            VSlog('SQL INSERT watched Successfully')
        except Exception:
            VSlog('SQL ERROR INSERT watched : title = %s, site = %s' %
                  (title, site))
            pass

    def get_watched(self, meta):
        """Return 1 if the title is marked watched, 0 if not, None on error."""
        title = meta['title']
        if not title:
            return None
        sql_select = "SELECT * FROM watched WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            if matchedrow:
                return 1
            return 0
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return None

    def del_watched(self, meta):
        """Remove the watched flag for a title."""
        title = meta['title']
        if not title:
            return
        sql_select = "DELETE FROM watched WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    # ***********************************
    # Resume functions
    # ***********************************

    def insert_resume(self, meta):
        """Store a playback resume point (one row per site/hoster)."""
        title = self.str_conv(meta['title'])
        site = QuotePlus(meta['site'])
        # hoster = meta['hoster']
        point = meta['point']
        # Only one resume point per hoster: drop any previous one first.
        ex = "DELETE FROM resume WHERE hoster = '%s'" % site
        self.dbcur.execute(ex)
        ex = 'INSERT INTO resume (title, hoster, point) VALUES (?, ?, ?)'
        self.dbcur.execute(ex, (title, site, point))
        try:
            self.db.commit()
        except Exception:
            VSlog('SQL ERROR INSERT resume, title = %s' % title)
            pass

    def get_resume(self, meta):
        """Return the resume row(s) for a site, or None on error."""
        # title = self.str_conv(meta['title'])
        site = QuotePlus(meta['site'])
        sql_select = "SELECT * FROM resume WHERE hoster = '%s'" % site
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return None

    def del_resume(self, meta):
        """Delete the resume point of a site."""
        site = QuotePlus(meta['site'])
        sql_select = "DELETE FROM resume WHERE hoster = '%s'" % site
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    # ***********************************
    # Bookmark functions
    # ***********************************

    def insert_bookmark(self, meta):
        """Insert a bookmark; warn the user when it already exists."""
        title = self.str_conv(meta['title'])
        siteurl = QuotePlus(meta['siteurl'])
        try:
            sIcon = meta['icon'].decode('UTF-8')
        except:
            sIcon = meta['icon']
        try:
            ex = 'INSERT INTO favorite (title, siteurl, site, fav, cat, icon, fanart) VALUES (?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(ex,
                               (title, siteurl, meta['site'], meta['fav'],
                                meta['cat'], sIcon, meta['fanart']))
            self.db.commit()
            dialog().VSinfo(addon().VSlang(30042), meta['title'])
            VSlog('SQL INSERT favorite Successfully')
        except Exception as e:
            # Fix: was `e.message` (Python 2 only); str(e) works on both.
            if 'UNIQUE constraint failed' in str(e):
                dialog().VSinfo(addon().VSlang(30043), meta['title'])
            VSlog('SQL ERROR INSERT : %s' % e)
            pass

    def get_bookmark(self):
        """Return all bookmarks, or None on error."""
        sql_select = 'SELECT * FROM favorite'
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception:
            VSlog('SQL ERROR EXECUTE')
            return None

    def del_bookmark(self, sSiteUrl='', sMovieTitle='', sCat='', sAll=False):
        """Delete bookmarks by URL+title, URL, category, or all of them."""
        sql_delete = None
        # Delete everything
        if sAll:
            sql_delete = 'DELETE FROM favorite;'
        # Delete one bookmark by title (and URL)
        elif sMovieTitle:
            siteUrl = QuotePlus(sSiteUrl)
            title = self.str_conv(sMovieTitle)
            title = title.replace("'", r"''")  # escape quotes for SQL
            sql_delete = "DELETE FROM favorite WHERE siteurl = '%s' AND title = '%s'" % (
                siteUrl, title)
        # Delete one bookmark by URL only
        elif sSiteUrl:
            siteUrl = QuotePlus(sSiteUrl)
            sql_delete = "DELETE FROM favorite WHERE siteurl = '%s'" % siteUrl
        # Delete a whole category
        elif sCat:
            sql_delete = "DELETE FROM favorite WHERE cat = '%s'" % sCat
        if sql_delete:
            from resources.lib.gui.gui import cGui
            try:
                self.dbcur.execute(sql_delete)
                self.db.commit()
                update = self.db.total_changes
                if not update and sSiteUrl and sMovieTitle:
                    # Nothing matched: retry with the URL alone.
                    return self.del_bookmark(sSiteUrl)
                dialog().VSinfo(addon().VSlang(30044))
                cGui().updateDirectory()
                return False, False
            except Exception:
                VSlog('SQL ERROR %s' % sql_delete)
                return False, False

    # ***********************************
    # Download functions
    # ***********************************

    def insert_download(self, meta):
        """Queue a new download (status 0 = pending)."""
        title = self.str_conv(meta['title'])
        url = QuotePlus(meta['url'])
        sIcon = QuotePlus(meta['icon'])
        sPath = meta['path']
        ex = 'INSERT INTO download (title, url, path, cat, icon, size, totalsize, status) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
        self.dbcur.execute(
            ex, (title, url, sPath, meta['cat'], sIcon, '', '', 0))
        try:
            self.db.commit()
            VSlog('SQL INSERT download Successfully')
            dialog().VSinfo(addon().VSlang(30042), meta['title'])
        except Exception:
            VSlog('SQL ERROR INSERT into download')
            pass

    def get_download(self, meta=''):
        """Return all downloads, or the pending ones matching meta['url']."""
        if meta == '':
            sql_select = 'SELECT * FROM download'
        else:
            url = QuotePlus(meta['url'])
            sql_select = "SELECT * FROM download WHERE url = '%s' AND status = '0'" % url
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return None

    def clean_download(self):
        """Remove finished downloads (status 2)."""
        sql_select = "DELETE FROM download WHERE status = '2'"
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    def reset_download(self, meta):
        """Reset a finished download (status 2 -> 0) so it runs again."""
        url = QuotePlus(meta['url'])
        sql_select = "UPDATE download SET status = '0' WHERE status = '2' AND url = '%s'" % url
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    def del_download(self, meta):
        """Delete a download row, matched by URL or by target path."""
        if len(meta['url']) > 1:
            url = QuotePlus(meta['url'])
            sql_select = "DELETE FROM download WHERE url = '%s'" % url
        elif len(meta['path']) > 1:
            path = meta['path']
            sql_select = "DELETE FROM download WHERE path = '%s'" % path
        else:
            return
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    def cancel_download(self):
        """Put every in-progress download (status 1) back to pending (0)."""
        sql_select = "UPDATE download SET status = '0' WHERE status = '1'"
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    def update_download(self, meta):
        """Update progress (size/totalsize/status) of a download by path."""
        path = meta['path']
        size = meta['size']
        totalsize = meta['totalsize']
        status = meta['status']
        sql_select = "UPDATE download set size = '%s', totalsize = '%s', status= '%s' WHERE path = '%s'" % (
            size, totalsize, status, path)
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False
import re
import os
import xbmcaddon
from resources.lib.comaddon import VSlog, xbmc, VSPath
from resources.lib.handler.requestHandler import cRequestHandler
try:
    # Python 2
    import urllib2
except ImportError:
    # Python 3
    import urllib.request as urllib2

# Real filesystem path of the mando add-on's profile folder.
PathCache = VSPath(
    xbmcaddon.Addon('plugin.video.mando').getAddonInfo('profile'))
# User-Agent sent with anti-bot challenge requests.
UA = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'


class Stormwall(object):
    """Solver state for the Stormwall anti-DDoS challenge.

    Holds the challenge cookie parameters (cE/cK/cN/cO), the decoding
    alphabet and working buffers used while solving the challenge.
    """

    def __init__(self):
        # Challenge cookie fields, filled in while parsing the page.
        self.cE = ''
        self.cK = ''
        self.cN = ''
        self.cO = ''
        # Alphabet used to decode the challenge token.
        self._0xbd1168 = "0123456789qwertyuiopasdfghjklzxcvbnm:?!"
        # Working buffers for the solver.
        self.a = []
        self.b = {}
        # True once the challenge has been solved for self.host.
        self.state = False
        self.hostComplet = ''
        self.host = ''
        self.url = ''
class cTMDb: # https://developers.themoviedb.org/3/genres/get-movie-list # https://developers.themoviedb.org/3/genres/get-tv-list TMDB_GENRES = { 12: 'Aventure', 14: 'Fantastique', 16: 'Animation', 18: 'Drame', 27: 'Horreur', 28: 'Action', 35: 'Comédie', 36: 'Histoire', 37: 'Western', 53: 'Thriller', 80: 'Crime', 99: 'Documentaire', 878: 'Science-Fiction', 9648: 'Mystère', 10402: 'Musique', 10749: 'Romance', 10751: 'Familial', 10752: 'Guerre', 10759: 'Action & Aventure', 10762: 'Kids', 10763: 'News', 10764: 'Realité', 10765: 'Science-Fiction & Fantastique', 10766: 'Feuilleton', 10767: 'Talk', 10768: 'Guerre & Politique', 10769: 'Etranger', 10770: 'Téléfilm' } URL = 'https://api.themoviedb.org/3/' URL_TRAILER = 'plugin://plugin.video.youtube/play/?video_id=%s' # ancien : 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' CACHE = 'special://home/userdata/addon_data/plugin.video.vstream/video_cache.db' # important seul xbmcvfs peux lire le special if not isMatrix(): REALCACHE = VSPath(CACHE).decode('utf-8') else: REALCACHE = VSPath(CACHE) def __init__(self, api_key='', debug=False, lang='fr'): self.ADDON = addon() self.api_key = self.ADDON.getSetting('api_tmdb') self.debug = debug self.lang = lang self.poster = 'https://image.tmdb.org/t/p/%s' % self.ADDON.getSetting( 'poster_tmdb') self.fanart = 'https://image.tmdb.org/t/p/%s' % self.ADDON.getSetting( 'backdrop_tmdb') try: if not xbmcvfs.exists(self.CACHE): # f = open(self.cache, 'w') # f.close() self.db = sqlite.connect(self.REALCACHE) self.db.row_factory = sqlite.Row self.dbcur = self.db.cursor() self.__createdb() return except: VSlog('Error: Unable to write on %s' % self.REALCACHE) pass try: self.db = sqlite.connect(self.REALCACHE) self.db.row_factory = sqlite.Row self.dbcur = self.db.cursor() except: VSlog('Error: Unable to connect to %s' % self.REALCACHE) pass def __createdb(self): sql_create = "CREATE TABLE IF NOT EXISTS movie ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, "\ "title TEXT, "\ "year INTEGER,"\ 
"director TEXT, "\ "writer TEXT, "\ "tagline TEXT, "\ "credits TEXT,"\ "vote_average FLOAT, "\ "vote_count TEXT, "\ "runtime TEXT, "\ "overview TEXT,"\ "mpaa TEXT, "\ "premiered TEXT, "\ "genre TEXT, "\ "studio TEXT,"\ "status TEXT,"\ "poster_path TEXT, "\ "trailer TEXT, "\ "backdrop_path TEXT,"\ "playcount INTEGER,"\ "UNIQUE(imdb_id, tmdb_id, title, year)"\ ");" try: self.dbcur.execute(sql_create) except: VSlog('Error: Cannot create table movie') sql_create = "CREATE TABLE IF NOT EXISTS tvshow ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, "\ "title TEXT, "\ "year INTEGER,"\ "director TEXT, "\ "writer TEXT, "\ "credits TEXT,"\ "vote_average FLOAT, "\ "vote_count TEXT, "\ "runtime TEXT, "\ "overview TEXT,"\ "mpaa TEXT, "\ "premiered TEXT, "\ "genre TEXT, "\ "studio TEXT,"\ "status TEXT,"\ "poster_path TEXT,"\ "trailer TEXT, "\ "backdrop_path TEXT,"\ "playcount INTEGER,"\ "UNIQUE(imdb_id, tmdb_id, title)"\ ");" self.dbcur.execute(sql_create) sql_create = "CREATE TABLE IF NOT EXISTS season ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, " \ "season INTEGER, "\ "year INTEGER,"\ "premiered TEXT, "\ "poster_path TEXT,"\ "playcount INTEGER,"\ "UNIQUE(imdb_id, tmdb_id, season)"\ ");" self.dbcur.execute(sql_create) sql_create = "CREATE TABLE IF NOT EXISTS episode ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, "\ "episode_id TEXT, "\ "season INTEGER, "\ "episode INTEGER, "\ "title TEXT, "\ "director TEXT, "\ "writer TEXT, "\ "overview TEXT, "\ "vote_average FLOAT, "\ "premiered TEXT, "\ "poster_path TEXT, "\ "playcount INTEGER, "\ "UNIQUE(imdb_id, tmdb_id, episode_id, title)"\ ");" self.dbcur.execute(sql_create) VSlog('table movie creee') def __del__(self): """ Cleanup db when object destroyed """ try: self.dbcur.close() self.db.close() except: pass def getToken(self): result = self._call('authentication/token/new', '') total = len(result) if (total > 0): url = 'https://www.themoviedb.org/authenticate/' if not xbmc.getCondVisibility('system.platform.android'): #Si possible on ouvre la page 
automatiquement dans un navigateur internet. webbrowser.open(url + result['request_token']) sText = (self.ADDON.VSlang(30421)) % (url, result['request_token']) DIALOG = dialog() if not DIALOG.VSyesno(sText): return False else: from resources.lib import pyqrcode qr = pyqrcode.create(url + result['request_token']) qr.png( 'special://home/userdata/addon_data/plugin.video.vstream/qrcode.png', scale=5) oSolver = cInputWindowYesNo( captcha= 'special://home/userdata/addon_data/plugin.video.vstream/qrcode.png', msg="Scanner le QRCode pour acceder au lien d'autorisation", roundnum=1) retArg = oSolver.get() DIALOG = dialog() if retArg == "N": return False result = self._call('authentication/session/new', 'request_token=' + result['request_token']) if 'success' in result and result['success']: self.ADDON.setSetting('tmdb_session', str(result['session_id'])) DIALOG.VSinfo(self.ADDON.VSlang(30000)) return else: DIALOG.VSerror('Erreur' + self.ADDON.VSlang(30000)) return # xbmc.executebuiltin('Container.Refresh') return return # cherche dans les films ou serie l'id par le nom, return ID ou FALSE def get_idbyname(self, name, year='', mediaType='movie', page=1): #Pour les series il faut enlever le numero de l episode et la saison. if mediaType == "tv": m = re.search( '(?i)(?:^|[^a-z])((?:E|(?:\wpisode\s?))([0-9]+(?:[\-\.][0-9\?]+)*))', name) m1 = re.search('(?i)( s(?:aison +)*([0-9]+(?:\-[0-9\?]+)*))', name) name = name.replace(m.group(1), '').replace(m1.group(1), '').replace('+', ' ') #On enleve le contenu entre paranthese. 
try: name = name.split('(')[0] except: pass if year: term = QuotePlus(name) + '&year=' + year else: term = QuotePlus(name) meta = self._call('search/' + str(mediaType), 'query=' + term + '&page=' + str(page)) # si pas de résultat avec l'année, on teste sans l'année if 'total_results' in meta and meta['total_results'] == 0 and year: meta = self.search_movie_name(name, '') # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: if meta['total_results'] > 1: qua = [] url = [] for aEntry in meta['results']: url.append(aEntry["id"]) qua.append(aEntry['name']) #Affichage du tableau tmdb_id = dialog().VSselectqual(qua, url) else: tmdb_id = meta['results'][0]['id'] return tmdb_id else: return False return False # Search for movies by title. def search_movie_name(self, name, year='', page=1): name = re.sub(" +", " ", name) # nettoyage du titre if year: term = QuotePlus(name) + '&year=' + year else: term = QuotePlus(name) meta = self._call('search/movie', 'query=' + term + '&page=' + str(page)) if 'errors' not in meta and 'status_code' not in meta: # si pas de résultat avec l'année, on teste sans l'année if 'total_results' in meta and meta['total_results'] == 0 and year: meta = self.search_movie_name(name, '') # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: movie = '' # s'il n'y en a qu'un, c'est le bon if meta['total_results'] == 1: movie = meta['results'][0] else: # premiere boucle, recherche la correspondance parfaite sur le nom for searchMovie in meta['results']: if searchMovie['genre_ids'] and 99 not in searchMovie[ 'genre_ids']: if self._clean_title( searchMovie['title']) == self._clean_title( name): movie = searchMovie break # sinon, hors documentaire et année proche if not movie: for searchMovie in meta['results']: if searchMovie[ 'genre_ids'] and 99 not in searchMovie[ 'genre_ids']: # controle supplémentaire sur l'année meme si déjà dans la requete if year: if 'release_date' in searchMovie and 
searchMovie[ 'release_date']: release_date = searchMovie[ 'release_date'] yy = release_date[:4] if int(year) - int(yy) > 1: continue # plus de deux ans d'écart, c'est pas bon movie = searchMovie break # Rien d'interessant, on prend le premier if not movie: movie = meta['results'][0] # recherche de toutes les infos tmdb_id = movie['id'] meta = self.search_movie_id(tmdb_id) else: meta = {} return meta # Search for collections by title. def search_collection_name(self, name): name = re.sub(" +", " ", name) # nettoyage du titre term = QuotePlus(name) meta = self._call('search/collection', 'query=' + term) if 'errors' not in meta and 'status_code' not in meta: # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: collection = '' # s'il n'y en a qu'un, c'est le bon if meta['total_results'] == 1: collection = meta['results'][0] else: # premiere boucle, recherche la correspondance parfaite sur le nom for searchCollec in meta['results']: cleanTitleTMDB = self._clean_title( searchCollec['name']) cleanTitleSearch = self._clean_title(name) if not cleanTitleSearch.endswith('saga'): cleanTitleSearch += 'saga' if cleanTitleTMDB == cleanTitleSearch: collection = searchCollec break elif (cleanTitleSearch + 'saga') == cleanTitleTMDB: collection = searchCollec break # sinon, le premier qui n'est pas du genre animation if not collection: for searchCollec in meta['results']: if 'animation' not in searchCollec['name']: collection = searchCollec break # Rien d'interessant, on prend le premier if not collection: collection = meta['results'][0] meta = collection tmdb_id = collection['id'] meta['tmdb_id'] = tmdb_id # recherche de toutes les infos meta = self.search_collection_id(tmdb_id) else: meta = {} return meta # Search for TV shows by title. 
def search_tvshow_name(self, name, year='', page=1, genre=''):
    """Search TMDB ('search/tv') for a TV show by title.

    Args:
        name: show title to look up.
        year: optional 4-digit year (string) used to narrow the search.
        page: TMDB result page number.
        genre: optional TMDB genre id (int) the match must belong to.

    Returns:
        Full show-details dict (via search_tvshow_id) or {} on error / no match.
    """
    if year:
        term = QuotePlus(name) + '&year=' + year
    else:
        term = QuotePlus(name)
    meta = self._call('search/tv', 'query=' + term + '&page=' + str(page))

    if 'errors' not in meta and 'status_code' not in meta:
        # No result with the year constraint: retry without it.
        # BUGFIX: propagate page/genre (previously dropped) and return the
        # recursive result directly instead of re-filtering it below.
        if 'total_results' in meta and meta['total_results'] == 0 and year:
            return self.search_tvshow_name(name, '', page, genre)

        if 'total_results' in meta and meta['total_results'] != 0:
            show = ''
            if meta['total_results'] == 1:
                # Single hit: it is the one.
                show = meta['results'][0]
            else:
                # First pass: exact (cleaned) title match, restricted to the
                # requested genre when one was given.
                for candidate in meta['results']:
                    if genre == '' or genre in candidate['genre_ids']:
                        if self._clean_title(candidate['name']) == self._clean_title(name):
                            show = candidate
                            break

                # Second pass: genre match with a close first-air year.
                if not show:
                    for candidate in meta['results']:
                        if genre and genre in candidate['genre_ids']:
                            if year:
                                # BUGFIX: TV search results expose 'first_air_date',
                                # not 'release_date' (movies only) — the year-proximity
                                # check was dead code before this change.
                                if 'first_air_date' in candidate and candidate['first_air_date']:
                                    yy = candidate['first_air_date'][:4]
                                    if int(year) - int(yy) > 1:
                                        continue  # more than two years apart: reject
                            show = candidate
                            break

                # Nothing convincing: fall back to the first result.
                if not show:
                    show = meta['results'][0]

            # Fetch the complete record for the selected show.
            meta = self.search_tvshow_id(show['id'])
    else:
        meta = {}
    return meta

# Search for person by name.
def search_person_name(self, name):
    """Look up a person by name and return their full TMDB record ({} on error)."""
    query = re.sub(" +", " ", name)  # collapse duplicated spaces in the title
    meta = self._call('search/person', 'query=' + QuotePlus(query))
    if 'errors' not in meta and 'status_code' not in meta:
        # Keep the first hit, then fetch its complete details.
        if 'total_results' in meta and meta['total_results'] != 0:
            first_hit = meta['results'][0]
            meta = self.search_person_id(first_hit['id'])
    else:
        meta = {}
    return meta

# Get the basic movie information for a specific movie id.
def search_movie_id(self, movie_id,
                    append_to_response='append_to_response=trailers,credits'):
    """Return full movie details (with trailers and credits) for a TMDB id."""
    details = self._call('movie/' + str(movie_id), append_to_response)
    details['tmdb_id'] = movie_id
    return details

# Get the primary information about a TV series by id.
def search_tvshow_id(self, show_id,
                     append_to_response='append_to_response=external_ids,videos,credits'):
    """Return full TV-show details (external ids, videos, credits) for a TMDB id."""
    details = self._call('tv/' + str(show_id), append_to_response)
    details['tmdb_id'] = show_id
    return details

# Get the basic informations for a specific collection id.
def search_collection_id(self, collection_id):
    """Return collection details for a TMDB collection id."""
    details = self._call('collection/' + str(collection_id))
    details['tmdb_id'] = collection_id
    return details

# Get the basic person informations for a specific person id.
def search_person_id(self, person_id):
    """Return person details for a TMDB person id."""
    details = self._call('person/' + str(person_id))
    details['tmdb_id'] = person_id
    return details

# Get the informations for a specific network.
def search_network_id(self, network_id): result = self._call('network/%s/images' % str(network_id)) if 'status_code' not in result and 'logos' in result: network = result['logos'][0] vote = -1 # On prend le logo qui a la meilleure note for logo in result['logos']: logoVote = float(logo['vote_average']) if logoVote > vote: network = logo vote = logoVote network['tmdb_id'] = network_id network.pop('vote_average') return network return {} def _format(self, meta, name): _meta = {} _meta['imdb_id'] = '' _meta['tmdb_id'] = '' _meta['tvdb_id'] = '' _meta['title'] = name _meta['media_type'] = '' _meta['rating'] = 0 _meta['votes'] = 0 _meta['duration'] = 0 _meta['plot'] = '' _meta['mpaa'] = '' _meta['premiered'] = '' _meta['year'] = '' _meta['trailer'] = '' _meta['tagline'] = '' _meta['genre'] = '' _meta['studio'] = '' _meta['status'] = '' _meta['credits'] = '' _meta['cast'] = [] _meta['director'] = '' _meta['writer'] = '' _meta['poster_path'] = '' _meta['cover_url'] = '' _meta['backdrop_path'] = '' _meta['backdrop_url'] = '' _meta['episode'] = 0 _meta['playcount'] = 0 if 'title' in meta and meta['title']: _meta['title'] = meta['title'] elif 'name' in meta and meta['name']: _meta['title'] = meta['name'] if 'id' in meta: _meta['tmdb_id'] = meta['id'] if 'tmdb_id' in meta: _meta['tmdb_id'] = meta['tmdb_id'] if 'imdb_id' in meta: _meta['imdb_id'] = meta['imdb_id'] elif 'external_ids' in meta: _meta['imdb_id'] = meta['external_ids']['imdb_id'] if 'mpaa' in meta: _meta['mpaa'] = meta['mpaa'] if 'media_type' in meta: _meta['media_type'] = meta['media_type'] if 'release_date' in meta: _meta['premiered'] = meta['release_date'] elif 'first_air_date' in meta: _meta['premiered'] = meta['first_air_date'] elif 'premiered' in meta and meta['premiered']: _meta['premiered'] = meta['premiered'] elif 's_premiered' in meta and meta['s_premiered']: _meta['premiered'] = meta['s_premiered'] elif 'air_date' in meta and meta['air_date']: _meta['premiered'] = meta['air_date'] if 'year' in meta: 
_meta['year'] = meta['year'] elif 's_year' in meta: _meta['year'] = meta['s_year'] else: try: if 'premiered' in _meta and _meta['premiered']: _meta['year'] = int(_meta['premiered'][:4]) except: pass if 'rating' in meta: _meta['rating'] = meta['rating'] elif 'vote_average' in meta: _meta['rating'] = meta['vote_average'] if 'votes' in meta: _meta['votes'] = meta['votes'] elif 'vote_count' in meta: _meta['votes'] = meta['vote_count'] try: duration = 0 if 'runtime' in meta and meta['runtime']: duration = float(meta['runtime']) elif 'episode_run_time' in meta and meta['episode_run_time']: duration = float(meta['episode_run_time'][0]) if duration < 300: # en minutes duration *= 60 # Convertir les minutes TMDB en secondes pour KODI _meta['duration'] = duration except: _meta['duration'] = 0 if 'overview' in meta and meta['overview']: _meta['plot'] = meta['overview'] elif 'parts' in meta: # Il s'agit d'une collection, on récupere le plot du premier film _meta['plot'] = meta['parts'][0]['overview'] elif 'biography' in meta: # Il s'agit d'une personne, on récupere sa bio _meta['plot'] = meta['biography'] if 'studio' in meta: _meta['studio'] = meta['studio'] elif 'production_companies' in meta: _meta['studio'] = '' for studio in meta['production_companies']: if _meta['studio'] == '': _meta['studio'] += studio['name'] else: _meta['studio'] += ' / ' + studio['name'] if 'genre' in meta: listeGenre = meta['genre'] if '{' in listeGenre: meta['genres'] = eval(listeGenre) else: _meta['genre'] = listeGenre if 'genres' in meta: # _meta['genre'] = '' for genre in meta['genres']: if _meta['genre'] == '': _meta['genre'] += genre['name'] else: _meta['genre'] += ' / ' + genre['name'] elif 'genre_ids' in meta: genres = self.getGenresFromIDs(meta['genre_ids']) _meta['genre'] = '' for genre in genres: if _meta['genre'] == '': _meta['genre'] += genre else: _meta['genre'] += ' / ' + genre if not isMatrix(): _meta['genre'] = unicode(_meta['genre'], 'utf-8') elif 'parts' in meta: # Il s'agit d'une 
collection, on récupere le genre du premier film genres = self.getGenresFromIDs(meta['parts'][0]['genre_ids']) _meta['genre'] = '' for genre in genres: if _meta['genre'] == '': _meta['genre'] += genre else: _meta['genre'] += ' / ' + genre if not isMatrix(): _meta['genre'] = unicode(_meta['genre'], 'utf-8') trailer_id = '' if 'trailer' in meta and meta['trailer']: # Lecture du cache _meta['trailer'] = meta['trailer'] elif 'trailers' in meta: # Trailer d'un film retourné par TMDB try: # Recherche de la BA en français trailers = meta['trailers']['youtube'] for trailer in trailers: if trailer['type'] == 'Trailer': if 'VF' in trailer['name']: trailer_id = trailer['source'] break # pas de trailer français, on prend le premier if not trailer_id: trailer_id = meta['trailers']['youtube'][0]['source'] _meta['trailer'] = self.URL_TRAILER % trailer_id except: pass elif 'videos' in meta and meta[ 'videos']: # Trailer d'une série retourné par TMDB try: # Recherche de la BA en français trailers = meta['videos'] if len(trailers['results']) > 0: for trailer in trailers['results']: if trailer['type'] == 'Trailer' and trailer[ 'site'] == 'YouTube': trailer_id = trailer[ 'key'] # Au moins c'est un trailer, pas forcement français if 'fr' in trailer['iso_639_1']: trailer_id = trailer['key'] break # pas de trailer, on prend la premiere vidéo disponible if not trailer_id: trailer_id = meta['videos'][0]['key'] _meta['trailer'] = self.URL_TRAILER % trailer_id except: pass if 'backdrop_path' in meta and meta['backdrop_path']: _meta['backdrop_path'] = meta['backdrop_path'] _meta['backdrop_url'] = self.fanart + str(_meta['backdrop_path']) elif 'parts' in meta: # Il s'agit d'une collection, on récupere le backdrop du dernier film nbFilm = len(meta['parts']) _meta['backdrop_path'] = meta['parts'][nbFilm - 1]['backdrop_path'] _meta['backdrop_url'] = self.fanart + str(_meta['backdrop_path']) if 'poster_path' in meta and meta['poster_path']: _meta['poster_path'] = meta['poster_path'] 
_meta['cover_url'] = self.poster + str(_meta['poster_path']) elif 'parts' in meta: # Il s'agit d'une collection, on récupere le poster du dernier film nbFilm = len(meta['parts']) _meta['poster_path'] = meta['parts'][nbFilm - 1]['poster_path'] _meta['cover_url'] = self.fanart + str(_meta['poster_path']) elif 'profile_path' in meta: # il s'agit d'une personne _meta['poster_path'] = meta['profile_path'] _meta['cover_url'] = self.poster + str(_meta['poster_path']) elif 'file_path' in meta: # il s'agit d'un network _meta['poster_path'] = meta['file_path'] _meta['cover_url'] = self.poster + str(_meta['poster_path']) _meta['backdrop_path'] = _meta['poster_path'] _meta['backdrop_url'] = self.fanart + str(_meta['backdrop_path']) # special saisons if 's_poster_path' in meta and meta['s_poster_path']: _meta['poster_path'] = meta['s_poster_path'] _meta['cover_url'] = self.poster + str(meta['s_poster_path']) if 'playcount' in meta: _meta['playcount'] = meta['playcount'] if _meta['playcount'] == 6: # Anciennement 6 = unwatched _meta['playcount'] = 0 else: _meta['playcount'] = 0 if 'tagline' in meta and meta['tagline']: _meta['tagline'] = meta['tagline'] if 'status' in meta: _meta['status'] = meta['status'] if 'writer' in meta and meta['writer']: _meta['writer'] = meta['writer'] if 'director' in meta and meta['director']: _meta['director'] = meta['director'] if 'credits' in meta and meta['credits']: # Transformation compatible pour lecture depuis le cache et retour de TMDB strmeta = str(meta['credits']) listCredits = eval(strmeta) casts = listCredits['cast'] crews = [] if len(casts) > 0: licast = [] if 'crew' in listCredits: crews = listCredits['crew'] if len(crews) > 0: _meta['credits'] = "{u'cast': " + str( casts) + ", u'crew': " + str(crews) + "}" else: _meta['credits'] = "{u'cast': " + str(casts) + '}' # _meta['credits'] = "{u'cast': " + str(casts) + ", u'crew': "+str(crews) + "}" # _meta['credits'] = 'u\'cast\': ' + str(casts) + '' for cast in casts: 
licast.append((cast['name'], cast['character'])) _meta['cast'] = licast #if 'crew' in listCredits: if len(crews) > 0: for crew in crews: if crew['job'] == 'Director': _meta['director'] = crew['name'] elif crew['department'] == 'Writing': if _meta['writer'] != '': _meta['writer'] += ' / ' _meta['writer'] += '%s (%s)' % (crew['job'], crew['name']) elif crew[ 'department'] == 'Production' and 'Producer' in crew[ 'job']: if _meta['writer'] != '': _meta['writer'] += ' / ' _meta['writer'] += '%s (%s)' % (crew['job'], crew['name']) return _meta def _clean_title(self, title): title = re.sub('[^%s]' % (string.ascii_lowercase + string.digits), '', title.lower()) return title def _cache_search(self, media_type, name, tmdb_id='', year='', season='', episode=''): if media_type == 'movie': sql_select = 'SELECT * FROM movie' if tmdb_id: sql_select = sql_select + ' WHERE tmdb_id = \'%s\'' % tmdb_id else: sql_select = sql_select + ' WHERE title = \'%s\'' % name if year: sql_select = sql_select + ' AND year = %s' % year elif media_type == 'collection': sql_select = 'SELECT * FROM movie' if tmdb_id: sql_select = sql_select + ' WHERE tmdb_id = \'%s\'' % tmdb_id else: if not name.endswith('saga'): name += 'saga' sql_select = sql_select + ' WHERE title = \'%s\'' % name elif media_type == 'tvshow' or media_type == 'anime': sql_select = 'SELECT * FROM tvshow' if season: sql_select = 'SELECT *, season.poster_path as s_poster_path, season.premiered as s_premiered, ' \ 'season.year as s_year FROM tvshow LEFT JOIN season ON tvshow.imdb_id = season.imdb_id ' if tmdb_id: sql_select = sql_select + ' WHERE tvshow.tmdb_id = \'%s\'' % tmdb_id else: sql_select = sql_select + ' WHERE tvshow.title = \'%s\'' % name if year: sql_select = sql_select + ' AND tvshow.year = %s' % year if season: sql_select = sql_select + ' AND season.season = \'%s\'' % season else: return None try: self.dbcur.execute(sql_select) matchedrow = self.dbcur.fetchone() except Exception as e: VSlog('************* Error selecting 
from cache db: %s' % e, 4) return None if matchedrow: # VSlog('Found meta information by name in cache table') return dict(matchedrow) else: # VSlog('No match in local DB') return None def _cache_save(self, meta, name, media_type, season, year): # Pas de cache pour les personnes ou les distributeurs if media_type in ('person', 'network'): return # cache des séries et animes if media_type == 'tvshow' or media_type == 'anime': return self._cache_save_tvshow(meta, name, 'tvshow', season, year) # cache des collections if media_type == 'collection': media_type = 'movie' # On utilise la même table que pour les films if not name.endswith('saga'): name += 'saga' # sauvegarde de la durée en minutes, pour le retrouver en minutes comme le fait TMDB runtime = 0 if 'duration' in meta and meta['duration']: runtime = int(meta['duration']) / 60 if not year and 'year' in meta: year = meta['year'] # sauvegarde movie dans la BDD # year n'est pas forcement l'année du film mais l'année utilisée pour la recherche try: sql = 'INSERT INTO %s (imdb_id, tmdb_id, title, year, credits, writer, director, tagline, vote_average, vote_count, runtime, ' \ 'overview, mpaa, premiered, genre, studio, status, poster_path, trailer, backdrop_path, playcount) ' \ 'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)' % media_type self.dbcur.execute( sql, (meta['imdb_id'], meta['tmdb_id'], name, year, meta['credits'], meta['writer'], meta['director'], meta['tagline'], meta['rating'], meta['votes'], str(runtime), meta['plot'], meta['mpaa'], meta['premiered'], meta['genre'], meta['studio'], meta['status'], meta['poster_path'], meta['trailer'], meta['backdrop_path'], 0)) self.db.commit() # VSlog('SQL INSERT Successfully') except Exception as e: VSlog('SQL ERROR INSERT into table ' + media_type) pass # Cache pour les séries (et animes) def _cache_save_tvshow(self, meta, name, media_type, season, year): # ecrit les saisons dans la BDD if 'seasons' in meta: self._cache_save_season(meta, 
season) del meta['seasons'] if not year and 'year' in meta: year = meta['year'] # sauvegarde de la durée en minutes, pour le retrouver en minutes comme le fait TMDB runtime = 0 if 'duration' in meta and meta['duration']: runtime = int(meta['duration']) / 60 # sauvegarde tvshow dans la BDD try: sql = 'INSERT INTO %s (imdb_id, tmdb_id, title, year, credits, writer, director, vote_average, vote_count, runtime, ' \ 'overview, mpaa, premiered, genre, studio, status, poster_path, trailer, backdrop_path, playcount) ' \ 'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)' % media_type self.dbcur.execute( sql, (meta['imdb_id'], meta['tmdb_id'], name, year, meta['credits'], meta['writer'], meta['director'], meta['rating'], meta['votes'], runtime, meta['plot'], meta['mpaa'], meta['premiered'], meta['genre'], meta['studio'], meta['status'], meta['poster_path'], meta['trailer'], meta['backdrop_path'], 0)) self.db.commit() # VSlog('SQL INSERT Successfully') except Exception as e: VSlog('SQL ERROR INSERT into table ' + media_type) pass def _cache_save_season(self, meta, season): for s in meta['seasons']: if s['season_number'] != None and ( '%02d' % int(s['season_number'])) == season: meta['s_poster_path'] = s['poster_path'] meta['s_premiered'] = s['air_date'] meta['s_year'] = s['air_date'] try: sql = 'INSERT INTO season (imdb_id, tmdb_id, season, year, premiered, poster_path, playcount) VALUES ' \ '(?, ?, ?, ?, ?, ?, ?) ' self.dbcur.execute( sql, (meta['imdb_id'], s['id'], s['season_number'], s['air_date'], s['air_date'], s['poster_path'], 6)) self.db.commit() # VSlog('SQL INSERT Successfully') except Exception: VSlog('SQL ERROR INSERT into table season') pass def get_meta(self, media_type, name, imdb_id='', tmdb_id='', year='', season='', episode='', update=False): """ Main method to get meta data for movie or tvshow. Will lookup by name/year if no IMDB ID supplied. 
Args: media_type (str): 'movie' or 'tvshow' name (str): full name of movie/tvshow you are searching Kwargs: imdb_id (str): IMDB ID tmdb_id (str): TMDB ID year (str): 4 digit year of video, recommended to include the year whenever possible to maximize correct search results. season (int) episode (int) Returns: DICT of meta data or None if cannot be found. """ name = re.sub(" +", " ", name) # nettoyage du titre # VSlog('Attempting to retrieve meta data for %s: %s %s %s %s' % (media_type, name, year, imdb_id, tmdb_id)) # recherche dans la base de données if not update: meta = self._cache_search(media_type, self._clean_title(name), tmdb_id, year, season, episode) if meta: meta = self._format(meta, name) return meta # recherche online meta = {} if media_type == 'movie': if tmdb_id: meta = self.search_movie_id(tmdb_id) elif name: meta = self.search_movie_name(name, year) elif media_type == 'tvshow': if tmdb_id: meta = self.search_tvshow_id(tmdb_id) elif name: meta = self.search_tvshow_name(name, year) elif media_type == 'anime': if tmdb_id: meta = self.search_tvshow_id(tmdb_id) elif name: meta = self.search_tvshow_name(name, year, genre=16) elif media_type == 'collection': if tmdb_id: meta = self.search_collection_id(tmdb_id) elif name: meta = self.search_collection_name(name) elif media_type == 'person': if tmdb_id: meta = self.search_person_id(tmdb_id) elif name: meta = self.search_person_name(name) elif media_type == 'network': if tmdb_id: meta = self.search_network_id(tmdb_id) # Mise en forme des metas si trouvé if meta and 'tmdb_id' in meta: meta = self._format(meta, name) # sauvegarde dans un cache self._cache_save(meta, self._clean_title(name), media_type, season, year) else: # initialise un meta vide meta = self._format(meta, name) return meta def getUrl(self, url, page=1, term=''): # return url api exemple 'movie/popular' page en cours try: if term: term = term + '&page=' + str(page) else: term = 'page=' + str(page) result = self._call(url, term) except: return 
False return result def _call(self, action, append_to_response=''): url = '%s%s?language=%s&api_key=%s' % (self.URL, action, self.lang, self.api_key) if append_to_response: url += '&%s' % append_to_response #On utilise requests car urllib n'arrive pas a certain moment a ouvrir le json. import requests data = requests.get(url).json() return data def getPostUrl(self, action, post): tmdb_session = self.ADDON.getSetting('tmdb_session') if not tmdb_session: return sUrl = '%s%s?api_key=%s&session_id=%s' % (self.URL, action, self.api_key, tmdb_session) try: sPost = json.dumps(post).encode('utf-8') except: sPost = json.dumps(post) headers = {'Content-Type': 'application/json'} req = urllib2.Request(sUrl, sPost, headers) response = urllib2.urlopen(req) data = json.loads(response.read()) return data # retourne la liste des genres en Texte, à partir des IDs def getGenresFromIDs(self, genresID): sGenres = [] for gid in genresID: genre = self.TMDB_GENRES.get(gid) if genre: sGenres.append(genre) return sGenres # Retourne le genre en Texte, à partir d'un ID def getGenreFromID(self, genreID): if not str(genreID).isdigit(): return genreID genre = self.TMDB_GENRES.get(genreID) if genre: return genre return genreID
class cTMDb: # https://developers.themoviedb.org/3/genres/get-movie-list # https://developers.themoviedb.org/3/genres/get-tv-list TMDB_GENRES = { 12: 'Aventure', 14: 'Fantastique', 16: 'Animation', 18: 'Drame', 27: 'Horreur', 28: 'Action', 35: 'Comédie', 36: 'Histoire', 37: 'Western', 53: 'Thriller', 80: 'Crime', 99: 'Documentaire', 878: 'Science-Fiction', 9648: 'Mystère', 10402: 'Musique', 10749: 'Romance', 10751: 'Familial', 10752: 'Guerre', 10759: 'Action & Aventure', 10762: 'Kids', 10763: 'News', 10764: 'Realité', 10765: 'Science-Fiction & Fantastique', 10766: 'Feuilleton', 10767: 'Talk', 10768: 'Guerre & Politique', 10769: 'Etranger', 10770: 'Téléfilm' } URL = 'https://api.themoviedb.org/3/' URL_TRAILER = 'plugin://plugin.video.youtube/play/?video_id=%s' # ancien : 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' CACHE = 'special://home/userdata/addon_data/plugin.video.vstream/video_cache.db' # important seul xbmcvfs peux lire le special if not isMatrix(): REALCACHE = VSPath(CACHE).decode('utf-8') else: REALCACHE = VSPath(CACHE) def __init__(self, api_key='', debug=False, lang='fr'): self.ADDON = addon() self.api_key = self.ADDON.getSetting('api_tmdb') self.debug = debug self.lang = lang self.poster = 'https://image.tmdb.org/t/p/%s' % self.ADDON.getSetting( 'poster_tmdb') self.fanart = 'https://image.tmdb.org/t/p/%s' % self.ADDON.getSetting( 'backdrop_tmdb') try: if not xbmcvfs.exists(self.CACHE): self.db = sqlite.connect(self.REALCACHE) self.db.row_factory = sqlite.Row self.dbcur = self.db.cursor() self.__createdb() return except: VSlog('Error: Unable to write on %s' % self.REALCACHE) pass try: self.db = sqlite.connect(self.REALCACHE) self.db.row_factory = sqlite.Row self.dbcur = self.db.cursor() except: VSlog('Error: Unable to connect to %s' % self.REALCACHE) pass def __createdb(self, dropTable=''): try: # Permets de detruire une table pour la recreer de zero. 
if dropTable != '': self.dbcur.execute("DROP TABLE " + dropTable) self.db.commit() except: pass sql_create = "CREATE TABLE IF NOT EXISTS movie ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, "\ "title TEXT, "\ "year INTEGER, "\ "director TEXT, "\ "writer TEXT, "\ "tagline TEXT, "\ "cast TEXT, "\ "crew TEXT, "\ "rating FLOAT, "\ "votes TEXT, "\ "duration INTEGER, "\ "plot TEXT, "\ "mpaa TEXT, "\ "premiered TEXT, "\ "genre TEXT, "\ "studio TEXT, "\ "status TEXT, "\ "poster_path TEXT, "\ "trailer TEXT, "\ "backdrop_path TEXT, "\ "UNIQUE(tmdb_id)"\ ");" try: self.dbcur.execute(sql_create) VSlog('table movie creee') except: VSlog('Error: Cannot create table movie') sql_create = "CREATE TABLE IF NOT EXISTS saga ("\ "tmdb_id TEXT, "\ "title TEXT, "\ "plot TEXT, "\ "genre TEXT, "\ "poster_path TEXT, "\ "backdrop_path TEXT, "\ "UNIQUE(tmdb_id)"\ ");" try: self.dbcur.execute(sql_create) VSlog('table saga creee') except: VSlog('Error: Cannot create table saga') sql_create = "CREATE TABLE IF NOT EXISTS tvshow ("\ "imdb_id TEXT, "\ "tmdb_id TEXT, "\ "title TEXT, "\ "year INTEGER, "\ "director TEXT, "\ "writer TEXT, "\ "cast TEXT, "\ "crew TEXT, "\ "rating FLOAT, "\ "votes TEXT, "\ "duration INTEGER, "\ "plot TEXT, "\ "mpaa TEXT, "\ "premiered TEXT, "\ "genre TEXT, "\ "studio TEXT, "\ "status TEXT, "\ "poster_path TEXT, "\ "trailer TEXT, "\ "backdrop_path TEXT, "\ "nbseasons INTEGER, "\ "UNIQUE(tmdb_id)"\ ");" try: self.dbcur.execute(sql_create) VSlog('table tvshow creee') except: VSlog('Error: Cannot create table tvshow') sql_create = "CREATE TABLE IF NOT EXISTS season ("\ "tmdb_id TEXT, " \ "season INTEGER, "\ "year INTEGER, "\ "premiered TEXT, "\ "poster_path TEXT, "\ "plot TEXT, "\ "episode INTEGER, "\ "UNIQUE(tmdb_id, season)"\ ");" try: self.dbcur.execute(sql_create) VSlog('table season creee') except: VSlog('Error: Cannot create table season') sql_create = "CREATE TABLE IF NOT EXISTS episode ("\ "tmdb_id TEXT, "\ "originaltitle TEXT,"\ "season INTEGER, "\ "episode INTEGER, "\ "year 
INTEGER, "\ "title TEXT, "\ "director TEXT, "\ "writer TEXT, "\ "guest_stars TEXT, "\ "plot TEXT, "\ "rating FLOAT, "\ "votes TEXT, "\ "premiered TEXT, "\ "tagline TEXT, "\ "poster_path TEXT, "\ "UNIQUE(tmdb_id, season, episode)"\ ");" try: self.dbcur.execute(sql_create) VSlog('table episode creee') except: VSlog('Error: Cannot create table episode') def __del__(self): """ Cleanup db when object destroyed """ try: self.dbcur.close() self.db.close() except: pass def getToken(self): result = self._call('authentication/token/new', '') total = len(result) if (total > 0): url = 'https://www.themoviedb.org/authenticate/' if not xbmc.getCondVisibility('system.platform.android'): # Si possible on ouvre la page automatiquement dans un navigateur internet. import webbrowser webbrowser.open(url + result['request_token']) sText = (self.ADDON.VSlang(30421)) % (url, result['request_token']) DIALOG = dialog() if not DIALOG.VSyesno(sText): return False else: from resources.lib import pyqrcode from resources.lib.librecaptcha.gui import cInputWindowYesNo qr = pyqrcode.create(url + result['request_token']) qr.png( 'special://home/userdata/addon_data/plugin.video.vstream/qrcode.png', scale=5) oSolver = cInputWindowYesNo( captcha= 'special://home/userdata/addon_data/plugin.video.vstream/qrcode.png', msg="Scanner le QRCode pour acceder au lien d'autorisation", roundnum=1) retArg = oSolver.get() DIALOG = dialog() if retArg == "N": return False result = self._call('authentication/session/new', 'request_token=' + result['request_token']) if 'success' in result and result['success']: self.ADDON.setSetting('tmdb_session', str(result['session_id'])) DIALOG.VSinfo(self.ADDON.VSlang(30000)) return else: DIALOG.VSerror('Erreur' + self.ADDON.VSlang(30000)) return # xbmc.executebuiltin('Container.Refresh') return return # cherche dans les films ou serie l'id par le nom, return ID ou FALSE def get_idbyname(self, name, year='', mediaType='movie', page=1): # On enleve le contenu entre paranthese. 
try: name = name.split('(')[0] except: pass if year: term = QuotePlus(name) + '&year=' + year else: term = QuotePlus(name) meta = self._call('search/' + str(mediaType), 'query=' + term + '&page=' + str(page)) # si pas de résultat avec l'année, on teste sans l'année if 'total_results' in meta: if year and meta['total_results'] == 0: return self.search_movie_name(name) # cherche 1 seul resultat if meta['total_results'] != 0: tmdb_id = meta['results'][0]['id'] return tmdb_id return False # Search for movies by title. def search_movie_name(self, name, year='', page=1): name = re.sub(" +", " ", name) # nettoyage du titre if year: term = QuotePlus(name) + '&year=' + year else: term = QuotePlus(name) meta = self._call('search/movie', 'query=' + term + '&page=' + str(page)) if 'errors' not in meta and 'status_code' not in meta: # si pas de résultat avec l'année, on teste sans l'année if 'total_results' in meta and meta['total_results'] == 0 and year: return self.search_movie_name(name) # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: movie = '' # s'il n'y en a qu'un, c'est le bon if meta['total_results'] == 1: movie = meta['results'][0] else: # premiere boucle, recherche la correspondance parfaite sur le nom for searchMovie in meta['results']: if searchMovie['genre_ids'] and 99 not in searchMovie[ 'genre_ids']: if self._clean_title( searchMovie['title']) == self._clean_title( name): movie = searchMovie break # sinon, hors documentaire et année proche if not movie: for searchMovie in meta['results']: if searchMovie[ 'genre_ids'] and 99 not in searchMovie[ 'genre_ids']: # controle supplémentaire sur l'année meme si déjà dans la requete if year: if 'release_date' in searchMovie and searchMovie[ 'release_date']: release_date = searchMovie[ 'release_date'] yy = release_date[:4] if int(year) - int(yy) > 1: continue # plus de deux ans d'écart, c'est pas bon movie = searchMovie break # Rien d'interessant, on prend le premier if not movie: movie 
= meta['results'][0] # recherche de toutes les infos tmdb_id = movie['id'] meta = self.search_movie_id(tmdb_id) else: meta = {} return meta # Search for collections by title. def search_collection_name(self, name): name = re.sub(" +", " ", name) # nettoyage du titre term = QuotePlus(name) meta = self._call('search/collection', 'query=' + term) if 'errors' not in meta and 'status_code' not in meta: # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: collection = '' # s'il n'y en a qu'un, c'est le bon if meta['total_results'] == 1: collection = meta['results'][0] else: # premiere boucle, recherche la correspondance parfaite sur le nom for searchCollec in meta['results']: cleanTitleTMDB = self._clean_title( searchCollec['name']) cleanTitleSearch = self._clean_title(name) if cleanTitleTMDB == cleanTitleSearch: collection = searchCollec break # sinon, le premier qui n'est pas du genre animation if not collection: for searchCollec in meta['results']: if 'animation' not in searchCollec['name']: collection = searchCollec break # Rien d'interessant, on prend le premier if not collection: collection = meta['results'][0] meta = collection tmdb_id = collection['id'] meta['tmdb_id'] = tmdb_id # recherche de toutes les infos meta = self.search_collection_id(tmdb_id) else: meta = {} return meta # Search for TV shows by title. 
def search_tvshow_name(self, name, year='', page=1, genre=''): if year: term = QuotePlus(name) + '&year=' + year else: term = QuotePlus(name) meta = self._call('search/tv', 'query=' + term + '&page=' + str(page)) if 'errors' not in meta and 'status_code' not in meta: # si pas de résultat avec l'année, on teste sans l'année if 'total_results' in meta and meta['total_results'] == 0 and year: return self.search_tvshow_name(name) # cherche 1 seul resultat if 'total_results' in meta and meta['total_results'] != 0: movie = '' # s'il n'y en a qu'un, c'est le bon if meta['total_results'] == 1: movie = meta['results'][0] else: # premiere boucle, recherche la correspondance parfaite sur le nom for searchMovie in meta['results']: if genre == '' or genre in searchMovie['genre_ids']: movieName = searchMovie['name'] if self._clean_title( movieName) == self._clean_title(name): movie = searchMovie break # sinon, hors documentaire et année proche if not movie: for searchMovie in meta['results']: if genre and genre in searchMovie['genre_ids']: # controle supplémentaire sur l'année meme si déjà dans la requete if year: if 'release_date' in searchMovie and searchMovie[ 'release_date']: release_date = searchMovie[ 'release_date'] yy = release_date[:4] if int(year) - int(yy) > 1: continue # plus de deux ans d'écart, c'est pas bon movie = searchMovie break # Rien d'interessant, on prend le premier if not movie: movie = meta['results'][0] # recherche de toutes les infos tmdb_id = movie['id'] meta = self.search_tvshow_id(tmdb_id) else: meta = {} return meta # Search for person by name. 
def search_person_name(self, name): name = re.sub(" +", " ", name) # nettoyage du titre term = QuotePlus(name) meta = self._call('search/person', 'query=' + term) # si pas d'erreur if 'errors' not in meta and 'status_code' not in meta: # on prend le premier resultat if 'total_results' in meta and meta['total_results'] != 0: meta = meta['results'][0] # recherche de toutes les infos person_id = meta['id'] meta = self.search_person_id(person_id) else: meta = {} return meta # Get the basic movie information for a specific movie id. def search_movie_id( self, movie_id, append_to_response='append_to_response=trailers,credits,release_dates' ): result = self._call('movie/' + str(movie_id), append_to_response) result['tmdb_id'] = movie_id return result # obj(**self._call('movie/' + str(movie_id), append_to_response)) # Get the primary information about a TV series by id. def search_tvshow_id( self, show_id, append_to_response='append_to_response=external_ids,videos,credits,release_dates' ): result = self._call('tv/' + str(show_id), append_to_response) result['tmdb_id'] = show_id return result # Get the primary information about a TV series by id. def search_season_id(self, show_id, season): result = self._call('tv/' + str(show_id) + '/season/' + str(season)) result['tmdb_id'] = show_id return result # Get the primary information about a episode. def search_episode_id(self, show_id, season, episode): if season: result = self._call('tv/' + str(show_id) + '/season/' + str(season) + '/episode/' + str(episode)) result['tmdb_id'] = show_id return result else: return False # Get the basic informations for a specific collection id. def search_collection_id(self, collection_id): result = self._call('collection/' + str(collection_id)) result['tmdb_id'] = collection_id return result # Get the basic person informations for a specific person id. 
    def search_person_id(self, person_id):
        """Fetch a person by TMDB id; the record is tagged with 'tmdb_id'."""
        result = self._call('person/' + str(person_id))
        result['tmdb_id'] = person_id
        return result

    # Get the informations for a specific network.
    def search_network_id(self, network_id):
        """Return the best-rated logo record of a network, or {} on error."""
        result = self._call('network/%s/images' % str(network_id))
        if 'status_code' not in result and 'logos' in result:
            network = result['logos'][0]
            vote = -1
            # Keep the logo with the best vote average.
            for logo in result['logos']:
                logoVote = float(logo['vote_average'])
                if logoVote > vote:
                    network = logo
                    vote = logoVote
            network['tmdb_id'] = network_id
            network.pop('vote_average')
            return network
        return {}

    def _format(self, meta, name, media_type=""):
        """Normalise a raw TMDB (or cached) record into the addon's flat
        metadata dict (_meta).

        Handles both API field names (vote_average, overview, ...) and the
        cache's 's_'-prefixed column names; fills genre, studio, trailer, cast,
        crew, certification and image URLs.
        """
        _meta = {
            'imdb_id': meta.get('imdb_id', ""),
            'tmdb_id': meta.get('tmdb_id', "") if meta.get('tmdb_id') else meta.get('id'),
            'tvdb_id': "",
            "title": meta.get('title') if meta.get('title') else meta.get('name', ""),
            'media_type': meta.get('media_type', "") if media_type == "" else media_type,
            'rating': meta.get('s_vote_average', 0.0) if meta.get('s_vote_average') else meta.get('vote_average', 0.0),
            'votes': meta.get('s_vote_count', 0) if meta.get('s_vote_count') else meta.get('vote_count', 0),
            # episode_run_time is a list (TV); runtime is an int (movie).
            # Result is stored in seconds.
            'duration': (int(meta.get('episode_run_time', 0)[0]) if meta.get('episode_run_time', 0) else meta.get('runtime', 0)) * 60,
            'plot': ''.join([
                meta.get(key, "") for key in ['s_overview', 'overview', 'biography']
                if meta.get(key) != None
            ]),
            'mpaa': meta.get('mpaa', ""),
            'premiered': meta.get('s_premiered', "") if meta.get('s_premiered')
                else meta.get('release_date', "") if meta.get('release_date')
                else meta.get('first_air_date', "") if meta.get('first_air_date')
                else meta.get('air_date', ""),
            'year': meta.get('s_year', 0) if meta.get('s_year') else meta.get('year', 0),
            'trailer': '',
            'tagline': meta.get('name') if media_type == "episode" else meta.get('tagline'),
            'genre': '',
            'studio': "",
            'status': meta.get('status', ""),
            'cast': '',
            'crew': '',
            'director': meta.get('s_director', "") if meta.get('s_director') else meta.get('director', ""),
            'writer': meta.get('s_writer', "") if meta.get('s_writer') else meta.get('writer', ""),
            'poster_path': ''.join([
                meta.get(key, "") for key in ['poster_path', 'still_path', 'file_path', 'profile_path']
                if meta.get(key) != None
            ]),
            'backdrop_path': ''.join([
                meta.get(key, "") for key in ['backdrop_path', 'still_path', 'file_path', 'profile_path']
                if meta.get(key) != None
            ]),
            'episode': meta.get('episode_number', 0),
            'season': meta.get('season_number', 0) if meta.get('season_number') else meta.get('seasons', []),
            'nbseasons': meta.get('number_of_seasons', ""),
            'guest_stars': str(meta.get('guest_stars', [])),
        }
        # Derive the year from the premiere date when it is missing.
        try:
            if _meta['year'] == 0:
                _meta['year'] = int(_meta['premiered'][:4])
        except:
            pass
        # Studios joined with ' / '.
        if 'production_companies' in meta:
            for studio in meta['production_companies']:
                if _meta['studio'] == '':
                    _meta['studio'] += studio['name']
                else:
                    _meta['studio'] += ' / ' + studio['name']
        # Genre may come as a cached string, a list of dicts, a list of ids,
        # or (collections) from the first movie of 'parts'.
        if 'genre' in meta:
            listeGenre = meta['genre']
            if '{' in listeGenre:
                # NOTE(review): eval of a cached string — assumed to come only
                # from our own cache DB, not from untrusted input; confirm.
                meta['genres'] = eval(listeGenre)
            else:
                _meta['genre'] = listeGenre
        elif 'genres' in meta:
            for genre in meta['genres']:
                if _meta['genre'] == '':
                    _meta['genre'] += genre['name']
                else:
                    _meta['genre'] += ' / ' + genre['name']
        elif 'genre_ids' in meta:
            genres = self.getGenresFromIDs(meta['genre_ids'])
            _meta['genre'] = ''
            for genre in genres:
                if _meta['genre'] == '':
                    _meta['genre'] += genre
                else:
                    _meta['genre'] += ' / ' + genre
            if not isMatrix():
                _meta['genre'] = unicode(_meta['genre'], 'utf-8')
        elif 'parts' in meta:
            # This is a collection: use the genre of its first movie.
            genres = self.getGenresFromIDs(meta['parts'][0]['genre_ids'])
            _meta['genre'] = ''
            for genre in genres:
                if _meta['genre'] == '':
                    _meta['genre'] += genre
                else:
                    _meta['genre'] += ' / ' + genre
            if not isMatrix():
                _meta['genre'] = unicode(_meta['genre'], 'utf-8')
        trailer_id = ''
        if 'trailer' in meta and meta['trailer']:
            # Read from the cache.
            _meta['trailer'] = meta['trailer']
        elif 'trailers' in meta:
            # Movie trailer returned by TMDB.
            try:
                # Look for a French ('VF') trailer first.
                trailers = meta['trailers']['youtube']
                for trailer in trailers:
                    if trailer['type'] == 'Trailer':
                        if 'VF' in trailer['name']:
                            trailer_id = trailer['source']
                            break
                # No French trailer: take the first one.
                if not trailer_id:
                    trailer_id = meta['trailers']['youtube'][0]['source']
                _meta['trailer'] = self.URL_TRAILER % trailer_id
            except:
                pass
        elif 'videos' in meta and meta['videos']:
            # TV show trailer returned by TMDB.
            try:
                # Look for a French trailer first.
                trailers = meta['videos']
                if len(trailers['results']) > 0:
                    for trailer in trailers['results']:
                        if trailer['type'] == 'Trailer' and trailer['site'] == 'YouTube':
                            trailer_id = trailer['key']  # at least a trailer, maybe not French
                            if 'fr' in trailer['iso_639_1']:
                                trailer_id = trailer['key']
                                break
                # No trailer: take the first available video.
                if not trailer_id:
                    trailer_id = meta['videos'][0]['key']
                _meta['trailer'] = self.URL_TRAILER % trailer_id
            except:
                pass
        if 'credits' in meta and meta['credits']:
            # Code from https://github.com/jurialmunkey/plugin.video.themoviedb.helper/blob/matrix/resources/lib/tmdb/mapping.py
            # Merge consecutive credits of the same actor into one cast entry.
            cast_list = []
            if meta.get('credits', {}).get('cast'):
                cast_list += meta['credits']['cast']
            cast = []
            cast_item = None
            for i in sorted(cast_list, key=lambda k: k.get('order', 0)):
                if cast_item:
                    if cast_item.get('name') != i.get('name'):
                        cast.append(cast_item)
                        cast_item = None
                    elif i.get('character'):
                        # Same actor again: append the extra character name.
                        if 'role' in cast_item:
                            cast_item['role'] = u'{} / {}'.format(
                                cast_item['role'], i['character'])
                    else:
                        # NOTE(review): nesting reconstructed from a mangled
                        # source — this else is assumed to close the name/
                        # character chain; confirm against upstream.
                        cast_item = None
                if not cast_item:
                    cast_item = {
                        'id': i.get('id'),
                        'name': i.get('name'),
                        'character': i.get('character'),
                        'order': i.get('order')
                    }
                    if i.get('profile_path'):
                        cast_item['thumbnail'] = self.poster + i['profile_path']
            if cast_item:
                cast.append(cast_item)
            _meta['cast'] = json.dumps(cast)
        # Not in the cache: extract director/writers from the TMDB crew list.
        if not _meta['director'] and not _meta['writer']:
            crews = []
            if "credits" in meta:
                # NOTE(review): eval(str(...)) round-trip of API data — works
                # like a deep copy here but is fragile; confirm intent.
                crews = eval(str(meta['credits']['crew']))
                _meta['crew'] = json.dumps(crews)
            elif "crew" in meta:
                # Episode case: crew is at the top level.
                crews = eval(str(meta['crew']))
            if len(crews) > 0:
                for crew in crews:
                    if crew['job'] == 'Director':
                        _meta['director'] = crew['name']
                    elif crew['department'] == 'Writing':
                        if _meta['writer'] != '':
                            _meta['writer'] += ' / '
                        _meta['writer'] += '%s (%s)' % (crew['job'], crew['name'])
                    elif crew['department'] == 'Production' and 'Producer' in crew['job']:
                        if _meta['writer'] != '':
                            _meta['writer'] += ' / '
                        _meta['writer'] += '%s (%s)' % (crew['job'], crew['name'])
        # Certification: prefer the French one, else the first available.
        if _meta["mpaa"] == "":
            try:
                cert = meta['release_dates']
                if len(cert['results']) > 0:
                    for data in cert['results']:
                        if 'fr' in data['iso_3166_1']:
                            _meta['mpaa'] = data['release_dates'][0]['certification']
                            break
                    if not _meta['mpaa']:
                        _meta['mpaa'] = cert['results'][0]['release_dates'][0]['certification']
            except:
                pass
        # Turn relative image paths into full URLs.
        if _meta['poster_path']:
            _meta['poster_path'] = self.poster + _meta['poster_path']
        if _meta['backdrop_path']:
            _meta['backdrop_path'] = self.fanart + _meta['backdrop_path']
        return _meta

    def _clean_title(self, title):
        """Lower-case the title, strip accents and all non-alphanumerics."""
        # Strip accents.
        try:
            title = unicode(title, 'utf-8')
            title = unicodedata.normalize('NFD', title).encode(
                'ascii', 'ignore').decode('unicode_escape')
            if not isMatrix():
                title = title.encode('utf-8')  # back to utf-8 on Python 2
        except Exception as e:
            pass
        # Remove every character that is not a lowercase letter or digit.
        title = re.sub('[^%s]' % (string.ascii_lowercase + string.digits), '', title.lower())
        return title

    def _cache_search(self, media_type, name, tmdb_id='', year='', season='', episode=''):
        """Look a record up in the local sqlite cache.

        Returns the row as a dict, or None when not found / on error.
        NOTE(review): queries are built with %-interpolation — assumed safe
        because 'name' goes through _clean_title first; confirm for tmdb_id.
        """
        if media_type == 'movie':
            sql_select = 'SELECT * FROM movie'
            if tmdb_id:
                sql_select = sql_select + ' WHERE tmdb_id = \'%s\'' % tmdb_id
            else:
                sql_select = sql_select + ' WHERE title = \'%s\'' % name
                if year:
                    sql_select = sql_select + ' AND year = %s' % year
        elif media_type == 'collection':
            sql_select = 'SELECT * FROM saga'
            if tmdb_id:
                sql_select = sql_select + ' WHERE tmdb_id = \'%s\'' % tmdb_id
            else:
                sql_select = sql_select + ' WHERE title = \'%s\'' % name
        elif media_type == 'tvshow' or media_type == 'anime':
            sql_select = 'SELECT * FROM tvshow'
            if tmdb_id:
                sql_select = sql_select + ' WHERE tvshow.tmdb_id = \'%s\'' % tmdb_id
            else:
                sql_select = sql_select + ' WHERE tvshow.title = \'%s\'' % name
                if year:
                    sql_select = sql_select + ' AND tvshow.year = %s' % year
        elif media_type == 'season':
            sql_select = 'SELECT *, season.poster_path, season.premiered, ' \
                'season.year, season.plot FROM season LEFT JOIN tvshow ON season.tmdb_id = tvshow.tmdb_id'
            if tmdb_id:
                sql_select = sql_select + ' WHERE tvshow.tmdb_id = \'%s\'' % tmdb_id
            else:
                sql_select = sql_select + ' WHERE tvshow.title = \'%s\'' % name
            sql_select = sql_select + ' AND season.season = \'%s\'' % season
        elif media_type == 'episode':
            sql_select = 'SELECT *, episode.title, episode.poster_path, episode.premiered, '\
                'episode.guest_stars, episode.year, episode.plot, '\
                'episode.director, episode.writer, episode.rating, episode.votes '\
                'FROM episode LEFT JOIN tvshow ON episode.tmdb_id = tvshow.tmdb_id'
            if tmdb_id:
                sql_select += ' WHERE tvshow.tmdb_id = \'%s\'' % tmdb_id
            else:
                sql_select += ' WHERE tvshow.title = \'%s\'' % name
            sql_select += ' AND episode.season = \'%s\' AND episode.episode = \'%s\'' % (
                season, episode)
        else:
            return None
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchone()
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e):
                # For shows, both tables have to be rebuilt.
                if media_type == "tvshow":
                    self.__createdb('tvshow')
                    self.__createdb('season')
                else:
                    self.__createdb(media_type)
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(sql_select)
                matchedrow = self.dbcur.fetchone()
            else:
                VSlog('************* Error selecting from cache db: %s' % e, 4)
                return None
        if matchedrow:
            # Found meta information by name in the cache table.
            return dict(matchedrow)
        else:
            # No match in the local DB.
            return None

    def _cache_save(self, meta, name, media_type, season, episode, year):
        """Dispatch a formatted meta dict to the right per-type cache writer."""
        # No caching for persons or networks.
        if media_type in ('person', 'network'):
            return
        # Movie cache.
        if media_type == 'movie':
            return self._cache_save_movie(meta, name, year)
        # TV show and anime cache.
        if media_type == 'tvshow' or media_type == 'anime':
            return self._cache_save_tvshow(meta, name, season, year)
        # Season cache.
        if media_type == "season":
            return self._cache_save_season(meta, season)
        # Episode cache.
        if media_type == "episode":
            return self._cache_save_episode(meta, name, season, episode)
        # Collection cache.
        if media_type == 'collection':
            return self._cache_save_collection(meta, name)

    # Save a movie into the cache DB.
    def _cache_save_movie(self, meta, name, year):
        # 'year' is not necessarily the movie's year but the year used for the
        # search.
        if not year and 'year' in meta:
            year = meta['year']
        try:
            sql = 'INSERT or IGNORE INTO movie (imdb_id, tmdb_id, title, year, cast, crew, writer, director, tagline, rating, votes, duration, ' \
                'plot, mpaa, premiered, genre, studio, status, poster_path, trailer, backdrop_path) ' \
                'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(
                sql,
                (meta['imdb_id'], meta['tmdb_id'], name, year, meta['cast'],
                 meta['crew'], meta['writer'], meta['director'],
                 meta['tagline'], meta['rating'], meta['votes'],
                 str(meta['duration']), meta['plot'], meta['mpaa'],
                 meta['premiered'], meta['genre'], meta['studio'],
                 meta['status'], meta['poster_path'],
                 meta['trailer'], meta['backdrop_path']))
            self.db.commit()
        except Exception as e:
            VSlog(str(e))
            if 'no such column' in str(e) or 'no column named' in str(
                    e) or "no such table" in str(e):
                self.__createdb('movie')
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(
                    sql,
                    (meta['imdb_id'], meta['tmdb_id'], name, year, meta['cast'],
                     meta['crew'], meta['writer'], meta['director'],
                     meta['tagline'], meta['rating'], meta['votes'],
                     str(meta['duration']), meta['plot'], meta['mpaa'],
                     meta['premiered'], meta['genre'], meta['studio'],
                     meta['status'], meta['poster_path'],
                     meta['trailer'], meta['backdrop_path']))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT into table movie')
                pass

    # Cache for TV shows (and animes).
    def _cache_save_tvshow(self, meta, name, season, year):
        # Write the seasons to the cache first.
        for s_meta in meta['season']:
            s_meta['tmdb_id'] = meta['tmdb_id']
            self._cache_save_season(s_meta, season)
        if not year and 'year' in meta:
            year = meta['year']
        # Save the show itself.
        try:
            sql = 'INSERT or IGNORE INTO tvshow (imdb_id, tmdb_id, title, year, cast, crew, writer, director, rating, votes, duration, ' \
                'plot, mpaa, premiered, genre, studio, status, poster_path, trailer, backdrop_path, nbseasons) ' \
                'VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(
                sql,
                (meta['imdb_id'], meta['tmdb_id'], name, year, meta['cast'],
                 meta['crew'], meta['writer'], meta['director'],
                 meta['rating'], meta['votes'], meta['duration'],
                 meta['plot'], meta['mpaa'], meta['premiered'], meta['genre'],
                 meta['studio'], meta['status'], meta['poster_path'],
                 meta['trailer'], meta['backdrop_path'], meta['nbseasons']))
            self.db.commit()
        except Exception as e:
            VSlog(str(e))
            if 'no such column' in str(e) or 'no column named' in str(e):
                self.__createdb('tvshow')
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(
                    sql,
                    (meta['imdb_id'], meta['tmdb_id'], name, year, meta['cast'],
                     meta['crew'], meta['writer'], meta['director'],
                     meta['rating'], meta['votes'], meta['duration'],
                     meta['plot'], meta['mpaa'], meta['premiered'], meta['genre'],
                     meta['studio'], meta['status'], meta['poster_path'],
                     meta['trailer'], meta['backdrop_path'], meta['nbseasons']))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT into table tvshow')
                pass

    # Cache for seasons.
    def _cache_save_season(self, meta, season):
        # Premiere date: API field first, then cache field, else 0.
        if 'air_date' in meta and meta['air_date']:
            premiered = meta['air_date']
        elif 'premiered' in meta and meta['premiered']:
            premiered = meta['premiered']
        else:
            premiered = 0
        s_year = 0
        if 'year' in meta and meta['year']:
            s_year = meta['year']
        else:
            try:
                if premiered:
                    s_year = int(premiered[:4])
            except:
                pass
        # Season number: API field first, then cache field.
        if 'season_number' in meta:
            season = meta['season_number']
        elif 'season' in meta:
            season = meta['season']
        if 'overview' in meta:
            plot = meta.get('overview', "")
        else:
            plot = ""
        if meta['poster_path']:
            fanart = self.poster + meta['poster_path']
        else:
            fanart = ""
        try:
            sql = 'INSERT or IGNORE INTO season (tmdb_id, season, year, premiered, poster_path, plot, episode) VALUES '\
                '(?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(sql, (meta['tmdb_id'], season, s_year,
                                     premiered, fanart, plot,
                                     meta.get('episode_count', 0)))
            self.db.commit()
        except Exception as e:
            VSlog(str(e))
            if 'no such column' in str(e) or 'no column named' in str(e):
                self.__createdb('season')
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(
                    sql, (meta['tmdb_id'], season, s_year, premiered, fanart,
                          plot, meta.get('episode_count', 0)))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT into table season')
                pass

    # Cache for episodes.
    def _cache_save_episode(self, meta, name, season, episode):
        try:
            # The cache key encodes show title, season and episode.
            title = name + '_S' + season + 'E' + episode
            sql = 'INSERT or IGNORE INTO episode (tmdb_id, originaltitle, season, episode, year, title, premiered, poster_path, plot, rating, votes, director, writer, guest_stars, tagline) VALUES ' \
                '(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(
                sql,
                (meta['tmdb_id'], title, season, episode, meta['year'], title,
                 meta['premiered'], meta['poster_path'], meta['plot'],
                 meta['rating'], meta['votes'], meta['director'],
                 meta['writer'], ''.join(
                     meta.get('guest_stars', "")), meta["tagline"]))
            self.db.commit()
        except Exception as e:
            VSlog(str(e))
            if 'no such column' in str(e) or 'no column named' in str(e):
                self.__createdb('episode')
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(
                    sql,
                    (meta['tmdb_id'], title, season, episode, meta['year'],
                     title, meta['premiered'], meta['poster_path'],
                     meta['plot'], meta['rating'], meta['votes'],
                     meta['director'], meta['writer'], ''.join(
                         meta.get('guest_stars', "")), meta["tagline"]))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT into table episode')
                pass

    # Cache for collections (sagas).
    def _cache_save_collection(self, meta, name):
        try:
            sql = 'INSERT or IGNORE INTO saga (tmdb_id, title, plot, genre, poster_path, backdrop_path) VALUES ' \
                '(?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(
                sql, (meta['tmdb_id'], name, meta['plot'], meta['genre'],
                      meta['poster_path'], meta["backdrop_path"]))
            self.db.commit()
        except Exception as e:
            VSlog(str(e))
            if 'no such column' in str(e) or 'no column named' in str(
                    e) or "no such table" in str(e):
                self.__createdb('saga')
                VSlog('Table recreated')
                # Second attempt after rebuilding the schema.
                self.dbcur.execute(
                    sql, (meta['tmdb_id'], name, meta['plot'], meta['genre'],
                          meta['poster_path'], meta["backdrop_path"]))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT into table saga')
                pass

    def get_meta(self, media_type, name, imdb_id='', tmdb_id='', year='',
                 season='', episode='', update=False):
        """
        Main method to get meta data for movie or tvshow. Will lookup by
        name/year if no IMDB ID supplied.
        Args:
            media_type (str): 'movie' or 'tvshow'
            name (str): full name of movie/tvshow you are searching
        Kwargs:
            imdb_id (str): IMDB ID
            tmdb_id (str): TMDB ID
            year (str): 4 digit year of video, recommended to include the year
                whenever possible to maximize correct search results.
            season (int)
            episode (int)
        Returns:
            DICT of meta data or None if cannot be found.
        """
        name = re.sub(" +", " ", name)  # clean up the title
        name = name.replace('VF', '').replace('VOSTFR', '')
        # Look the record up in the local database first.
        if not update:
            # Required to point at the right rows in the database.
            if not tmdb_id:
                # Strip any trailing "saison N" marker so the cache key matches.
                if media_type in ("season", "tvshow", "anime", "episode"):
                    name = re.sub(
                        '(?i)( s(?:aison +)*([0-9]+(?:\-[0-9\?]+)*))(?:([^"]+)|)',
                        '', name)
            meta = self._cache_search(media_type, self._clean_title(name),
                                      tmdb_id, year, season, episode)
            if meta:
                return meta
        # Online lookup.
        meta = {}
        if media_type == 'movie':
            if tmdb_id:
                meta = self.search_movie_id(tmdb_id)
            elif name:
                meta = self.search_movie_name(name, year)
        elif media_type == 'tvshow':
            if tmdb_id:
                meta = self.search_tvshow_id(tmdb_id)
            elif name:
                meta = self.search_tvshow_name(name, year)
        elif media_type == 'season':
            if tmdb_id:
                meta = self.search_season_id(tmdb_id, season)
            else:
                # Recover the id by looking up the show (possibly cached).
                meta = self.get_meta('tvshow', name, year=year)
                if 'tmdb_id' in meta and meta['tmdb_id']:
                    return self.get_meta('season', name,
                                         tmdb_id=meta['tmdb_id'],
                                         year=year, season=season)
        elif media_type == 'episode':
            if tmdb_id:
                # No name lookup without a tmdb_id: we would already have one
                # if the show were known.
                meta = self.search_episode_id(tmdb_id, season, episode)
        elif media_type == 'anime':
            if tmdb_id:
                meta = self.search_tvshow_id(tmdb_id)
            elif name:
                meta = self.search_tvshow_name(name, year, genre=16)
        elif media_type == 'collection':
            if tmdb_id:
                meta = self.search_collection_id(tmdb_id)
            elif name:
                meta = self.search_collection_name(name)
        elif media_type == 'person':
            if tmdb_id:
                meta = self.search_person_id(tmdb_id)
            elif name:
                meta = self.search_person_name(name)
        elif media_type == 'network':
            if tmdb_id:
                meta = self.search_network_id(tmdb_id)
        # Format the metas when something was found.
        if meta and 'tmdb_id' in meta:
            meta = self._format(meta, name, media_type)
            # Save into the cache.
            self._cache_save(meta, self._clean_title(name), media_type,
                             season, episode, year)
        elif meta != False:
            # Initialise an empty meta.
            meta = self._format(meta, name)
        else:
            meta = {}
        return meta

    def getUrl(self, url, page=1, term=''):
        # Call an API url (e.g. 'movie/popular') for the requested page.
        try:
            if term:
                term = term + '&page=' + str(page)
            else:
                term = 'page=' + str(page)
            result = self._call(url, term)
        except:
            return False
        return result

    def _call(self, action, append_to_response=''):
        """Perform one TMDB API request and return the decoded JSON."""
        url = '%s%s?language=%s&api_key=%s' % (self.URL, action, self.lang,
                                               self.api_key)
        if append_to_response:
            url += '&%s' % append_to_response
        oRequestHandler = cRequestHandler(url)
        data = oRequestHandler.request(jsonDecode=True)
        return data

    # Return the list of genre names (text) from a list of genre IDs.
    def getGenresFromIDs(self, genresID):
        sGenres = []
        for gid in genresID:
            genre = self.TMDB_GENRES.get(gid)
            if genre:
                sGenres.append(genre)
        return sGenres

    # Return the genre name (text) from a genre ID.
    def getGenreFromID(self, genreID):
        if not str(genreID).isdigit():
            return genreID
        genre = self.TMDB_GENRES.get(genreID)
        if genre:
            return genre
        return genreID
import re import os import xbmcaddon from resources.lib.comaddon import VSlog, xbmc, VSPath from resources.lib.handler.requestHandler import cRequestHandler try: # Python 2 import urllib2 except ImportError: # Python 3 import urllib.request as urllib2 PathCache = VSPath( xbmcaddon.Addon('plugin.video.vstream').getAddonInfo('profile')) UA = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0' class Stormwall(object): def __init__(self): self.cE = '' self.cK = '' self.cN = '' self.cO = '' self._0xbd1168 = "0123456789qwertyuiopasdfghjklzxcvbnm:?!" self.a = [] self.b = {} self.state = False self.hostComplet = '' self.host = '' self.url = ''
class cDb:
    """Sqlite wrapper for the addon database (history, watched, resume,
    favorites, viewing-in-progress, downloads).

    NOTE(review): most SELECT/DELETE statements below are built with
    %-interpolation of titles/urls — SQL-injection-prone; parameterized
    queries would be safer.
    """
    # Find the current Kodi profile via JSON-RPC.
    request = {
        "jsonrpc": "2.0",
        "method": "Profiles.GetCurrentProfile",
        "params": {
            "properties": ["thumbnail", "lockmode"]
        },
        "id": 1
    }
    req = json.dumps(request)
    response = xbmc.executeJSONRPC(req)
    # Extract the profile name.
    name = json.loads(response)['result']['label']
    # Default case: master profile uses the shared addon_data path.
    if name == 'Master user':
        DB = 'special://home/userdata/addon_data/plugin.video.vstream/vstream.db'
    else:
        DB = 'special://home/userdata/profiles/' + name + '/addon_data/plugin.video.vstream/vstream.db'
    try:
        REALDB = VSPath(DB).decode('utf-8')  # Python 2: VSPath returns bytes
    except AttributeError:
        REALDB = VSPath(DB)  # Python 3: already str
    del request, req, name, response  # drop the temporary objects

    def __init__(self):
        VSlog('DB engine for db : ' + sqlite.__name__)
        # First run: the db file does not exist yet, create the schema.
        # NOTE(review): the existence check uses self.DB (special:// path)
        # while connect uses self.REALDB — confirm xbmcvfs resolves both.
        try:
            if not xbmcvfs.exists(self.DB):
                self.db = sqlite.connect(self.REALDB)
                self.db.row_factory = sqlite.Row
                self.dbcur = self.db.cursor()
                self._create_tables()
                return
        except:
            VSlog('Error: Unable to write to %s' % self.REALDB)
            pass
        # Normal path: open the existing database.
        try:
            self.db = sqlite.connect(self.REALDB)
            self.db.row_factory = sqlite.Row
            self.dbcur = self.db.cursor()
        except:
            VSlog('Error: Unable to access to %s' % self.REALDB)
            pass

    def __del__(self):
        ''' Cleanup db when object destroyed '''
        try:
            self.dbcur.close()
            self.db.close()
        except Exception as e:
            pass

    def _create_tables(self, dropTable=''):
        """Create all tables; optionally drop one table first to rebuild it."""
        if dropTable != '':
            self.dbcur.execute("DROP TABLE IF EXISTS " + dropTable)
            self.db.commit()
        ''' Create table '''
        # NOTE(review): the '""' below concatenates an empty string, yielding
        # 'lastwatched TIMESTAMP , ' — harmless but probably unintended.
        sql_create = "CREATE TABLE IF NOT EXISTS history ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "disp TEXT, "\
            "icone TEXT, "\
            "isfolder TEXT, "\
            "level TEXT, "\
            "lastwatched TIMESTAMP "", "\
            "UNIQUE(title)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS resume ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "hoster TEXT, "\
            "point TEXT, "\
            "total TEXT, "\
            "UNIQUE(title, hoster)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS watched ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "cat TEXT, "\
            "UNIQUE(title)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS favorite ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "siteurl TEXT, "\
            "site TEXT, "\
            "fav TEXT, "\
            "cat TEXT, "\
            "icon TEXT, "\
            "fanart TEXT, "\
            "UNIQUE(title, site)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS viewing ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "tmdb_id TEXT, "\
            "title_id TEXT, "\
            "title TEXT, "\
            "siteurl TEXT, "\
            "site TEXT, "\
            "fav TEXT, "\
            "cat TEXT, "\
            "season integer, "\
            "UNIQUE (title_id)"\
            ");"
        self.dbcur.execute(sql_create)
        sql_create = "CREATE TABLE IF NOT EXISTS download ("\
            "addon_id integer PRIMARY KEY AUTOINCREMENT, "\
            "title TEXT, "\
            "url TEXT, "\
            "path TEXT, "\
            "cat TEXT, "\
            "icon TEXT, "\
            "size TEXT,"\
            "totalsize TEXT, "\
            "status TEXT, "\
            "UNIQUE(title, path)"\
            ");"
        self.dbcur.execute(sql_create)
        VSlog('Table initialized')

    # Do not use this function for paths.
    def str_conv(self, data):
        """Normalise a title string for storage (accent stripping on Py2)."""
        if not isMatrix():
            if isinstance(data, str):
                # Must be encoded in UTF-8.
                try:
                    data = data.decode('utf8')
                except AttributeError:
                    pass
                import unicodedata
                data = unicodedata.normalize('NFKD', data).encode('ascii',
                                                                  'ignore')
                try:
                    data = data.decode('string-escape')
                    # WARNING: causes bugs for paths because of the '/' character
                except:
                    pass
        else:
            data = data.encode().decode()
        return data

    # ***********************************
    # History fonctions
    # ***********************************

    def insert_history(self, meta):
        """Insert a search entry; on duplicate title, update it instead."""
        title = self.str_conv(Unquote(meta['title']))
        disp = meta['disp']
        icon = 'icon.png'
        try:
            ex = 'INSERT INTO history (title, disp, icone) VALUES (?, ?, ?)'
            self.dbcur.execute(ex, (title, disp, icon))
            self.db.commit()
            VSlog('SQL INSERT history Successfully')
        except Exception as e:
            # NOTE(review): e.message does not exist on Python 3 — this
            # handler would itself raise AttributeError there; use str(e).
            if 'UNIQUE constraint failed' in e.message:
                ex = "UPDATE history set title = '%s', disp = '%s', icone= '%s' WHERE title = '%s'" % (title, disp, icon, title)
                self.dbcur.execute(ex)
                self.db.commit()
                VSlog('SQL UPDATE history Successfully')
            VSlog('SQL ERROR INSERT, title = %s, %s' % (title, e))
            pass

    def get_history(self):
        """Return all history rows, or None on error."""
        sql_select = 'SELECT * FROM history'
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception as e:
            VSlog('SQL ERROR EXECUTE, %s' % e)
            return None

    def del_history(self):
        """Delete one history entry (if 'searchtext' param set) or all."""
        from resources.lib.gui.gui import cGui
        oGui = cGui()
        oInputParameterHandler = cInputParameterHandler()
        if oInputParameterHandler.exist('searchtext'):
            sql_delete = "DELETE FROM history WHERE title = '%s'" % (
                oInputParameterHandler.getValue('searchtext'))
        else:
            sql_delete = 'DELETE FROM history;'
        try:
            self.dbcur.execute(sql_delete)
            self.db.commit()
            dialog().VSinfo(addon().VSlang(30041))
            oGui.updateDirectory()
            return False, False
        except Exception as e:
            VSlog('SQL ERROR DELETE : %s' % sql_delete)
            return False, False

    # ***********************************
    # Watched fonctions
    # ***********************************

    def insert_watched(self, meta):
        """Mark a title as watched (category defaults to '1' = movies)."""
        title = meta['title']
        if not title:
            return
        cat = meta['cat'] if 'cat' in meta else '1'
        ex = 'INSERT or IGNORE INTO watched (title, cat) VALUES (?, ?)'
        try:
            self.dbcur.execute(ex, (title, cat))
            self.db.commit()
            VSlog('SQL INSERT watched Successfully')
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e) or 'no such table' in str(e):
                if 'named cat' in str(e):
                    # Schema migration: add the new 'cat' column.
                    self.dbcur.execute("ALTER TABLE watched add column cat TEXT")
                    self.db.commit()
                VSlog('Table recreated : watched')
                # Second attempt after fixing the schema.
                self.dbcur.execute(ex, (title, cat))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT watched : title = %s' % e)

    def get_watched(self, meta):
        """Return True/False whether the title is watched; for rows storing a
        category, the category must match too (movie/show homonyms)."""
        title = meta['title']
        if not title:
            return False
        cat = meta['cat'] if 'cat' in meta else '1'
        sql_select = "SELECT * FROM watched WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            # Movie/show homonyms: when a category is stored, check it.
            for data in matchedrow:
                matchedcat = data['cat']
                if matchedcat:
                    return int(matchedcat) == int(cat)
            if matchedrow:
                return True
            return False
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e) or 'no such table' in str(e):
                # Second attempt without the category.
                sql_select = "SELECT * FROM watched WHERE title = '%s'" % title
                self.dbcur.execute(sql_select)
                return 1 if self.dbcur.fetchall() else 0
            else:
                VSlog('SQL ERROR %s' % sql_select)
                return False

    def del_watched(self, meta):
        """Remove a title from the watched table."""
        title = meta['title']
        if not title:
            return
        sql_select = "DELETE FROM watched WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception as e:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    # ***********************************
    # Resume fonctions
    # ***********************************

    def insert_resume(self, meta):
        """Store the playback resume point (replaces any previous row)."""
        title = self.str_conv(meta['title'])
        site = QuotePlus(meta['site'])
        point = meta['point']
        total = meta['total']
        # Drop any previous resume point for this title.
        ex = "DELETE FROM resume WHERE title = '%s'" % title
        try:
            self.dbcur.execute(ex)
        except Exception as e:
            VSlog('SQL ERROR - ' + ex)
            pass
        try:
            ex = 'INSERT INTO resume (title, hoster, point, total) VALUES (?, ?, ?, ?)'
            self.dbcur.execute(ex, (title, site, point, total))
            self.db.commit()
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e) or 'no such table' in str(e):
                self._create_tables('resume')
                VSlog('Table recreated : resume')
                # Second attempt after rebuilding the table.
                self.dbcur.execute(ex, (title, site, point, total))
                self.db.commit()
            else:
                VSlog('SQL ERROR INSERT : %s' % e)

    def get_resume(self, meta):
        """Return (point, total) for a title, or (False, False)."""
        title = self.str_conv(meta['title'])
        sql_select = "SELECT point, total FROM resume WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchone()
            if not matchedrow:
                return False, False
            return float(matchedrow[0]), float(matchedrow[1])
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e):
                self._create_tables('resume')
                VSlog('Table recreated : resume')
            else:
                VSlog('SQL ERROR : %s' % e)
            return False, False

    def del_resume(self, meta):
        """Delete the resume point of a title."""
        title = QuotePlus(meta['title'])
        sql_select = "DELETE FROM resume WHERE title = '%s'" % title
        try:
            self.dbcur.execute(sql_select)
            self.db.commit()
            return False, False
        except Exception as e:
            VSlog('SQL ERROR %s' % sql_select)
            return False, False

    # ***********************************
    # Bookmark fonctions
    # ***********************************

    def insert_bookmark(self, meta):
        """Add a favorite; on duplicate, just notify the user."""
        title = self.str_conv(meta['title'])
        siteurl = QuotePlus(meta['siteurl'])
        try:
            sIcon = meta['icon'].decode('UTF-8')
        except:
            sIcon = meta['icon']
        try:
            ex = 'INSERT INTO favorite (title, siteurl, site, fav, cat, icon, fanart) VALUES (?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(ex, (title, siteurl, meta['site'], meta['fav'],
                                    meta['cat'], sIcon, meta['fanart']))
            self.db.commit()
            dialog().VSinfo(addon().VSlang(30042), meta['title'])
            VSlog('SQL INSERT favorite Successfully')
        except Exception as e:
            # NOTE(review): e.message does not exist on Python 3 — use str(e).
            if 'UNIQUE constraint failed' in e.message:
                dialog().VSinfo(addon().VSlang(30043), meta['title'])
            VSlog('SQL ERROR INSERT : %s' % e)
            pass

    def get_bookmark(self):
        """Return all favorites, newest first, or None on error."""
        sql_select = 'SELECT * FROM favorite order by addon_id desc'
        try:
            self.dbcur.execute(sql_select)
            matchedrow = self.dbcur.fetchall()
            return matchedrow
        except Exception as e:
            VSlog('SQL ERROR EXECUTE')
            return None

    def del_bookmark(self, sSiteUrl='', sMovieTitle='', sCat='', sAll=False):
        """Delete favorites by title+url, by url, by category, or all."""
        sql_delete = None
        # Delete everything.
        if sAll:
            sql_delete = 'DELETE FROM favorite;'
        # Delete one bookmark by title.
        elif sMovieTitle:
            siteUrl = QuotePlus(sSiteUrl)
            title = self.str_conv(sMovieTitle)
            title = title.replace("'", r"''")  # escape quotes for SQL
            sql_delete = "DELETE FROM favorite WHERE siteurl = '%s' AND title = '%s'" % (siteUrl, title)
        # Delete one bookmark by url.
        elif sSiteUrl:
            siteUrl = QuotePlus(sSiteUrl)
            sql_delete = "DELETE FROM favorite WHERE siteurl = '%s'" % siteUrl
        # Delete a whole category.
        elif sCat:
            catList = ('1', '7')  # movies, sagas
            if sCat not in catList:
                catList = ('2', '3', '4', '8')
            if sCat not in catList:
                catList = ('0', sCat)
            sql_delete = "DELETE FROM favorite WHERE cat in %s" % str(catList)
        if sql_delete:
            from resources.lib.gui.gui import cGui
            try:
                self.dbcur.execute(sql_delete)
                self.db.commit()
                update = self.db.total_changes
                if not update and sSiteUrl and sMovieTitle:
                    # Not found: retry with the URL only.
                    return self.del_bookmark(sSiteUrl)
                dialog().VSinfo(addon().VSlang(30044))
                cGui().updateDirectory()
                return True
            except Exception as e:
                VSlog('SQL ERROR %s' % sql_delete)
                return False

    # ***********************************
    # InProgress fonctions
    # ***********************************

    def insert_viewing(self, meta):
        """Record a series as 'viewing in progress' (one row per title_id)."""
        if not 'title' in meta:
            return
        if not 'siteurl' in meta:
            return
        title = self.str_conv(meta['title'])
        titleWatched = self.str_conv(meta['titleWatched'])
        siteurl = QuotePlus(meta['siteurl'])
        cat = meta['cat']
        saison = meta['season'] if 'season' in meta else ''
        sTmdbId = meta['sTmdbId'] if 'sTmdbId' in meta else ''
        # Replace any previous row for this title/category.
        ex = "DELETE FROM viewing WHERE title_id = '%s' and cat = '%s'" % (titleWatched, cat)
        try:
            self.dbcur.execute(ex)
        except Exception as e:
            VSlog('SQL ERROR - ' + ex)
            pass
        try:
            ex = 'INSERT INTO viewing (tmdb_id, title_id, title, siteurl, site, fav, cat, season) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
            self.dbcur.execute(ex, (sTmdbId, titleWatched, title, siteurl,
                                    meta['site'], meta['fav'], cat, saison))
            self.db.commit()
            VSlog('SQL INSERT viewing Successfully')
        except Exception as e:
            if 'no such column' in str(e) or 'no column named' in str(e) or 'no such table' in str(e):
                self._create_tables('viewing')
                VSlog('Table recreated : viewing')
                # Second attempt after rebuilding the table.
self.dbcur.execute(ex, (meta['sTmdbId'], titleWatched, title, siteurl, meta['site'], meta['fav'], meta['cat'], saison, episode)) self.db.commit() else: VSlog('SQL ERROR INSERT : %s' % e) pass def get_viewing(self): sql_select = "SELECT * FROM viewing group by title order by addon_id DESC" try: self.dbcur.execute(sql_select) matchedrow = self.dbcur.fetchall() return matchedrow except Exception as e: VSlog('SQL ERROR : %s' % sql_select) return None def del_viewing(self, meta): sTitleWatched = meta['titleWatched'] if 'titleWatched' in meta else None sql_deleteCat = "" if not sTitleWatched: # delete all sql_delete = "DELETE FROM viewing" else: sql_deleteTitle = "DELETE FROM viewing WHERE title_id = '%s'" % sTitleWatched if 'cat' in meta: sql_deleteCat = " and cat = '%s'" %meta['cat'] sql_delete = sql_deleteTitle + sql_deleteCat update = 0 from resources.lib.gui.gui import cGui try: self.dbcur.execute(sql_delete) self.db.commit() update = self.db.total_changes # si pas trouvé, on essaie sans la cat, juste le titre if not update and sql_deleteCat: del meta['cat'] return self.del_viewing(meta) return True except Exception as e: VSlog('SQL ERROR %s, error = %s' % (sql_delete, e)) return update # *********************************** # Download fonctions # *********************************** def insert_download(self, meta): title = self.str_conv(meta['title']) url = QuotePlus(meta['url']) sIcon = QuotePlus(meta['icon']) sPath = meta['path'] ex = 'INSERT INTO download (title, url, path, cat, icon, size, totalsize, status) VALUES (?, ?, ?, ?, ?, ?, ?, ?)' try: self.dbcur.execute(ex, (title,url, sPath,meta['cat'],sIcon, '', '', 0)) self.db.commit() VSlog('SQL INSERT download Successfully') dialog().VSinfo(addon().VSlang(30042), meta['title']) except Exception as e: VSlog('SQL ERROR INSERT into download') pass def get_download(self, meta=''): if meta == '': sql_select = 'SELECT * FROM download' else: url = QuotePlus(meta['url']) sql_select = "SELECT * FROM download WHERE url = 
'%s' AND status = '0'" % url try: self.dbcur.execute(sql_select) matchedrow = self.dbcur.fetchall() return matchedrow except Exception as e: VSlog('SQL ERROR %s' % sql_select) return None def clean_download(self): sql_select = "DELETE FROM download WHERE status = '2'" try: self.dbcur.execute(sql_select) self.db.commit() return False, False except Exception as e: VSlog('SQL ERROR %s' % sql_select) return False, False def reset_download(self, meta): url = QuotePlus(meta['url']) sql_select = "UPDATE download SET status = '0' WHERE status = '2' AND url = '%s'" % url try: self.dbcur.execute(sql_select) self.db.commit() return False, False except Exception as e: VSlog('SQL ERROR %s' % sql_select) return False, False def del_download(self, meta): if len(meta['url']) > 1: url = QuotePlus(meta['url']) sql_select = "DELETE FROM download WHERE url = '%s'" % url elif len(meta['path']) > 1: path = meta['path'] sql_select = "DELETE FROM download WHERE path = '%s'" % path else: return try: self.dbcur.execute(sql_select) self.db.commit() return False, False except Exception as e: VSlog('SQL ERROR %s' % sql_select) return False, False def cancel_download(self): sql_select = "UPDATE download SET status = '0' WHERE status = '1'" try: self.dbcur.execute(sql_select) self.db.commit() return False, False except Exception as e: VSlog('SQL ERROR %s' % sql_select) return False, False def update_download(self, meta): path = meta['path'] size = meta['size'] totalsize = meta['totalsize'] status = meta['status'] sql_select = "UPDATE download set size = '%s', totalsize = '%s', status= '%s' WHERE path = '%s'" % (size, totalsize, status, path) try: self.dbcur.execute(sql_select) self.db.commit() return False, False except Exception as e: VSlog('SQL ERROR %s' % sql_select) return False, False
def __getMediaLinkForGuest(self):
    """Resolve the hoster page into a playable local HLS playlist.

    Follows the 'playerBrand' redirect, lets the user pick a quality,
    downloads the chosen m3u8 and writes it to a temp file whose path
    is returned as (True, path); any failure yields (False, False).
    """
    playlist_special = 'special://temp/video_pstream.m3u8'
    if not isMatrix():
        playlist_path = VSPath(playlist_special).decode('utf-8')
    else:
        playlist_path = VSPath(playlist_special)

    def fetch(target):
        # GET with the browser-like headers the site expects
        handler = cRequestHandler(target)
        handler.addHeaderEntry('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
        handler.addHeaderEntry('Accept-Language', 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3')
        return handler.request()

    parser = cParser()

    # Step 1: extract the player page URL from the landing page.
    landing = fetch(self.__sUrl)
    found = parser.parse(landing, "playerBrand.+?var.+?\'([^\']+)")
    if not found[0]:
        return False, False

    # Step 2: collect (quality name, m3u8 url) pairs from the player page.
    player_page = fetch(found[1][0])
    found = parser.parse(player_page, "NAME=.([^\"']+).+?https([^#]+)")
    if not found[0]:
        return False, False

    qualities = []
    links = []
    for entry in found[1]:
        full_link = 'https' + entry[1].strip()
        qualities.append(entry[0])
        links.append(full_link.strip())

    # Step 3: user picks a quality; download the corresponding playlist.
    selected = dialog().VSselectqual(qualities, links)
    selected = selected.strip()
    playlist = fetch(selected)
    if '#EXT' not in playlist:
        return False, False

    with open(playlist_path, "w") as out:
        out.write(playlist)
    return True, playlist_path
def __getMediaLinkForGuest(self):
    """Resolve a geoip.redirect-ads.com hoster link into a local m3u8.

    Scrapes two obfuscated (AAEncoded) JS files from the page, decodes
    the SD/HD CDN config from them, downloads the first base64 chunk to
    video.mp4 to measure its duration, then writes a playlist.m3u8 whose
    80 segments point at a local proxy (127.0.0.1:2424) that fetches the
    remaining chunks. Returns (True, playlist path) or (False, False).
    """
    api_call = False
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0",
        "Accept-Encoding": "gzip, deflate",
        "Origin": "https://geoip.redirect-ads.com"
    }
    # NOTE(review): urllib.Request / urllib.urlopen only exist if 'urllib'
    # is an alias for urllib.request in this module's imports — confirm.
    req = urllib.Request(self.__sUrl, None, headers)
    with urllib.urlopen(req) as response:
        # 16 + MAX_WBITS: accept a gzip header on the raw stream
        decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)
        sHtmlContent = decomp.decompress(response.read())
    oParser = cParser()
    sPattern = '<script src="(.+?)"'
    aResult = oParser.parse(sHtmlContent, sPattern)
    # second-to-last <script>: holds the default player config
    urlJS = "https://geoip.redirect-ads.com" + aResult[1][-2]
    headers['Referer'] = self.__sUrl
    req = urllib.Request(urlJS, None, headers)
    with urllib.urlopen(req) as response:
        decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)
        sHtmlContent = decomp.decompress(response.read()).decode('utf-8')
    # second part of the data
    aResult1 = re.search('([^>]+)', sHtmlContent, re.DOTALL | re.UNICODE).group(1)
    data = CheckAADecoder(aResult1)
    dataPartOne = json.loads(
        re.search('SoTrymConfigDefault = ([^>]+)"', data).group(1))
    # last <script>: holds the base64-encoded stream identifiers
    urlJS = "https://geoip.redirect-ads.com" + aResult[1][-1]
    req = urllib.Request(urlJS, None, headers)
    with urllib.urlopen(req) as response:
        decomp = zlib.decompressobj(16 + zlib.MAX_WBITS)
        sHtmlContent = decomp.decompress(response.read()).decode('utf-8')
    # first part of the data
    aResult2 = re.search('\}([^>]+)', sHtmlContent, re.DOTALL | re.UNICODE).group(1)
    data = CheckAADecoder(aResult2)
    dataPartTwo = json.loads(
        base64.b64decode(re.search('atob\("(.+?)"', data).group(1)))
    from resources.lib.comaddon import dialog
    url = [dataPartOne["sd"], dataPartOne['hd']]
    qua = ["SD", "HD"]
    # show the quality-selection dialog
    ID = dialog().VSselectqual(qua, url)
    pathfile = VSPath(
        'special://userdata/addon_data/plugin.video.vstream/')
    HosterUrl = "https://cdn.heycdn21.xyz/{}/{}/{}/".format(
        dataPartTwo['md5_id'], ID[0], dataPartOne['pieceLength'])
    # fetch chunk 0 to disk so its duration can be measured
    response = requests.get(HosterUrl + "0/", headers=headers).content
    b64response = base64.b64decode(response)
    with open(pathfile + "video.mp4", "wb") as fh:
        fh.write(b64response)
    duration = get_video_duration(pathfile + "video.mp4")
    # NOTE(review): 80 segments each of duration/79 — looks like an
    # off-by-one (duration/80 expected); confirm against the proxy.
    splitDuration = str(duration / 79)
    data = '#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:%s\n#EXT-X-MEDIA-SEQUENCE:0\n' % str(
        duration)
    i = 0
    while i < 80:
        data += '#EXTINF:%s,\n' % splitDuration
        # each segment URI routes through the local chunk proxy
        data += "http://127.0.0.1:2424?u=" + HosterUrl + str(
            i) + "@" + urlEncode(headers) + ' \n'
        i = i + 1
    data += '#EXT-X-ENDLIST'
    with open(pathfile + "playlist.m3u8", 'w') as file:
        file.write(data)
    api_call = pathfile + "playlist.m3u8"
    if (api_call):
        return True, api_call
    return False, False