def _read_suscription_file():
    logger.info("suscription._read_suscription_file")

    # Read the subscriptions file if it exists
    if os.path.exists(SUSCRIPTIONS_FILE):
        f = open(SUSCRIPTIONS_FILE, "r")
        data = f.read()
        f.close()
    else:
        data = ""

    # Parse subscriptions
    suscriptions = []
    matches = scrapertools.find_multiple_matches(data, "<suscription>(.*?)</suscription>")
    for match in matches:
        channel = scrapertools.find_single_match(match, "<channel>([^<]+)</channel>")
        url = scrapertools.find_single_match(match, "<url>([^<]+)</url>")
        extra = scrapertools.find_single_match(match, "<extra>([^<]+)</extra>")
        action = scrapertools.find_single_match(match, "<action>([^<]+)</action>")
        show_name = scrapertools.find_single_match(match, "<show_name>([^<]+)</show_name>")
        thumbnail = scrapertools.find_single_match(match, "<thumbnail>([^<]+)</thumbnail>")
        # "extra" is parsed but not used when building the Item
        suscriptions.append(Item(channel=channel, url=url, action=action, title=show_name,
                                 show=show_name, thumbnail=thumbnail))

    return suscriptions
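# A minimal sketch of the on-disk entry format _read_suscription_file() expects:
# one <suscription> block per saved show, with the child tags matched by the
# regexes above. Tag names come from those regexes; every value below is an
# illustrative placeholder, not taken from the source.
SAMPLE_SUSCRIPTION_ENTRY = (
    "<suscription>"
    "<channel>examplechannel</channel>"
    "<url>http://example.com/serie</url>"
    "<extra>serie_add</extra>"
    "<action>episodios</action>"
    "<show_name>Example Show</show_name>"
    "<thumbnail>http://example.com/thumb.jpg</thumbnail>"
    "</suscription>"
)
# Appending a block like this to SUSCRIPTIONS_FILE would make _read_suscription_file()
# return one Item with channel/url/action/title/show/thumbnail filled in.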
def get_channel_parameters(channel_name):
    # logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info("streamondemand.core.channeltools get_channel_parameters "+channel_name+".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        # TODO: pending migration to json :)
        channel_parameters = {}
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        logger.info("streamondemand.core.channeltools get_channel_parameters "
                    + channel_name + " -> " + repr(channel_parameters))
    else:
        logger.info("streamondemand.core.channeltools get_channel_parameters "
                    + channel_name + ".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = "false"

    return channel_parameters
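# A minimal sketch of a channels/<channel_name>.xml file as parsed by the
# get_channel_parameters() variants in this section. The tag names are taken from
# the regexes above and below; the root element and all values are illustrative
# assumptions. Later variants additionally read <update_url>, <version>,
# <settings>, <python> and <addon_version>.
SAMPLE_CHANNEL_XML = """<?xml version="1.0" encoding="utf-8"?>
<channel>
    <name>Example Channel</name>
    <id>examplechannel</id>
    <active>true</active>
    <adult>false</adult>
    <language>es</language>
    <thumbnail>examplechannel.png</thumbnail>
    <bannermenu>examplechannel_banner.png</bannermenu>
    <fanart>examplechannel_fanart.jpg</fanart>
    <include_in_global_search>true</include_in_global_search>
    <category>movie</category>
    <category>tvshow</category>
</channel>
"""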
def get_channel_parameters(channel_name):
    # logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")

        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["update_url"] = scrapertools.find_single_match(data, "<update_url>([^<]*)</update_url>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                           "squares", channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                        "fanart", channel_parameters["fanart"])

        if channel_parameters["update_url"] == "":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        logger.info(channel_name + " -> " + repr(channel_parameters))
    else:
        logger.info(channel_name + ".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = "false"
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
def get_channel_parameters(channel_name):
    # logger.info("channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")

        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                           "squares", channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                        "fanart", channel_parameters["fanart"])

        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        logger.info(channel_name + " -> " + repr(channel_parameters))
    else:
        logger.info(channel_name + ".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = "false"

    return channel_parameters
def get_all_settings_addon():
    # Reads the settings.xml file and returns a {id: value} dictionary
    import scrapertools

    infile = open(os.path.join(get_data_path(), "settings.xml"), "r")
    data = infile.read()
    infile.close()

    ret = {}
    matches = scrapertools.find_multiple_matches(data, '<setting id="([^"]*)" value="([^"]*)')
    for id, value in matches:
        ret[id] = value

    return ret
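# A minimal sketch of the Kodi settings.xml lines matched by the regex in
# get_all_settings_addon(); the id and value below are illustrative placeholders,
# not taken from the source.
#
#   <setting id="downloadpath" value="special://profile/addon_data/plugin.video.example/downloads" />
#
# For that single line the function would return (illustrative):
#   {"downloadpath": "special://profile/addon_data/plugin.video.example/downloads"}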
def get_channel_parameters(channel_name):
    logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_name=" + channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), "channels", channel_name + ".xml")

    if os.path.exists(channel_xml):
        logger.info("pelisalacarta.core.channeltools get_channel_parameters " + channel_name + ".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        # TODO: pending migration to json :)
        channel_parameters = {}
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")
        channel_parameters["type"] = "generic"

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_parameters="
                    + repr(channel_parameters))
    else:
        logger.info("pelisalacarta.core.channeltools get_channel_parameters " + channel_name + ".xml NOT found")
        channel_parameters = {}
        channel_parameters["adult"] = "false"

    return channel_parameters
def verify_directories_created():
    from core import logger
    from core import filetools
    from platformcode import xbmc_videolibrary

    config_paths = [["videolibrarypath", "videolibrary"],
                    ["downloadpath", "downloads"],
                    ["downloadlistpath", "downloads/list"],
                    ["settings_path", "settings_channels"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        # Video library
        if path == "videolibrarypath":
            if not saved_path:
                saved_path = xbmc_videolibrary.search_library_path()
                if saved_path:
                    set_setting(path, saved_path)

        if not saved_path:
            saved_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/" + default
            set_setting(path, saved_path)

        saved_path = xbmc.translatePath(saved_path)
        if not filetools.exists(saved_path):
            logger.debug("Creating %s: %s" % (path, saved_path))
            filetools.mkdir(saved_path)

    config_paths = [["folder_movies", "CINE"], ["folder_tvshows", "SERIES"]]
    flag_call = True
    for path, default in config_paths:
        saved_path = get_setting(path)
        if not saved_path:
            saved_path = default
            set_setting(path, saved_path)

        content_path = filetools.join(get_videolibrary_path(), saved_path)
        if not filetools.exists(content_path):
            logger.debug("Creating %s: %s" % (path, content_path))
            # if the directory gets created
            if filetools.mkdir(content_path):
                if flag_call:
                    # pass the value so the callee knows it comes from directory creation
                    xbmc_videolibrary.ask_set_content(1)
                    flag_call = False

    try:
        from core import scrapertools

        # Look for the addon.xml file of the active skin
        skindir = filetools.join(xbmc.translatePath("special://home"), 'addons', xbmc.getSkinDir(), 'addon.xml')

        # Extract the name of the default resolution folder
        folder = ""
        data = filetools.read(skindir)
        res = scrapertools.find_multiple_matches(data, '(<res .*?>)')
        for r in res:
            if 'default="true"' in r:
                folder = scrapertools.find_single_match(r, 'folder="([^"]+)"')
                break

        # Check whether it exists in the addon and, if not, create it
        default = filetools.join(get_runtime_path(), 'resources', 'skins', 'Default')
        if folder and not filetools.exists(filetools.join(default, folder)):
            filetools.mkdir(filetools.join(default, folder))

        # Copy each file into that folder from the 720p one if it does not exist or its size differs
        if folder and folder != '720p':
            for root, folders, files in filetools.walk(filetools.join(default, '720p')):
                for f in files:
                    if not filetools.exists(filetools.join(default, folder, f)) or \
                            (filetools.getsize(filetools.join(default, folder, f)) !=
                             filetools.getsize(filetools.join(default, '720p', f))):
                        filetools.copy(filetools.join(default, '720p', f),
                                       filetools.join(default, folder, f), True)
    except:
        import traceback
        logger.error("Error while checking or creating the resolution folder")
        logger.error(traceback.format_exc())
def get_channel_parameters(channel_name):
    # logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = str_to_bool(scrapertools.find_single_match(data, "<active>([^<]*)</active>"))
        channel_parameters["adult"] = str_to_bool(scrapertools.find_single_match(data, "<adult>([^<]*)</adult>"))
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        channel_parameters["version"] = scrapertools.find_single_match(data, "<version>([^<]*)</version>")

        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                           "squares", channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                        "fanart", channel_parameters["fanart"])

        channel_parameters["update_url"] = scrapertools.find_single_match(data, "<update_url>([^<]*)</update_url>")
        if channel_parameters["update_url"] == "":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        include_in_global_search = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")
        if include_in_global_search in ["", "true"]:
            channel_parameters["include_in_global_search"] = True
        else:
            channel_parameters["include_in_global_search"] = False

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        # Determine whether the channel has configuration options
        channel_parameters["has_settings"] = False
        # this regex returns 2 groups per <settings> element (the node's own content plus the
        # inner group), so only the value at index 0 is used afterwards
        matches = scrapertools.find_multiple_matches(data, "<settings>((.|\n)*?)</settings>")
        for match in matches:
            _id = scrapertools.find_single_match(match[0], "<id>([^<]*)</id>")
            if _id and "include_in_" not in _id:
                channel_parameters["has_settings"] = True
                break

        # Start - conditions for showing the channel as compatible
        python_condition = scrapertools.find_single_match(data, "<python>([^<]*)</python>")
        if python_condition:
            import sys

            def versiontuple(v):
                return tuple(map(int, (v.split("."))))

            if sys.version_info < versiontuple(python_condition):
                python_compatible = False
            else:
                python_compatible = True
        else:
            python_compatible = True

        addon_version_condition = scrapertools.find_single_match(data, "<addon_version>([^<]*)</addon_version>")
        if addon_version_condition:
            import versiontools
            addon_version = int(addon_version_condition.replace(".", "").ljust(
                len(str(versiontools.get_current_plugin_version())), '0'))
            if versiontools.get_current_plugin_version() < addon_version:
                addon_version_compatible = False
            else:
                addon_version_compatible = True
        else:
            addon_version_compatible = True

        channel_parameters["compatible"] = True
        if not python_compatible or not addon_version_compatible:
            channel_parameters["compatible"] = False
        # End - conditions for showing the channel as compatible

        logger.info(channel_name + " -> " + repr(channel_parameters))
    else:
        logger.info(channel_name + ".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = False
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
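# str_to_bool() is called by the variant above but not defined in this section; a
# minimal sketch of the assumed helper (map the XML text "true" to True, anything
# else to False). The real implementation may differ.
def str_to_bool(value):
    return value.strip().lower() == "true"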
def get_channel_parameters(channel_name):
    # logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")
        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")

        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["update_url"] = scrapertools.find_single_match(data, "<update_url>([^<]*)</update_url>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                           "squares", channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                        "fanart", channel_parameters["fanart"])

        if channel_parameters["update_url"] == "":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)
        channel_parameters["categories"] = category_list

        # Determine whether the channel has configuration options
        channel_parameters["has_settings"] = False
        # this regex returns 2 groups per <settings> element (the node's own content plus the
        # inner group), so only the value at index 0 is used afterwards
        matches = scrapertools.find_multiple_matches(data, "<settings>((.|\n)*?)</settings>")
        for match in matches:
            _id = scrapertools.find_single_match(match[0], "<id>([^<]*)</id>")
            if _id and "include_in_" not in _id:
                channel_parameters["has_settings"] = True
                break

        logger.info(channel_name + " -> " + repr(channel_parameters))
    else:
        logger.info(channel_name + ".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = "false"
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
def Enlaces(self, Nam, URLL="", THUMB="", historial=""):
    # Sanitize the name: strip punctuation, typographic quotes and accented characters
    NN = Nam
    NN = NN.replace("¡", "")
    NN = NN.replace("¿", "")
    NN = NN.replace("?", "")
    NN = NN.replace(":", "")
    NN = NN.replace("º", "")
    NN = NN.replace("ª", "")
    NN = NN.replace("\"", "")
    NN = NN.replace("\'", "")
    NN = NN.replace("(", "")
    NN = NN.replace(")", "")
    NN = NN.replace("á", "a")
    NN = NN.replace("Á", "A")
    NN = NN.replace("é", "e")
    NN = NN.replace("É", "E")
    NN = NN.replace("í", "i")
    NN = NN.replace("Í", "I")
    NN = NN.replace("ó", "o")
    NN = NN.replace("Ó", "O")
    NN = NN.replace("ú", "u")
    NN = NN.replace("Ú", "U")
    NN = NN.replace("ñ", "n")
    NN = NN.replace("Ñ", "N")
    NN = NN.replace("ñ", "n")
    NN = NN.replace("“", "")
    NN = NN.replace("’", "")
    NN = NN.replace("'", "")

    ENN = URLL
    IMG = THUMB
    Categ = RutaTMP + NN + ".xml"

    if "###" in ENN:
        id = ENN.split("###")[1].split(";")[0]
        type = ENN.split("###")[1].split(";")[1]
        ENN = ENN.split("###")[0]

    itemlist = []
    it1 = []
    it2 = []
    url_targets = ENN

    # Download the obfuscated player script and extract the decoding key
    data_js = httptools.downloadpage("http://ps3plusteam.ddns.net/hdfull/jquery.hdfull.view.min.js",
                                     headers={'referer': 'http://ps3plusteam.ddns.net/'}).data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    # Download and decode the providers list
    data_js = httptools.downloadpage("http://ps3plusteam.ddns.net/hdfull/providers.js",
                                     headers={'referer': 'http://ps3plusteam.ddns.net/'}).data
    decoded = jhexdecode(data_js).replace("'", '"')

    providers_pattern = 'p\[(\d+)\]= {"t":"([^"]+)","d":".*?","e":.function.*?,"l":.function.*?return "([^"]+)".*?};'
    providers = scrapertools.find_multiple_matches(decoded, providers_pattern)
    provs = {}
    for provider, e, l in providers:
        provs[provider] = [e, l]

    data = agrupa_datos(ENN)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))

    # Start the playlist XML
    FF = open(Categ, 'w')
    FF.write('<?xml version="1.0" encoding="iso-8859-1"?>\n<items>\n<playlist_name><![CDATA['
             + NN + ']]></playlist_name>\n\n')

    Conteo = 0
    matches = []
    for match in data_decrypt:
        if match['provider'] in provs:
            try:
                embed = provs[match['provider']][0]
                url = provs[match['provider']][1] + match['code']
                matches.append([match['lang'], match['quality'], url, embed])
            except:
                pass

    for idioma, calidad, url, embed in matches:
        if embed == 'd':
            option = "Descargar"
            option1 = 2
            if idioma == "ESP":
                if url.find('uptobox') != -1:
                    Conteo = Conteo + 1
                    FF.write("<channel>\n")
                    FF.write(" <title><![CDATA[Ver en gamovideo " + NN.encode('utf8') + " " + calidad + "]]></title>\n")
                    FF.write(' <description><![CDATA[' + IMG + ']]></description>\n')
                    FF.write(' <playlist_url><![CDATA[' + url + ']]></playlist_url>\n')
                    FF.write(' <stream_url><![CDATA[http://ps3plusteam.ddns.net/teamps3plus/pro/uptobox.txt]]></stream_url>\n')
                    FF.write(' <img_src><![CDATA[http://ps3plusteam.ddns.net/ps3plus/images/letras/uptobox.png]]></img_src>\n')
                    FF.write(' <tipo><![CDATA[hdfullLinks]]></tipo>\n')
                    FF.write('</channel>\n\n')
        else:
            option = "Ver"
            option1 = 1
            if idioma == "ESP":
                Conteo = Conteo + 1
                if url.find('vidoza') != -1:
                    FF.write("<channel>\n")
                    FF.write(" <title><![CDATA[Ver en vidoza " + NN.encode('utf8') + " " + calidad + "]]></title>\n")
                    FF.write(' <description><![CDATA[' + IMG + ']]></description>\n')
                    FF.write(' <playlist_url><![CDATA[' + url + ']]></playlist_url>\n')
                    FF.write(' <stream_url><![CDATA[http://ps3plusteam.ddns.net/teamps3plus/pro/vidoza.txt]]></stream_url>\n')
                    FF.write(' <img_src><![CDATA[http://ps3plusteam.ddns.net/ps3plus/images/letras/vidoza.png]]></img_src>\n')
                    FF.write(' <tipo><![CDATA[hdfullLinks]]></tipo>\n')
                    FF.write('</channel>\n\n')
                if url.find('gamovideo') != -1:
                    Conteo = Conteo + 1
                    buscaID = re.findall(r'com/(.*)', url)
                    buscaID = buscaID[0]
                    FF.write("<channel>\n")
                    FF.write(" <title><![CDATA[Ver en gamovideo " + NN.encode('utf8') + " " + calidad + "]]></title>\n")
                    FF.write(' <description><![CDATA[' + IMG + ']]></description>\n')
                    FF.write(' <playlist_url><![CDATA[http://gamovideo.com/embed-' + buscaID + '-640x360.html]]></playlist_url>\n')
                    FF.write(' <stream_url><![CDATA[http://ps3plusteam.ddns.net/teamps3plus/props3/gamo.txt]]></stream_url>\n')
                    FF.write(' <img_src><![CDATA[http://ps3plusteam.ddns.net/ps3plus/images/letras/gamovideo.png]]></img_src>\n')
                    FF.write(' <tipo><![CDATA[hdfullLinks]]></tipo>\n')
                    FF.write('</channel>\n\n')

    FF.write('<prev_page_url text="CH- ATRAS"><![CDATA[' + historial + ']]></prev_page_url>\n</items>')
    FF.close()

    if Conteo == 0:
        return None
    return Categ
def verify_directories_created():
    from platformcode import logger
    from core import filetools
    from platformcode import xbmc_library

    config_paths = [["librarypath", "library"],
                    ["downloadpath", "downloads"],
                    ["downloadlistpath", "downloads/list"],
                    ["settings_path", "settings_channels"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        # Library
        if path == "librarypath":
            set_setting("library_version", "v4")
            if not saved_path:
                saved_path = xbmc_library.search_library_path()
                if saved_path:
                    set_setting(path, saved_path)

        if not saved_path:
            saved_path = "storage/sod" + default
            set_setting(path, saved_path)

        if get_setting("library_set_content") == True and path in ["librarypath", "downloadpath"]:
            # logger.debug("library_set_content %s" % get_setting("library_set_content"))
            xbmc_library.add_sources(saved_path)

        saved_path = xbmc.translatePath(saved_path)
        if not filetools.exists(saved_path):
            logger.debug("Creating %s: %s" % (path, saved_path))
            filetools.mkdir(saved_path)

    config_paths = [["folder_movies", "CINE"], ["folder_tvshows", "SERIES"]]
    for path, default in config_paths:
        saved_path = get_setting(path)
        if not saved_path:
            saved_path = default
            set_setting(path, saved_path)

        content_path = filetools.join(get_library_path(), saved_path)
        if not filetools.exists(content_path):
            logger.debug("Creating %s: %s" % (path, content_path))
            if filetools.mkdir(content_path) and get_setting("library_set_content") == True:
                xbmc_library.set_content(default)
            elif get_setting("library_ask_set_content") == 2:
                xbmc_library.set_content(default)

    try:
        from core import scrapertools

        # Look for the addon.xml file of the active skin
        skindir = filetools.join(xbmc.translatePath("special://home"), 'addons', xbmc.getSkinDir(), 'addon.xml')

        # Extract the name of the default resolution folder
        folder = ""
        data = filetools.read(skindir)
        res = scrapertools.find_multiple_matches(data, '(<res .*?>)')
        for r in res:
            if 'default="true"' in r:
                folder = scrapertools.find_single_match(r, 'folder="([^"]+)"')
                break

        # Check whether it exists in pelisalacarta and, if not, create it
        default = filetools.join(get_runtime_path(), 'resources', 'skins', 'Default')
        if folder and not filetools.exists(filetools.join(default, folder)):
            filetools.mkdir(filetools.join(default, folder))

        # Copy each file into that folder from the 720p one if it does not exist or its size differs
        if folder and folder != '720p':
            for root, folders, files in filetools.walk(filetools.join(default, '720p')):
                for f in files:
                    if not filetools.exists(filetools.join(default, folder, f)) or \
                            (filetools.getsize(filetools.join(default, folder, f)) !=
                             filetools.getsize(filetools.join(default, '720p', f))):
                        filetools.copy(filetools.join(default, '720p', f),
                                       filetools.join(default, folder, f), True)
    except:
        import traceback
        logger.error("Error while checking or creating the resolution folder")
        logger.error(traceback.format_exc())