def get_versions_from_repo(urls=None, xml_repo='addons.xml'):
    """
    Return the version numbers of the addons and repos included in the
    Alfa repo, plus the URL the data was downloaded from.

    @param urls: optional URL (str) or list/tuple of URLs to try; defaults
                 to the known GitHub/GitLab mirrors of the Alfa repo.
                 (Default changed from a mutable `[]` to None — behavior is
                 identical since the value was only tested for truthiness.)
    @param xml_repo: name of the repo XML file to download.
    @return: dict mapping addon id -> version string, plus key 'url' and,
             when available, 'repository.alfa-addon.md5' and 'addons_db'.
             Empty dict when every mirror fails.
    """
    from core import httptools
    from core import filetools

    versiones = {}

    if not urls:
        url_base = [
            'https://github.com/alfa-addon/alfa-repo/raw/master/',
            'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/'
        ]
    elif isinstance(urls, (list, tuple)):
        url_base = urls
    else:
        url_base = [urls]

    for url in url_base:
        response = httptools.downloadpage(url + xml_repo, timeout=5,
                                          ignore_response_code=True, alfa_s=True)
        if response.code != 200:
            continue
        try:
            import xmltodict
            xml = xmltodict.parse(response.data)
            # xmltodict exposes XML attributes with an '@' prefix
            for addon in xml["addons"]["addon"]:
                versiones[addon["@id"]] = addon["@version"]
            versiones['url'] = url

            # Also fetch the .md5 companion file, if published
            response = httptools.downloadpage(url + xml_repo + '.md5', timeout=5,
                                              ignore_response_code=True, alfa_s=True)
            if response.code == 200 and response.data:
                versiones['repository.alfa-addon.md5'] = response.data

            # Locate the newest Kodi Addons*.db file (reverse sort puts the
            # highest-numbered database first)
            for f in sorted(filetools.listdir("special://userdata/Database"), reverse=True):
                path_f = filetools.join("special://userdata/Database", f)
                if filetools.isfile(path_f) and f.lower().startswith('addons') \
                        and f.lower().endswith('.db'):
                    versiones['addons_db'] = path_f
                    break

            versiones = filetools.decode(versiones)
            break
        except Exception:  # narrowed from bare `except:`; full traceback is logged
            import traceback
            from platformcode import logger
            logger.error("Unable to download repo xml: %s" % versiones)
            versiones = {}
            logger.error(traceback.format_exc())
    else:
        # for/else: no mirror produced a usable addons.xml
        from platformcode import logger
        logger.error("Unable to download repo xml: %s, %s" % (xml_repo, url_base))

    return versiones
def abandomoviez_search(item):
    """
    Search abandomoviez.net for a title and return trailer result items.

    Searches the main "db/" database first and falls back to the "indie/"
    section when no results are found.  If exactly one result matches, its
    trailer links are fetched directly; otherwise one list item per result
    is returned, plus pagination and manual-search entries.
    """
    logger.info()
    # Check whether this is a fresh search or a "Next page" continuation
    if item.page != "":
        data = httptools.downloadpage(item.page).data
    else:
        titulo = decode(item.contentTitle)
        post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
                                 'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
        url = "http://www.abandomoviez.net/db/busca_titulo.php?busco2=%s" % item.contentTitle
        item.prefix = "db/"
        data = httptools.downloadpage(url, post=post).data
        # Fall back to the indie section when the main DB reports no matches
        if "No hemos encontrado ninguna" in data:
            url = "http://www.abandomoviez.net/indie/busca_titulo.php?busco2=%s" % item.contentTitle
            item.prefix = "indie/"
            data = httptools.downloadpage(url, post=post).data

    # The site serves ISO-8859-1; re-encode to UTF-8 byte strings on Python 2
    if not PY3:
        data = data.decode("iso-8859-1").encode('utf-8')

    itemlist = []
    patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \
             '.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)'
    matches = scrapertools.find_multiple_matches(data, patron)

    # With a single match, go straight to its trailers; otherwise list all results
    if len(matches) == 1:
        item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1])
        item.thumbnail = matches[0][0]
        itemlist = search_links_abando(item)
    elif len(matches) > 1:
        for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando", url=scrapedurl,
                                       thumbnail=scrapedthumbnail, text_color="white"))
        # "Siguiente" = next-page pagination link
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente')
        if next_page != "":
            next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page)
            itemlist.append(item.clone(title=config.get_localized_string(70502), action="abandomoviez_search",
                                       page=next_page, thumbnail="", text_color=""))

    # "No results" placeholder entry
    if not itemlist:
        itemlist.append(item.clone(title=config.get_localized_string(70501), action="",
                                   thumbnail="", text_color=""))

    # Offer a manual-search entry (colored green when opened from a context menu)
    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(item.clone(title=title % config.get_localized_string(70511), action="manual_search",
                                   thumbnail="", text_color="green", extra="abandomoviez"))

    return itemlist
def youtube_search(item):
    """
    Search YouTube for trailers of item.contentTitle.

    " trailer" is appended to the query unless the search came from the
    dedicated youtube option.  The results page is scraped and returned as
    playable items, plus pagination and manual-search entries.
    """
    logger.info()
    itemlist = []

    titulo = item.contentTitle
    if item.extra != "youtube":
        titulo += " trailer"

    # Fresh search vs. "next page" continuation
    if item.page != "":
        data = httptools.downloadpage(item.page).data
    else:
        titulo = urllib.quote(titulo).replace("%20", "+")
        data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo).data

    patron = ('thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
              'text":"([^"]+).*?'
              'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
              'url":"([^"]+)')
    for thumb, video_title, duration, video_url in scrapertools.find_multiple_matches(data, patron):
        video_title = decode(video_title) + " (" + duration + ")"
        if item.contextual:
            video_title = "[COLOR white]%s[/COLOR]" % video_title
        itemlist.append(item.clone(title=video_title, action="play", server="youtube",
                                   url=urlparse.urljoin('https://www.youtube.com/', video_url),
                                   thumbnail=thumb, text_color="white"))

    # "Siguiente" = next-page pagination link
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
                                                     'Siguiente')
    if next_page != "":
        itemlist.append(item.clone(title=config.get_localized_string(70502), action="youtube_search",
                                   extra="youtube",
                                   page=urlparse.urljoin("https://www.youtube.com", next_page),
                                   thumbnail="", text_color=""))

    # "No results" placeholder entry
    if not itemlist:
        itemlist.append(item.clone(title=config.get_localized_string(70501) % titulo, action="",
                                   thumbnail="", text_color=""))

    # Offer a manual-search entry (colored green when opened from a context menu)
    if keyboard:
        fmt = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
        itemlist.append(item.clone(title=fmt % config.get_localized_string(70510), action="manual_search",
                                   text_color="green", thumbnail="", extra="youtube"))

    return itemlist
def get_versions_from_repo(urls=None, xml_repo='addons.xml'):
    """
    Return the version numbers of the addons and repos included in the
    Alfa repo, plus the URL the data was downloaded from.

    @param urls: optional URL (str) or list/tuple of URLs to try; defaults
                 to the known GitHub/GitLab mirrors of the Alfa repo.
                 (Default changed from a mutable `[]` to None — behavior is
                 identical since the value was only tested for truthiness.)
    @param xml_repo: name of the repo XML file to download.
    @return: dict mapping addon id -> version string, plus key 'url'.
             Empty dict when every mirror fails.
    """
    from core import httptools
    from core.filetools import decode

    versiones = {}

    if not urls:
        url_base = [
            'https://github.com/alfa-addon/alfa-repo/raw/master/',
            'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/'
        ]
    elif isinstance(urls, (list, tuple)):
        url_base = urls
    else:
        url_base = [urls]

    for url in url_base:
        response = httptools.downloadpage(url + xml_repo, timeout=5,
                                          ignore_response_code=True, alfa_s=True)
        if response.code != 200:
            continue
        try:
            import xmltodict
            xml = xmltodict.parse(response.data)
            # xmltodict exposes XML attributes with an '@' prefix
            for addon in xml["addons"]["addon"]:
                versiones[addon["@id"]] = addon["@version"]
            versiones['url'] = url
            versiones = decode(versiones)
            break
        except Exception:  # narrowed from bare `except:`; full traceback is logged
            import traceback
            from platformcode import logger
            logger.error("Unable to download repo xml: %s" % versiones)
            versiones = {}
            logger.error(traceback.format_exc())
    else:
        # for/else: no mirror produced a usable addons.xml
        from platformcode import logger
        logger.error("Unable to download repo xml: %s, %s" % (xml_repo, url_base))

    return versiones
def download_from_url(url, item):
    """
    Download `url` into the configured download folder, driving a Kodi
    progress dialog, and return a dict describing the final state.

    @param url: direct media URL to download.
    @param item: item carrying downloadFilename / downloadSize / server.
    @return: dict with downloadUrl, downloadStatus, downloadSize,
             downloadProgress, downloadCompleted and downloadFilename
             (only downloadStatus for unsupported protocols).
    """
    logger.info("pelisalacarta.channels.descargas download_from_url - Intentando descargar: %s" % (url))
    # m3u8 / rtmp streaming protocols cannot be handled by this downloader
    if url.lower().endswith(".m3u8") or url.lower().startswith("rtmp"):
        save_server_statistics(item.server, 0, False)
        return {"downloadStatus": STATUS_CODES.error}

    # Resolve the destination folder and the file name
    download_path = filetools.dirname(filetools.join(config.get_setting("downloadpath"), item.downloadFilename))
    file_name = filetools.basename(filetools.join(config.get_setting("downloadpath"), item.downloadFilename))

    # Create the folder if it does not exist
    if not filetools.exists(download_path):
        filetools.mkdir(download_path)

    # Show the progress dialog
    progreso = platformtools.dialog_progress("Descargas", "Iniciando descarga...")

    # Start the download
    d = Downloader(url, filetools.encode(download_path), filetools.encode(file_name))
    d.start()

    # Poll the download until it finishes or the user cancels the dialog
    while d.state == d.states.downloading and not progreso.iscanceled():
        time.sleep(0.1)
        line1 = "%s" % (filetools.decode(d.filename))
        line2 = "%.2f%% - %.2f %s de %.2f %s a %.2f %s/s (%d/%d)" % (
            d.progress, d.downloaded[1], d.downloaded[2], d.size[1], d.size[2],
            d.speed[1], d.speed[2], d.connections[0], d.connections[1])
        line3 = "Tiempo restante: %s" % (d.remaining_time)
        progreso.update(int(d.progress), line1, line2, line3)

    # Download stopped; work out the final state:
    # An error occurred while downloading
    if d.state == d.states.error:
        logger.info("pelisalacarta.channels.descargas download_video - Error al intentar descargar %s" % (url))
        d.stop()
        progreso.close()
        status = STATUS_CODES.error
    # Still downloading (the user clicked cancel)
    elif d.state == d.states.downloading:
        logger.info("pelisalacarta.channels.descargas download_video - Descarga detenida")
        d.stop()
        progreso.close()
        status = STATUS_CODES.canceled
    # The download finished
    elif d.state == d.states.completed:
        logger.info("pelisalacarta.channels.descargas download_video - Descargado correctamente")
        progreso.close()
        status = STATUS_CODES.completed
        # A size mismatch against the expected size counts as an error
        if item.downloadSize and item.downloadSize != d.size[0]:
            status = STATUS_CODES.error

    save_server_statistics(item.server, d.speed[0], d.state != d.states.error)

    # A cancelled dialog overrides any other status
    if progreso.iscanceled():
        status = STATUS_CODES.canceled

    dir = os.path.dirname(item.downloadFilename)
    file = filetools.join(dir, filetools.decode(d.filename))

    if status == STATUS_CODES.completed:
        move_to_libray(item.clone(downloadFilename = file))

    return {"downloadUrl": d.download_url, "downloadStatus": status, "downloadSize": d.size[0],
            "downloadProgress": d.progress, "downloadCompleted": d.downloaded[0], "downloadFilename": file}
def download_from_url(url, item):
    """
    Download the media at `url` into DOWNLOAD_PATH, driving a progress
    dialog, and return a dict summarising the outcome.
    """
    logger.info("Intentando descargar: %s" % (url))

    # Streaming protocols cannot be handled by this downloader
    lowered = url.lower()
    if lowered.endswith(".m3u8") or lowered.startswith("rtmp"):
        save_server_statistics(item.server, 0, False)
        return {"downloadStatus": STATUS_CODES.error}

    # Destination folder and file name
    full_target = filetools.join(DOWNLOAD_PATH, item.downloadFilename)
    download_path = filetools.dirname(full_target)
    file_name = filetools.basename(full_target)

    # Make sure the destination folder exists
    if not filetools.exists(download_path):
        filetools.mkdir(download_path)

    progreso = platformtools.dialog_progress("Descargas", "Iniciando descarga...")

    downloader = Downloader(url, download_path, file_name)
    downloader.start()

    # Refresh the dialog while the download runs and is not cancelled
    while downloader.state == downloader.states.downloading and not progreso.iscanceled():
        time.sleep(0.1)
        progreso.update(
            int(downloader.progress),
            "%s" % (filetools.decode(downloader.filename)),
            "%.2f%% - %.2f %s de %.2f %s a %.2f %s/s (%d/%d)" % (
                downloader.progress, downloader.downloaded[1], downloader.downloaded[2],
                downloader.size[1], downloader.size[2], downloader.speed[1],
                downloader.speed[2], downloader.connections[0], downloader.connections[1]),
            "Tiempo restante: %s" % (downloader.remaining_time))

    # Map the downloader's final state onto a status code
    if downloader.state == downloader.states.error:
        logger.info("Error al intentar descargar %s" % (url))
        downloader.stop()
        progreso.close()
        status = STATUS_CODES.error
    elif downloader.state == downloader.states.downloading:
        # Still running: the user cancelled the dialog
        logger.info("Descarga detenida")
        downloader.stop()
        progreso.close()
        status = STATUS_CODES.canceled
    elif downloader.state == downloader.states.completed:
        logger.info("Descargado correctamente")
        progreso.close()
        status = STATUS_CODES.completed
        # A size mismatch against the expected size counts as an error
        if item.downloadSize and item.downloadSize != downloader.size[0]:
            status = STATUS_CODES.error

    save_server_statistics(item.server, downloader.speed[0],
                           downloader.state != downloader.states.error)

    # A cancelled dialog overrides any other status
    if progreso.iscanceled():
        status = STATUS_CODES.canceled

    file = filetools.join(os.path.dirname(item.downloadFilename), downloader.filename)

    if status == STATUS_CODES.completed:
        move_to_libray(item.clone(downloadFilename=file))

    return {
        "downloadUrl": downloader.download_url,
        "downloadStatus": status,
        "downloadSize": downloader.size[0],
        "downloadProgress": downloader.progress,
        "downloadCompleted": downloader.downloaded[0],
        "downloadFilename": file
    }
def get_videos_watched_on_kodi(item, value=1, list_videos=False):
    """
    Query Kodi's video database for the watched state of the videos under
    an item's videolibrary path.

    @type item: item
    @param item: element whose videolibrary folder is inspected
    @type value: int
    @param value: >0 for watched, 0 for not watched
    @type list_videos: bool
    @param list_videos: when True, return the raw query result instead of a bool
    @return: if list_videos is True, a dict {file name: playCount};
             otherwise True only when every video's playCount equals `value`.
             Returns None when a shared/remote DB is configured, since the
             local Kodi DB cannot be queried in that mode.
    """
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))

    # We can only read watched flags from Kodi's DB when it is local;
    # with a shared DB this feature does not work.
    if config.get_setting("db_mode", "videolibrary"):
        return

    # NOTE(review): the original built a season filter (' and c12= %s') that
    # was never added to the query, and a `video_path` local that was never
    # read; both unused locals removed. Confirm the season filter was not
    # meant to be part of the WHERE clause before relying on this.

    if item.video_path:
        path = item.video_path
    else:
        path = item.path

    # Pick the Kodi DB view matching the content type
    if item.contentType == 'movie':
        view = 'movie'
    else:
        view = 'episode'
    tvshows_path = filetools.join(config.get_videolibrary_path(), config.get_setting("folder_tvshows"))

    # Build the two LIKE patterns (Windows "\" and POSIX "/" separators)
    item_path1 = "%" + path.replace("\\\\", "\\").replace(tvshows_path, "")
    # BUG FIX: the original tested `item_path1[:-1] != "\\"` (everything but
    # the last character), which is almost always true, so a trailing
    # backslash was appended even when one already existed. Test the LAST
    # character instead.
    if item_path1[-1:] != "\\":
        item_path1 += "\\"
    item_path2 = item_path1.replace("\\", "/")

    # NOTE(review): values are interpolated straight into the SQL string; they
    # come from local config paths, but a parameterized query would be safer.
    sql = 'select strFileName, playCount from %s_view where (strPath like "%s" or strPath like "%s")' % (
        view, item_path1, item_path2)

    nun_records, records = execute_sql_kodi(sql, silent=True)
    if not nun_records:
        return {} if list_videos else False

    # Normalize encodings and map None playCounts to 0
    records = dict(filetools.decode(records, trans_none=0))
    if list_videos:
        return records

    # Every row must match the requested watched state
    return all(mark == value for mark in records.values())
def download_from_url(url, item):
    """
    Download `url` into the configured download folder with a progress
    dialog and return a dict describing the result.

    Status codes in this variant are numeric: 1 = canceled, 2 = completed,
    3 = error.
    """
    logger.info(
        "pelisalacarta.channels.descargas download_from_url - Intentando descargar: %s"
        % (url))

    # Resolve the destination folder and the file name
    download_path = os.path.dirname(
        filetools.join(config.get_setting("downloadpath"), item.downloadFilename))
    file_name = os.path.basename(
        filetools.join(config.get_setting("downloadpath"), item.downloadFilename))

    # Create the folder if it does not exist
    if not filetools.exists(download_path):
        filetools.mkdir(download_path)

    # Show the progress dialog
    progreso = platformtools.dialog_progress("Descargas", "Iniciando descarga...")

    # Start the download
    d = Downloader(url, filetools.encode(download_path), filetools.encode(file_name))
    d.start()

    # Poll the download until it finishes or the user cancels the dialog
    while d.state == d.states.downloading and not progreso.iscanceled():
        time.sleep(0.1)
        line1 = "%s" % (filetools.decode(d.filename))
        line2 = "%.2f%% - %.2f %s de %.2f %s a %.2f %s/s (%d/%d)" % (
            d.progress, d.downloaded[1], d.downloaded[2], d.size[1], d.size[2],
            d.speed[1], d.speed[2], d.connections[0], d.connections[1])
        line3 = "Tiempo restante: %s" % (d.remaining_time)
        progreso.update(int(d.progress), line1, line2, line3)

    # Download stopped; work out the final state:
    # An error occurred while downloading
    if d.state == d.states.error:
        logger.info(
            "pelisalacarta.channels.descargas download_video - Error al intentar descargar %s"
            % (url))
        d.stop()
        progreso.close()
        status = 3
    # Still downloading (the user clicked cancel)
    elif d.state == d.states.downloading:
        logger.info(
            "pelisalacarta.channels.descargas download_video - Descarga detenida"
        )
        d.stop()
        progreso.close()
        status = 1
    # The download finished
    elif d.state == d.states.completed:
        logger.info(
            "pelisalacarta.channels.descargas download_video - Descargado correctamente"
        )
        progreso.close()
        status = 2
        # A size mismatch against the expected size counts as an error
        if item.downloadSize and item.downloadSize != d.size[0]:
            status = 3

    dir = os.path.dirname(item.downloadFilename)
    file = filetools.join(dir, filetools.decode(d.filename))

    if status == 2:
        move_to_libray(item.clone(downloadFilename=file))

    return {
        "downloadUrl": d.download_url,
        "downloadStatus": status,
        "downloadSize": d.size[0],
        "downloadProgress": d.progress,
        "downloadCompleted": d.downloaded[0],
        "downloadFilename": file
    }