def channel_search(item):
    """Search every enabled channel for *item* in parallel and build the result list.

    Returns a list of Items: either cached results read back from a temp file,
    or a freshly computed header + per-channel (or unified) results.
    Side effects: toggles the 'tmdb_active' setting while collecting raw
    results and writes the final list to a temp cache file keyed by the
    searched text.
    """
    logger.debug(item)
    start = time.time()  # wall-clock start, used for the statistics line at the end
    searching = list()          # channel ids still being searched
    searching_titles = list()   # human-readable channel names, parallel to `searching`
    results = list()
    valid = list()
    ch_list = dict()            # channel id -> list of result Items
    mode = item.mode

    # Use the show title (or movie title) up to the first ' - ' as the search text
    if item.infoLabels['tvshowtitle']:
        item.text = item.infoLabels['tvshowtitle'].split(' - ')[0]
        item.title = item.text
    elif item.infoLabels['title']:
        item.text = item.infoLabels['title'].split(' - ')[0]
        item.title = item.text

    # Serve cached results if the temp file belongs to this same search text.
    # File format: "<text>,<item.tourl()>,<item.tourl()>,..."
    temp_search_file = config.get_temp_file('temp-search')
    if filetools.isfile(temp_search_file):
        itemlist = []
        f = filetools.read(temp_search_file)
        if f.startswith(item.text):
            for it in f.split(','):
                if it and it != item.text:
                    itemlist.append(Item().fromurl(it))
            return itemlist
        else:
            # stale cache from a different search: discard it
            filetools.remove(temp_search_file)

    searched_id = item.infoLabels['tmdb_id']  # NOTE(review): assigned but never used below
    channel_list, channel_titles = get_channels(item)
    searching += channel_list
    searching_titles += channel_titles
    cnt = 0
    progress = platformtools.dialog_progress(
        config.get_localized_string(30993) % item.title,
        config.get_localized_string(70744) % len(channel_list) + '\n' +
        ', '.join(searching_titles))

    # Disable TMDB lookups while collecting raw channel results (speed)
    config.set_setting('tmdb_active', False)

    # Collect every channel's "search" actions from its mainlist, keeping the
    # imported module around so worker threads can reuse it.
    search_action_list = []
    module_dict = {}
    for ch in channel_list:
        try:
            module = __import__('channels.%s' % ch, fromlist=["channels.%s" % ch])
            mainlist = getattr(module, 'mainlist')(Item(channel=ch, global_search=True))
            module_dict[ch] = module
            search_action_list.extend([
                elem for elem in mainlist
                if elem.action == "search" and (
                    mode == 'all' or elem.contentType in [mode, 'undefined'])
            ])
            if progress.iscanceled():
                return []
        except:
            import traceback
            logger.error('error importing/getting search items of ' + ch)
            logger.error(traceback.format_exc())

    total_search_actions = len(search_action_list)
    with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor:
        c_results = []
        for search_action in search_action_list:
            c_results.append(
                executor.submit(get_channel_results, item, module_dict, search_action))
            if progress.iscanceled():
                break
        for res in futures.as_completed(c_results):
            # each future returns (search_action, channel_results, valid_results)
            search_action = res.result()[0]
            channel = search_action.channel
            if res.result()[1]:
                if channel not in ch_list:
                    ch_list[channel] = []
                ch_list[channel].extend(res.result()[1])
            if res.result()[2]:
                valid.extend(res.result()[2])
            if progress.iscanceled():
                break
            search_action_list.remove(search_action)
            # if no action of this channel remains, drop it from the progress text
            for it in search_action_list:
                if it.channel == channel:
                    break
            else:
                cnt += 1
                searching_titles.remove(
                    searching_titles[searching.index(channel)])
                searching.remove(channel)
            progress.update(
                old_div(((total_search_actions - len(search_action_list)) * 100),
                        total_search_actions),
                config.get_localized_string(70744) % str(len(channel_list) - cnt)
                + '\n' + ', '.join(searching_titles))
    progress.close()

    # Second pass: decorate/group the collected results (TMDB enabled again)
    cnt = 0
    progress = platformtools.dialog_progress(
        config.get_localized_string(30993) % item.title,
        config.get_localized_string(60295) + '\n' +
        config.get_localized_string(60293))
    config.set_setting('tmdb_active', True)
    # res_count = 0
    for key, value in ch_list.items():
        ch_name = channel_titles[channel_list.index(key)]
        grouped = list()
        cnt += 1
        progress.update(old_div((cnt * 100), len(ch_list)),
                        config.get_localized_string(60295))
        for it in value:
            if it.channel == item.channel:
                it.channel = key
            if it in valid:
                continue
            if mode == 'all' or (it.contentType and mode == it.contentType):
                if config.get_setting('result_mode') != 0:
                    # "all together" mode: tag each result with its channel name
                    if config.get_localized_string(30992) not in it.title:
                        it.title += typo(ch_name, '_ [] color kod bold')
                    results.append(it)
                else:
                    grouped.append(it)
            elif (mode == 'movie' and it.contentTitle) or (
                    mode == 'tvshow' and (it.contentSerieName or it.show)):
                grouped.append(it)
            else:
                continue

        if not grouped:
            continue
        # to_temp[key] = grouped
        # grouped-by-channel mode: build one folder Item per channel
        if config.get_setting('result_mode') == 0:
            if not config.get_setting('unify'):
                title = typo(ch_name, 'bold') + typo(str(len(grouped)),
                                                     '_ [] color kod bold')
            else:
                title = typo(
                    '%s %s' % (len(grouped), config.get_localized_string(70695)),
                    'bold')
            # res_count += len(grouped)
            plot = ''
            for it in grouped:
                plot += it.title + '\n'
            ch_thumb = channeltools.get_channel_parameters(key)['thumbnail']
            results.append(
                Item(channel='search',
                     title=title,
                     action='get_from_temp',
                     thumbnail=ch_thumb,
                     itemlist=[ris.tourl() for ris in grouped],
                     plot=plot,
                     page=1))
    progress.close()

    # "All Together" and movie mode -> search servers
    if config.get_setting('result_mode') == 1 and mode == 'movie':
        progress = platformtools.dialog_progress(
            config.get_localized_string(30993) % item.title,
            config.get_localized_string(60683))
        valid_servers = []
        with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor:
            c_results = [
                executor.submit(get_servers, v, module_dict) for v in valid
            ]
            completed = 0
            for res in futures.as_completed(c_results):
                if progress.iscanceled():
                    break
                if res.result():
                    completed += 1
                    valid_servers.extend(res.result())
                    progress.update(old_div(completed * 100, len(valid)))
        valid = valid_servers
        progress.close()

    # send_to_temp(to_temp)
    results = sorted(results, key=lambda it: it.title)
    results_statistic = config.get_localized_string(59972) % (
        item.title, time.time() - start)
    if mode == 'all':
        results.insert(
            0,
            Item(title=typo(results_statistic, 'color kod bold'),
                 thumbnail=get_thumb('search.png')))
    else:
        if not valid:
            # placeholder entry when no direct (server-resolved) results exist
            valid.append(
                Item(title=config.get_localized_string(60347),
                     thumbnail=get_thumb('nofolder.png')))
        valid.insert(
            0,
            Item(title=typo(results_statistic, 'color kod bold'),
                 thumbnail=get_thumb('search.png')))
        results.insert(
            0,
            Item(title=typo(config.get_localized_string(30025), 'color kod bold'),
                 thumbnail=get_thumb('search.png')))
    # logger.debug(results_statistic)

    # Cache the computed list so re-entering the same search is instant
    itlist = valid + results
    writelist = item.text
    for it in itlist:
        writelist += ',' + it.tourl()
    filetools.write(temp_search_file, writelist)
    return itlist
def wait_for_update_trakt():
    """Kick off the Trakt update in a background daemon thread."""
    logger.debug()
    # NOTE(review): if Thread is threading.Thread, update_all is being passed
    # as the `group` argument (should be target=update_all) — confirm whether
    # Thread here is a project wrapper that takes the target positionally.
    worker = Thread(update_all)
    worker.setDaemon(True)
    worker.start()
    worker.is_alive()
def test_video_exists(page_url):
    """Return (exists, message): False + localized message on HTTP 404."""
    logger.debug("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    if response.code != 404:
        return True, ""
    return False, config.get_localized_string(70449) % "CinemaUpload"
def start(itemlist, item):
    '''
    Main method from which the links are played automatically.

    - If the "customize" option is active, the user-defined options are used.
    - Otherwise it will try to play any link that has the preferred language.

    :param itemlist: list (items ready to play, i.e. with action='play')
    :param item: item (the channel's main item)
    :return: tries to autoplay; if it fails, returns the itemlist it received
    '''
    logger.info()
    if not config.is_xbmc():
        #platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
        return itemlist
    global autoplay_node
    if not autoplay_node:
        # Load the AUTOPLAY node from the json file
        autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
    if not item.channel in autoplay_node:
        return itemlist

    # Register servers and qualities that were not yet listed in autoplay_node
    new_options = check_value(item.channel, itemlist)

    # Channel node inside autoplay_node
    channel_node = autoplay_node.get(item.channel, {})
    # Autoplay settings for this channel
    settings_node = channel_node.get('settings', {})

    if settings_node['active']:
        url_list_valid = []
        autoplay_list = []
        autoplay_b = []       # non-favourite fallback ("plan B") candidates
        favorite_servers = []
        favorite_quality = []

        # Remember the current "action" and "player mode" preferences so they
        # can be restored at the end
        user_config_setting_action = config.get_setting("default_action")
        user_config_setting_player = config.get_setting("player_mode")
        # Force the "watch in high quality" action (if the server returns more
        # than one quality, e.g. gdrive)
        if user_config_setting_action != 2:
            config.set_setting("default_action", 2)
        if user_config_setting_player != 0:
            config.set_setting("player_mode", 0)

        # Notify that AutoPlay is active
        #platformtools.dialog_notification('AutoPlay Activo', '', sound=False)

        # Sorting priorities for itemlist:
        # 0: Servers and qualities
        # 1: Qualities and servers
        # 2: Servers only
        # 3: Qualities only
        # 4: No sorting
        if settings_node['custom_servers'] and settings_node['custom_quality']:
            priority = settings_node['priority']  # 0 or 1
        elif settings_node['custom_servers']:
            priority = 2  # servers only
        elif settings_node['custom_quality']:
            priority = 3  # qualities only
        else:
            priority = 4  # no sorting

        # Available server and quality lists from the AutoPlay json node
        server_list = channel_node.get('servers', [])
        for server in server_list:
            # NOTE(review): only rebinds the loop variable; server_list itself
            # is not lower-cased — confirm this is intended
            server = server.lower()
        quality_list = channel_node.get('quality', [])
        # If no qualities are defined, 'default' is assigned as the only one
        if len(quality_list) == 0:
            quality_list =['default']

        # Collect the user's three favourite servers/qualities, e.g.
        # favorite_servers = ['openload', 'streamcloud']
        for num in range(1, 4):
            favorite_servers.append(
                channel_node['servers'][settings_node['server_%s' % num]].lower())
            favorite_quality.append(
                channel_node['quality'][settings_node['quality_%s' % num]])

        # Filter itemlist links so they match the autoplay values.
        # NOTE(review): the loop variable shadows the `item` parameter; code
        # after the loop keeps using the last looped value — confirm intended.
        for item in itemlist:
            autoplay_elem = dict()
            b_dict = dict()
            # Only video items (must carry a server)
            if 'server' not in item:
                continue
            # Add the "configure AutoPlay" option to the context menu
            if 'context' not in item:
                item.context = list()
            # NOTE(review): `context` looks like it should be `item.context`;
            # also, under Python 3 a filter object is always truthy so
            # `not filter(...)` can never be True — TODO confirm and fix
            if not filter(lambda x: x['action'] == 'autoplay_config', context):
                item.context.append({"title": "Configurar AutoPlay",
                                     "action": "autoplay_config",
                                     "channel": "autoplay",
                                     "from_channel": item.channel})
            # Items without a defined quality get the 'default' quality
            if item.quality == '':
                item.quality = 'default'

            # Build the candidate entry according to the custom configuration
            if priority < 2:  # 0: servers+qualities or 1: qualities+servers
                # discard the item if the server or quality is not a favourite,
                # or the url is repeated
                if item.server.lower() not in favorite_servers or item.quality not in favorite_quality \
                        or item.url in url_list_valid:
                    item.type_b = True
                    b_dict['videoitem'] = item
                    autoplay_b.append(b_dict)
                    continue
                autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
                autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
            elif priority == 2:  # servers only
                # discard the item if the server is not a favourite or the url repeats
                if item.server.lower() not in favorite_servers or item.url in url_list_valid:
                    item.type_b = True
                    b_dict['videoitem'] = item
                    autoplay_b.append(b_dict)
                    continue
                autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
            elif priority == 3:  # qualities only
                # discard the item if the quality is not a favourite or the url repeats
                if item.quality not in favorite_quality or item.url in url_list_valid:
                    item.type_b = True
                    b_dict['videoitem'] = item
                    autoplay_b.append(b_dict)
                    continue
                autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
            else:  # no sorting
                # if the url is repeated, discard the item
                if item.url in url_list_valid:
                    continue

            # If the item got this far, record the url as valid and queue it
            url_list_valid.append(item.url)
            item.plan_b = True
            autoplay_elem['videoitem'] = item
            # autoplay_elem['server'] = item.server
            # autoplay_elem['quality'] = item.quality
            autoplay_list.append(autoplay_elem)

        # Sort according to the priority
        if priority == 0:  # servers and qualities
            autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
        elif priority == 1:  # qualities and servers
            autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
        elif priority == 2:  # servers only
            autoplay_list.sort(key=lambda orden: orden['indice_server'])
        elif priority == 3:  # qualities only
            autoplay_list.sort(key=lambda orden: orden['indice_quality'])

        # Prepare plan B: if active, append the non-favourite elements at the end
        plan_b = settings_node['plan_b']
        ready = False  # NOTE(review): never read afterwards
        text_b = ''
        if plan_b:
            autoplay_list.extend(autoplay_b)

        # If there are candidates, try each one until one plays or all fail
        if autoplay_list or (plan_b and autoplay_b):
            played = False
            max_intentos = 5
            max_intentos_servers = {}  # per-server remaining attempts
            # If something is already playing, stop it
            if platformtools.is_playing():
                platformtools.stop_video()
            for autoplay_elem in autoplay_list:
                # NOTE(review): binds the Item class itself, not an instance —
                # only used as a placeholder before clone() below
                play_item = Item
                # If it is not a favourite element, show the plan-b text
                if autoplay_elem['videoitem'].type_b:
                    text_b = '(Plan B)'
                if not platformtools.is_playing() and not played:
                    videoitem = autoplay_elem['videoitem']
                    logger.debug('videoitem %s' % videoitem)
                    if videoitem.server.lower() not in max_intentos_servers:
                        max_intentos_servers[videoitem.server.lower()] = max_intentos
                    # If this server reached the maximum number of attempts, skip it
                    if max_intentos_servers[videoitem.server.lower()] == 0:
                        continue
                    lang = " "
                    if hasattr(videoitem, 'language') and videoitem.language != "":
                        lang = " '%s' " % videoitem.language
                    platformtools.dialog_notification(
                        "AutoPlay %s" % text_b,
                        "%s%s%s" % (videoitem.server.upper(), lang,
                                    videoitem.quality.upper()),
                        sound=False)
                    # TODO videoitem.server is the server id, but it might not be the name!!!
                    # Try to play the links
                    # If the channel has its own play method, use it
                    channel = __import__('channels.%s' % item.channel, None, None,
                                         ["channels.%s" % item.channel])
                    if hasattr(channel, 'play'):
                        resolved_item = getattr(channel, 'play')(videoitem)
                        if len(resolved_item) > 0:
                            if isinstance(resolved_item[0], list):
                                videoitem.video_urls = resolved_item
                            else:
                                videoitem = resolved_item[0]
                    # Otherwise play directly and mark as watched.
                    # Check whether the item comes from the video library
                    try:
                        if item.contentChannel == 'videolibrary':
                            # Mark as watched
                            from platformcode import xbmc_videolibrary
                            xbmc_videolibrary.mark_auto_as_watched(item)
                            # Fill the video with the main item's data and play
                            play_item = item.clone(url=videoitem)
                            platformtools.play_video(play_item.url, autoplay=True)
                        else:
                            # Not from the video library: just play
                            platformtools.play_video(videoitem, autoplay=True)
                    except:
                        pass
                    try:
                        if platformtools.is_playing():
                            played = True
                            break
                    except:
                        logger.debug(str(len(autoplay_list)))
                    # Reaching here means playback failed
                    max_intentos_servers[videoitem.server.lower()] -= 1
                    # If this server reached the maximum number of attempts,
                    # ask whether to keep trying it or ignore it
                    if max_intentos_servers[videoitem.server.lower()] == 0:
                        text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
                        if not platformtools.dialog_yesno(
                                "AutoPlay", text,
                                "¿Desea ignorar todos los enlaces de este servidor?"):
                            max_intentos_servers[videoitem.server.lower()] = max_intentos
                    # If no elements remain in the list, inform the user
                    if autoplay_elem == autoplay_list[-1]:
                        platformtools.dialog_notification('AutoPlay',
                                                          'No hubo enlaces funcionales')
        else:
            platformtools.dialog_notification('AutoPlay No Fue Posible',
                                              'No Hubo Coincidencias')
        if new_options:
            platformtools.dialog_notification("AutoPlay",
                                              "Nueva Calidad/Servidor disponible en la "
                                              "configuracion", sound=False)
        # Restore, if needed, the previous "action" and "player mode" preference values
        if user_config_setting_action != 2:
            config.set_setting("default_action", user_config_setting_action)
        if user_config_setting_player != 0:
            config.set_setting("player_mode", user_config_setting_player)
    # return the list of links for manual choice
    return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a Wstream page into playable urls.

    Returns a list of [label, url|headers] pairs sorted by label. Handles the
    optional reCAPTCHA, hidden form re-posts, packed javascript and several
    `sources:` layouts found on the site.
    """
    def int_bckup_method():
        # Fallback: follow the 'bkg' anchor (or the form action) and re-post
        # the captcha answer, refreshing the global `data`.
        global data, headers
        # NOTE(review): this `page_url` is a new local that shadows the outer
        # parameter — the outer value is not modified
        page_url = scrapertools.find_single_match(
            data,
            r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
        if not page_url:
            page_url = scrapertools.find_single_match(
                data, r"""<form action=['"]([^'"]+)['"]""")
            if page_url.startswith('/'):
                page_url = 'https://wstream.video' + page_url
        if page_url:
            data = httptools.downloadpage(page_url,
                                          headers=headers,
                                          follow_redirects=True,
                                          post={
                                              'g-recaptcha-response': captcha
                                          },
                                          verify=False).data

    def getSources(data):
        """Extract every `sources: [...]` block from *data* into video_urls."""
        possibileSources = scrapertools.find_multiple_matches(
            data, r'sources:\s*(\[[^\]]+\])')
        for data in possibileSources:
            try:
                # quote bare keys so the block becomes valid JSON
                data = re.sub('([A-z]+):(?!/)', '"\\1":', data)
                keys = json.loads(data)
                for key in keys:
                    if 'label' in key:
                        # dict entry with an explicit quality label
                        if not 'type' in key:
                            key['type'] = 'mp4'
                        if not 'src' in key and 'file' in key:
                            key['src'] = key['file']
                        video_urls.append([
                            '%s [%s]' % (key['type'].replace('video/', ''),
                                         key['label']),
                            key['src'].replace('https', 'http') + '|' + _headers
                        ])
                    elif type(key) != dict:
                        # plain url string: derive the label from the extension
                        filetype = key.split('.')[-1]
                        if '?' in filetype:
                            filetype = filetype.split('?')[0]
                        video_urls.append([
                            filetype,
                            key.replace('https', 'http') + '|' + _headers
                        ])
                    else:
                        # dict entry without label
                        if not 'src' in key and 'file' in key:
                            key['src'] = key['file']
                        if key['src'].split('.')[-1] == 'mpd':
                            pass
                        video_urls.append([
                            key['src'].split('.')[-1],
                            key['src'].replace('https', 'http') + '|' + _headers
                        ])
            except:
                pass

    logger.debug("[Wstream] url=" + page_url)
    video_urls = []
    global data, real_url, headers
    sitekey = scrapertools.find_multiple_matches(
        data, """data-sitekey=['"] *([^"']+)""")
    if sitekey:
        sitekey = sitekey[-1]
    # headers[1][1] holds the real host used to rewrite the IP/alias hosts
    captcha = platformtools.show_recaptcha(
        sitekey,
        page_url.replace('116.202.226.34', headers[1][1]).replace(
            'nored.icu', headers[1][1])) if sitekey else ''
    possibleParam = scrapertools.find_multiple_matches(
        data, r"""<input.*?(?:name=["']([^'"]+).*?value=["']([^'"]*)['"]>|>)""")
    if possibleParam and possibleParam[0][0]:
        # hidden form present: re-post its fields (plus the captcha answer)
        post = {param[0]: param[1] for param in possibleParam if param[0]}
        if captcha:
            post['g-recaptcha-response'] = captcha
        if post:
            data = httptools.downloadpage(real_url,
                                          headers=headers,
                                          post=post,
                                          follow_redirects=True,
                                          verify=False).data
        elif captcha:
            int_bckup_method()
    elif captcha or not sitekey:
        int_bckup_method()
    else:
        # captcha required but not solved: bail out with a message
        platformtools.dialog_ok(config.get_localized_string(20000),
                                config.get_localized_string(707434))
        return []
    headers.append([
        'Referer',
        real_url.replace('116.202.226.34', headers[1][1]).replace('nored.icu',
                                                                  headers[1][1])
    ])
    # NOTE(review): urllib.urlencode is Python 2 API — presumably a py2 Kodi
    # runtime or a compat shim provides it; confirm
    _headers = urllib.urlencode(dict(headers))
    post_data = scrapertools.find_single_match(
        data,
        r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>"
    )
    if post_data != "":
        # packed javascript: unpack it before scanning for sources
        from lib import jsunpack
        data = jsunpack.unpack(post_data)
        getSources(data)
    else:
        getSources(data)
    if not video_urls:
        # last resort: scrape bare .mp4/.m3u8 urls from the page
        media_urls = scrapertools.find_multiple_matches(
            data, r'(http[^\s]*?\.(?:mp4|m3u8))')
        for media_url in media_urls:
            video_urls.append([
                media_url.split('.')[-1] + " [Wstream] ",
                media_url + '|' + _headers
            ])
    video_urls.sort(key=lambda x: x[0])
    return video_urls
def registerOrLogin(page_url):
    """Log in to HDmario, or create an account automatically.

    Flow: try stored credentials; otherwise either let the user enter his own
    account (re-entering this function after the settings dialog) or register
    automatically with a throw-away Gmailnator address and a random password,
    confirming the verification mail.

    Returns True on success, False on failure, [] when the user aborted the
    manual-credentials dialog without changes.

    NOTE(review): the original source contained secret-redaction artifacts
    ('******') that swallowed two statements; the `reg = platformtools.
    dialog_register(...)` call and the success/failure dialog_ok pair were
    reconstructed from the surrounding uses of `reg` and `randPsw` — confirm
    against upstream.
    """
    # Stored credentials? try them first.
    if config.get_setting('username', server='hdmario') and config.get_setting(
            'password', server='hdmario'):
        if login():
            return True
    if platformtools.dialog_yesno(
            'HDmario',
            'Questo server necessita di un account, ne hai già uno oppure vuoi tentare una registrazione automatica?',
            yeslabel='Accedi',
            nolabel='Tenta registrazione'):
        # Manual path: open the server settings so the user can type his account
        from specials import setting
        from core.item import Item
        user_pre = config.get_setting('username', server='hdmario')
        password_pre = config.get_setting('password', server='hdmario')
        setting.server_config(Item(config='hdmario'))
        user_post = config.get_setting('username', server='hdmario')
        password_post = config.get_setting('password', server='hdmario')
        if user_pre != user_post or password_pre != password_post:
            # credentials changed: retry with the new ones
            return registerOrLogin(page_url)
        else:
            return []
    else:
        # Automatic registration path
        import random
        import string
        logger.debug('Registrazione automatica in corso')
        mailbox = Gmailnator()  # disposable inbox for the verification mail
        randPsw = ''.join(
            random.choice(string.ascii_letters + string.digits)
            for i in range(10))
        captcha = httptools.downloadpage(baseUrl + '/captchaInfo').json
        logger.debug('email: ' + mailbox.address)
        logger.debug('pass: ' + randPsw)
        # Let the user confirm/edit the generated credentials and solve the captcha
        reg = platformtools.dialog_register(baseUrl + '/register/',
                                            email=True,
                                            password=True,
                                            email_default=mailbox.address,
                                            password_default=randPsw,
                                            captcha_img=captcha['captchaUrl'])
        if not reg:
            return False
        regPost = httptools.downloadpage(baseUrl + '/register/',
                                         post={
                                             'email': reg['email'],
                                             'email_confirmation': reg['email'],
                                             'password': reg['password'],
                                             'password_confirmation': reg['password'],
                                             'captchaUuid': captcha['captchaUuid'],
                                             'captcha': reg['captcha']
                                         })
        if '/register' in regPost.url:
            # still on the register page -> the site rejected the form
            error = scrapertools.htmlclean(
                scrapertools.find_single_match(
                    regPost.data, 'Impossibile proseguire.*?</div>'))
            error = scrapertools.unescape(
                scrapertools.re.sub('\n\s+', ' ', error))
            platformtools.dialog_ok('HDmario', error)
            return False
        if reg['email'] == mailbox.address:
            if "L'indirizzo email è già stato utilizzato" in regPost.data:
                # httptools.downloadpage(baseUrl + '/forgotPassword', post={'email': reg['email']})
                platformtools.dialog_ok('HDmario',
                                        'Indirizzo mail già utilizzato')
                return False
            mail = mailbox.waitForMail()
            if mail:
                # follow the confirmation link from the verification mail
                checkUrl = scrapertools.find_single_match(
                    mail.body, 'href="([^"]+)">Premi qui').replace(r'\/', '/')
                logger.debug('CheckURL: ' + checkUrl)
                httptools.downloadpage(checkUrl)
                config.set_setting('username', mailbox.address, server='hdmario')
                config.set_setting('password', randPsw, server='hdmario')
                platformtools.dialog_ok(
                    'HDmario',
                    'Registrato automaticamente con queste credenziali:\nemail:'
                    + mailbox.address + '\npass: ' + randPsw)
            else:
                platformtools.dialog_ok(
                    'HDmario', 'Impossibile registrarsi automaticamente')
                return False
        else:
            # the user changed the address: verification must be done by hand
            platformtools.dialog_ok(
                'HDmario',
                'Hai modificato la mail quindi KoD non sarà in grado di effettuare la verifica in autonomia, apri la casella '
                + reg['email'] + ' e clicca sul link. Premi ok quando fatto')
        logger.debug('Registrazione completata')
        return True
def findvideos(item):
    """Scrape the detail page for the torrent link and the server mirrors.

    Returns a list of playable Items: the torrent first, then the enabled
    "Ver en" mirrors, then the download mirrors.
    """
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    item.plot = scrapertools.find_single_match(
        data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot
    # Torrent url: either via the tumejorserie/tumejorjuego redirector...
    al_url_fa = scrapertools.find_single_match(
        data,
        'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"'
    )
    if al_url_fa == "":
        # ...or as a relative divxatope1.com path
        al_url_fa = scrapertools.find_single_match(
            data, 'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"')
        if al_url_fa != "":
            al_url_fa = "http://www.divxatope1.com/" + al_url_fa
    logger.info("torrent=" + al_url_fa)
    itemlist.append(
        Item(channel=item.channel,
             action="play",
             server="torrent",
             title="Vídeo en torrent",
             fulltitle=item.title,
             url=al_url_fa,
             thumbnail=servertools.guess_server_thumbnail("torrent"),
             plot=item.plot,
             folder=False,
             parentContent=item))
    # Mirror table: box2=server, box3=language, box4=quality, box5=url, box6=comments
    patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
    patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist_ver = []        # "watch online" mirrors
    itemlist_descargar = []  # download mirrors
    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        # normalize server ids to the names used by servertools
        servername = servername.replace("uploaded", "uploadedto").replace(
            "1fichier", "onefichier")
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            thumbnail = servertools.guess_server_thumbnail(title)
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url +
                         "], thumbnail=[" + thumbnail + "]")
            action = "play"
            if "partes" in title:
                # multi-part links need an extra extraction step
                action = "extract_url"
            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            fulltitle=title,
                            url=url,
                            thumbnail=thumbnail,
                            plot=plot,
                            parentContent=item,
                            server=servername,
                            quality=calidad)
            if comentarios.startswith("Ver en"):
                itemlist_ver.append(new_item)
            else:
                itemlist_descargar.append(new_item)
    itemlist.extend(itemlist_ver)
    itemlist.extend(itemlist_descargar)
    return itemlist
def episodios(item):
    """List every episode of a show, walking all result pages.

    Parses both the "new style" markup (<span> quality/language tags) and the
    "old style" one ([quality][Cap.SSEE] titles), fills TMDB info and sorts by
    (season, episode).
    """
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "",
                  httptools.downloadpage(item.url).data)
    # NOTE(review): unicode() is Python 2 API — presumably a py2 Kodi runtime
    # or a compat shim provides it; confirm
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    logger.debug('data: %s' % data)
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        # Build the full page list from the "Last" link (.../pg/<n>)
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(
            full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = []
        for x in range(1, int(last_page) + 1):
            list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]
    logger.debug('pattern: %s' % pattern)
    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" %
                     (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "",
                      httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        for url, thumb, info in matches:
            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                if match["episode2"]:
                    # episode range, e.g. 1x01-02
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (
                        item.show, match["season"], str(
                            match["episode"]).zfill(2), str(
                                match["episode2"]).zfill(2), match["lang"],
                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (
                        item.show, match["season"], str(
                            match["episode"]).zfill(2), match["lang"],
                        match["quality"])
            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)
                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                if match["season2"] and match["episode2"]:
                    # episode range, possibly crossing a season boundary
                    multi = True
                    if match["season"] == match["season2"]:
                        title = "%s (%sx%s-%s) %s[%s]" % (
                            item.show, match["season"], match["episode"],
                            match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (
                            item.show, match["season"], match["episode"],
                            match["season2"], match["episode2"], str_lang,
                            match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"],
                                                   match["episode"], str_lang,
                                                   match["quality"])
                    multi = False
            season = match['season']
            episode = match['episode']
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     thumbnail=thumb,
                     quality=item.quality,
                     multi=multi,
                     contentSeason=season,
                     contentEpisodeNumber=episode,
                     infoLabels=infoLabels))
    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(itemlist) > 1:
        itemlist = sorted(
            itemlist,
            key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca",
                       action="add_serie_to_library",
                       extra="episodios"))
    return itemlist
def list_tvshows(item):
    """List every TV show stored in the video library.

    Walks TVSHOWS_PATH for tvshow.nfo files, syncs watched state from Kodi,
    prunes links whose channel no longer exists (asking the user once per
    channel) and builds each show's context menu. Appends a final
    "update videolibrary" entry.
    """
    logger.info()
    itemlist = []
    dead_list = []    # channels confirmed gone whose links the user removed
    zombie_list = []  # channels gone but whose links the user chose to keep
    # Walk the SERIES video library recursively collecting every tvshow.nfo
    for raiz, subcarpetas, ficheros in filetools.walk(
            videolibrarytools.TVSHOWS_PATH):
        for f in ficheros:
            if f == "tvshow.nfo":
                tvshow_path = filetools.join(raiz, f)
                # logger.debug(tvshow_path)
                # Sync episodes watched in the Kodi video library into Alfa's
                try:
                    if config.is_xbmc():  # only on Kodi
                        from platformcode import xbmc_videolibrary
                        xbmc_videolibrary.mark_content_as_watched_on_alfa(
                            tvshow_path)
                except:
                    logger.error(traceback.format_exc())
                head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
                if not item_tvshow:  # bad .nfo: skip to the next one
                    logger.error('.nfo erroneo en ' + str(tvshow_path))
                    continue
                if len(item_tvshow.library_urls) > 1:
                    multicanal = True
                else:
                    multicanal = False
                ## check that the channels exist; if one does not, ask whether
                ## its links should be removed
                for canal in item_tvshow.library_urls:
                    canal = generictools.verify_channel(canal)
                    try:
                        channel_verify = __import__(
                            'channels.%s' % canal,
                            fromlist=["channels.%s" % canal])
                        logger.debug('El canal %s parece correcto' %
                                     channel_verify)
                    except:
                        dead_item = Item(
                            multicanal=multicanal,
                            contentType='tvshow',
                            dead=canal,
                            path=raiz,
                            nfo=tvshow_path,
                            library_urls=item_tvshow.library_urls,
                            infoLabels={'title': item_tvshow.contentTitle})
                        # ask only the first time this channel is seen
                        if canal not in dead_list and canal not in zombie_list:
                            confirm = platformtools.dialog_yesno(
                                'Videoteca',
                                'Parece que el canal [COLOR red]%s[/COLOR] ya no existe.'
                                % canal.upper(),
                                'Deseas eliminar los enlaces de este canal?')
                        elif canal in zombie_list:
                            confirm = False
                        else:
                            confirm = True
                        if confirm:
                            delete(dead_item)
                            if canal not in dead_list:
                                dead_list.append(canal)
                            continue
                        else:
                            if canal not in zombie_list:
                                zombie_list.append(canal)
                if len(dead_list) > 0:
                    for canal in dead_list:
                        if canal in item_tvshow.library_urls:
                            del item_tvshow.library_urls[canal]

                ### continue loading the video library elements
                try:
                    # occasionally fails with random errors (.nfo not found);
                    # probably a timing problem
                    item_tvshow.title = item_tvshow.contentTitle
                    item_tvshow.path = raiz
                    item_tvshow.nfo = tvshow_path
                    # Context menu: mark as watched/unwatched
                    visto = item_tvshow.library_playcounts.get(
                        item_tvshow.contentTitle, 0)
                    item_tvshow.infoLabels["playcount"] = visto
                    if visto > 0:
                        texto_visto = config.get_localized_string(60020)
                        contador = 0
                    else:
                        texto_visto = config.get_localized_string(60021)
                        contador = 1
                except:
                    logger.error('No encuentra: ' + str(tvshow_path))
                    logger.error(traceback.format_exc())
                    continue
                # Context menu: automatically search for new episodes or not
                if item_tvshow.active and int(item_tvshow.active) > 0:
                    texto_update = config.get_localized_string(60022)
                    value = 0
                    item_tvshow.text_color = "green"
                else:
                    texto_update = config.get_localized_string(60023)
                    value = 1
                    item_tvshow.text_color = "0xFFDF7401"
                # Context menu: delete the show or just one channel's links
                num_canales = len(item_tvshow.library_urls)
                if "downloads" in item_tvshow.library_urls:
                    num_canales -= 1
                if num_canales > 1:
                    texto_eliminar = config.get_localized_string(60024)
                else:
                    texto_eliminar = config.get_localized_string(60025)
                item_tvshow.context = [{
                    "title": texto_visto,
                    "action": "mark_content_as_watched",
                    "channel": "videolibrary",
                    "playcount": contador
                }, {
                    "title": texto_update,
                    "action": "mark_tvshow_as_updatable",
                    "channel": "videolibrary",
                    "active": value
                }, {
                    "title": texto_eliminar,
                    "action": "delete",
                    "channel": "videolibrary",
                    "multicanal": multicanal
                }, {
                    "title": config.get_localized_string(70269),
                    "action": "update_tvshow",
                    "channel": "videolibrary"
                }]
                # ,{"title": "Cambiar contenido (PENDIENTE)",
                #   "action": "",
                #   "channel": "videolibrary"}]
                # logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))
                ## only list shows that still have at least one channel ##
                if len(item_tvshow.library_urls) > 0:
                    itemlist.append(item_tvshow)

    if itemlist:
        itemlist = sorted(itemlist, key=lambda it: it.title.lower())
        itemlist.append(
            Item(channel=item.channel,
                 action="update_videolibrary",
                 thumbnail=item.thumbnail,
                 title=config.get_localized_string(60026),
                 folder=False))
    return itemlist
def listado(item):
    """Scrape a newpct1-style listing page and build the menu items.

    Depending on item.modo the raw page is either narrowed to the <ul>
    matching item.extra (first page) or used whole (follow-up pages).
    Results are paginated in chunks of 30 via item.next_page ('a'/'b').

    @param item: item whose url points at the listing page
    @type item: Item
    @return: list of Item objects (movies -> findvideos, series -> episodios)
    @rtype: list
    """
    logger.info()
    itemlist = []
    url_next_page = ''

    # Strip whitespace noise and HTML comments before matching.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    # NOTE(review): unicode() is Python-2-only; this branch presumably never
    # runs under Python 3 -- confirm against the codebase's PY3 handling.
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        # First page: isolate the <ul> block whose class is item.extra.
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        # Follow-up page: scan the whole document.
        fichas = data
        page_extra = item.extra

    patron = '<li><a href="([^"]+).*?'  # url
    patron += 'title="([^"]+).*?'  # title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # thumbnail
    patron += '<span>([^<]*)</span>'  # quality
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)

    # Pagination: each scraped page is served in two halves of 30 entries
    # ('a' = first half, 'b' = remainder); after 'b' the site's own "Next"
    # link is followed.
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        # The release year is embedded in the thumbnail filename.
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')

        if "1.com/series" in url:
            # Series entry: route to the episode list and clean the title.
            action = "episodios"
            extra = "serie"
            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace(
                "Descarga Serie HD", "", 1).replace("Ver en linea", "", 1).strip()
        else:
            title = title.replace("Descargar", "", 1).strip()
            if title.endswith("gratis"):
                title = title[:-7]
        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        # Derive content type and clean title from the URL path segments.
        context = ""
        context_title = scrapertools.find_single_match(
            url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace(
                    "pelicula", "movie").replace("series", "tvshow")
                context_title = context_title[1].replace("-", " ")
                # Trim a trailing year, plain or parenthesised.
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]
            except:
                context_title = show
        logger.debug('contxt title: %s' % context_title)
        logger.debug('year: %s' % year)
        logger.debug('context: %s' % context)

        if not 'array' in title:
            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            url=url,
                            thumbnail=thumbnail,
                            extra=extra,
                            show=context_title,
                            contentTitle=context_title,
                            contentType=context,
                            context=["buscar_trailer"],
                            infoLabels={'year': year})
            if year:
                # Only query TMDB when a year is known, to avoid bad matches.
                tmdb.set_infoLabels_item(new_item, seekTmdb=True)
            itemlist.append(new_item)

    if url_next_page:
        # NOTE(review): plot=extra reuses the *last* loop iteration's value --
        # looks accidental; confirm whether the next-page item needs it.
        itemlist.append(
            Item(channel=item.channel,
                 action="listado",
                 title=">> Página siguiente",
                 url=url_next_page,
                 next_page=next_page,
                 folder=True,
                 text_color='yellow',
                 text_bold=True,
                 modo=modo,
                 plot=extra,
                 extra=page_extra))
    return itemlist
def findvideos(item):
    """Extract the playable links from a newpct1 detail page.

    Produces one torrent item (when present) plus one item per hosted link
    in the "watch online" (tab3) and "download" (tab2) sections.

    @param item: item whose url points at the detail page
    @type item: Item
    @return: list of playable Item objects
    @rtype: list
    """
    logger.info()
    itemlist = []

    ## Cualquiera de las tres opciones son válidas
    # item.url = item.url.replace("1.com/","1.com/ver-online/")
    # item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")

    # Download the page and strip whitespace/comments noise.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  httptools.downloadpage(item.url).data)
    # NOTE(review): unicode() is Python-2-only -- confirm PY3 handling.
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    # Title is split across two capture groups of the same <h1>.
    title = scrapertools.find_single_match(
        data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(
        data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(
        data, '<div class="entry-left">.*?src="([^"]+)"')

    # <a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a>
    patron = 'openTorrent.*?"title=".*?" class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    # scraped torrent url
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 server="torrent",
                 title=title + " [torrent]",
                 fulltitle=title,
                 url=url,
                 thumbnail=caratula,
                 plot=item.plot,
                 folder=False))
    logger.debug("matar %s" % data)

    # Scrape watch-online / download links (single and multi-part).
    # Normalise quotes and strip the redirect wrappers so the raw hoster
    # links remain.
    data = data.replace("'", '"')
    data = data.replace(
        'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=',
        "")
    data = data.replace(
        "http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")

    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)

    patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    patron += '<div class="box2">([^<]+)</div>'  # server
    patron += '<div class="box3">([^<]+)</div>'  # language
    patron += '<div class="box4">([^<]+)</div>'  # quality
    patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    patron += '<div class="box6">([^<]+)</div>'  # title
    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        # Site labels "streamin"; the server module is "streaminto".
        servidor = servidor.replace("streamin", "streaminto")
        titulo = titulo + " [" + servidor + "]"
        mostrar_server = True
        if config.get_setting("hidepremium"):
            mostrar_server = servertools.is_server_enabled(servidor)
        if mostrar_server:
            try:
                devuelve = servertools.findvideosbyserver(enlace, servidor)
                if devuelve:
                    enlace = devuelve[0][1]
                    itemlist.append(
                        Item(fanart=item.fanart,
                             channel=item.channel,
                             action="play",
                             server=servidor,
                             title=titulo,
                             fulltitle=item.title,
                             url=enlace,
                             thumbnail=logo,
                             plot=item.plot,
                             folder=False))
            except:
                # Best-effort: skip links the resolver chokes on.
                pass

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        # Multi-part downloads pack several space-separated URLs in one href.
        partes = enlace.split(" ")
        p = 1
        for enlace in partes:
            parte_titulo = titulo + " (%s/%s)" % (
                p, len(partes)) + " [" + servidor + "]"
            p += 1
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart,
                                 channel=item.channel,
                                 action="play",
                                 server=servidor,
                                 title=parte_titulo,
                                 fulltitle=item.title,
                                 url=enlace,
                                 thumbnail=logo,
                                 plot=item.plot,
                                 folder=False))
                except:
                    pass
    return itemlist
def search_trailers(item): logger.info() from core.tmdb import Tmdb import xbmcgui, xbmc tipo = 'movie' if item.contentType == 'movie' else 'tv' nombre = item.contentTitle if item.contentType == 'movie' else item.contentSerieName if item.infoLabels['tmdb_id']: tmdb_search = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda='es') else: anyo = item.infoLabels['year'] if item.infoLabels['year'] else '-' tmdb_search = Tmdb(texto_buscado=nombre, tipo=tipo, year=anyo, idioma_busqueda='es') opciones = [] resultados = tmdb_search.get_videos() for res in resultados: # ~ logger.debug(res) it = xbmcgui.ListItem(res['name'], '[%sp] (%s)' % (res['size'], res['language'])) if item.thumbnail: it.setArt({'thumb': item.thumbnail}) opciones.append(it) if len(resultados) == 0: platformtools.dialog_ok(nombre, 'No se encuentra ningún tráiler en TMDB') else: while not xbmc.Monitor().abortRequested(): # (while True) ret = xbmcgui.Dialog().select('Tráilers para %s' % nombre, opciones, useDetails=True) if ret == -1: break platformtools.dialog_notification(resultados[ret]['name'], 'Cargando tráiler ...', time=3000, sound=False) from core import servertools if 'youtube' in resultados[ret]['url']: video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( 'youtube', resultados[ret]['url']) else: video_urls = [] #TODO si no es youtube ... logger.debug(resultados[ret]) if len(video_urls) > 0: # ~ logger.debug(video_urls) xbmc.Player().play( video_urls[-1][1]) # el último es el de más calidad xbmc.sleep(1000) while not xbmc.Monitor().abortRequested() and xbmc.Player( ).isPlaying(): xbmc.sleep(1000) else: platformtools.dialog_notification( resultados[ret]['name'], 'No se puede reproducir el tráiler', time=3000, sound=False) if len(resultados) == 1: break # si sólo hay un vídeo no volver al diálogo de tráilers
def sub_menu(item): logger.debug() itemlist = [ Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70306), mode='movie', thumbnail=get_thumb("movie_genre.png")), Item(channel=item.channel, action='years_menu', title=config.get_localized_string(70742), mode='movie', thumbnail=get_thumb("movie_year.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70307), search_type='list', list_type='movie/popular', mode='movie', thumbnail=get_thumb("movie_popular.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70308), search_type='list', list_type='movie/top_rated', mode='movie', thumbnail=get_thumb("movie_top.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70309), search_type='list', list_type='movie/now_playing', mode='movie', thumbnail=get_thumb("movie_now_playing.png")), Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70310), mode='tvshow', thumbnail=get_thumb("tvshow_genre.png")), Item(channel=item.channel, action='years_menu', title=config.get_localized_string(70743), mode='tvshow', thumbnail=get_thumb("tvshow_year.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70311), search_type='list', list_type='tv/popular', mode='tvshow', thumbnail=get_thumb("popular.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70312), search_type='list', list_type='tv/on_the_air', mode='tvshow', thumbnail=get_thumb("tvshow_on_the_air.png")), Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70313), search_type='list', list_type='tv/top_rated', mode='tvshow', thumbnail=get_thumb("tvshow_top.png")), ] itemlist = set_context(itemlist) return itemlist
def get_info(itemlist): logger.debug() tmdb.set_infoLabels_itemlist(itemlist, True, forced=True) return itemlist
def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=True): """ Lee el contenido de un archivo y devuelve los datos @param path: ruta del fichero @type path: str @param linea_inicio: primera linea a leer del fichero @type linea_inicio: int positivo @param total_lineas: numero maximo de lineas a leer. Si es None o superior al total de lineas se leera el fichero hasta el final. @type total_lineas: int positivo @rtype: str @return: datos que contiene el fichero """ path = encode(path) try: if not isinstance(linea_inicio, int): try: linea_inicio = int(linea_inicio) except: logger.error('Read: ERROR de linea_inicio: %s' % str(linea_inicio)) linea_inicio = 0 if total_lineas != None and not isinstance(total_lineas, int): try: total_lineas = int(total_lineas) except: logger.error('Read: ERROR de total_lineas: %s' % str(total_lineas)) total_lineas = None if xbmc_vfs and vfs: if not exists(path): return False f = xbmcvfs.File(path, "rb") if linea_inicio > 0: if not isinstance(whence, int): try: whence = int(whence) except: return False f.seek(linea_inicio, whence) logger.debug('POSICIÓN de comienzo de lectura, tell(): %s' % f.seek(0, 1)) if total_lineas == None: total_lineas = 0 data = f.read(total_lineas) return "".join(data) elif path.lower().startswith("smb://"): f = samba.smb_open(path, "rb") else: f = open(path, "rb") data = [] for x, line in enumerate(f): if x < linea_inicio: continue if len(data) == total_lineas: break data.append(line) f.close() except: if not silent: logger.error("ERROR al leer el archivo: %s" % path) logger.error(traceback.format_exc()) return False else: if not PY3: return "".join(data) else: return b"".join(data)
def findvideos(item):
    """Resolve the playable links for a videolibrary entry.

    Locates the per-channel .json files stored next to the entry's .strm,
    optionally asks the user which channel to use, runs each channel's own
    findvideos() (or plays directly / falls back to generic link detection),
    and rebrands the resulting server items as videolibrary items.

    @param item: videolibrary entry (needs contentTitle and strm_path)
    @type item: Item
    @return: list of playable server Items ('' when played directly)
    @rtype: list
    """
    from channels import autoplay
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))
    itemlist = []
    list_canales = {}
    item_local = None

    # Disable autoplay while collecting links.
    autoplay.set_status(False)

    if not item.contentTitle or not item.strm_path:
        logger.debug("No se pueden buscar videos por falta de parametros")
        return []

    #content_title = [c for c in item.contentTitle.strip().lower() if c not in ":*?<>|\/"]
    # Filesystem-safe version of the title, used to match the .json names.
    content_title = "".join(c for c in item.contentTitle.strip().lower()
                            if c not in ":*?<>|\/")

    if item.contentType == 'movie':
        item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH,
                                        item.strm_path)
        path_dir = filetools.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir,
                                  filetools.basename(path_dir) + ".nfo")
    else:
        item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH,
                                        item.strm_path)
        path_dir = filetools.dirname(item.strm_path)
        item.nfo = filetools.join(path_dir, 'tvshow.nfo')

    # Collect one .json per channel; file names look like "<title>[<channel>].json".
    for fd in filetools.listdir(path_dir):
        if fd.endswith('.json'):
            contenido, nom_canal = fd[:-6].split('[')
            if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \
                    list(list_canales.keys()):
                list_canales[nom_canal] = filetools.join(path_dir, fd)

    num_canales = len(list_canales)

    if 'downloads' in list_canales:
        # Local downloads are handled specially: played from disk if present.
        json_path = list_canales['downloads']
        item_json = Item().fromjson(filetools.read(json_path))
        # Redirect to NewPct1.py if it is a clone, or to another channel/url on takedown
        try:
            if item_json:
                item_json, it, overwrite = generictools.redirect_clone_newpct1(
                    item_json)
        except:
            logger.error(traceback.format_exc())
        item_json.contentChannel = "local"
        # Support relative paths for downloads.
        if filetools.is_relative(item_json.url):
            item_json.url = filetools.join(videolibrarytools.VIDEOLIBRARY_PATH,
                                           item_json.url)
        del list_canales['downloads']
        # Make sure the video has not been deleted.
        if filetools.exists(item_json.url):
            item_local = item_json.clone(action='play')
            itemlist.append(item_local)
        else:
            num_canales -= 1

    filtro_canal = ''
    if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"):
        # Ask which channel to use (plus "all" and the local copy if any).
        opciones = [
            config.get_localized_string(70089) % k.capitalize()
            for k in list(list_canales.keys())
        ]
        opciones.insert(0, config.get_localized_string(70083))
        if item_local:
            opciones.append(item_local.title)
        from platformcode import platformtools
        index = platformtools.dialog_select(config.get_localized_string(30163),
                                            opciones)
        if index < 0:
            return []
        elif item_local and index == len(opciones) - 1:
            # Last option = the local download: play it straight away.
            filtro_canal = 'downloads'
            platformtools.play_video(item_local)
        elif index > 0:
            filtro_canal = opciones[index].replace(
                config.get_localized_string(70078), "").strip()
            itemlist = []

    for nom_canal, json_path in list(list_canales.items()):
        if filtro_canal and filtro_canal != nom_canal.capitalize():
            continue

        item_canal = Item()
        item_canal.channel = nom_canal
        # Redirect to NewPct1.py if it is a clone, or to another channel/url on takedown
        try:
            item_canal, it, overwrite = generictools.redirect_clone_newpct1(
                item_canal)
        except:
            logger.error(traceback.format_exc())
        nom_canal = item_canal.channel

        # Import the channel module for the selected entry.
        try:
            channel = __import__('channels.%s' % nom_canal,
                                 fromlist=["channels.%s" % nom_canal])
        except ImportError:
            exec("import channels." + nom_canal + " as channel")

        item_json = Item().fromjson(filetools.read(json_path))
        # Redirect to NewPct1.py if it is a clone, or to another channel/url on takedown
        try:
            if item_json:
                item_json, it, overwrite = generictools.redirect_clone_newpct1(
                    item_json)
        except:
            logger.error(traceback.format_exc())

        list_servers = []
        try:
            # FILTERTOOLS
            # If the channel has a filter, pass it the stored show name so
            # filtering works correctly.
            if "list_language" in item_json:
                # coming from the addon's own videolibrary
                if "library_filter_show" in item:
                    item_json.show = item.library_filter_show.get(
                        nom_canal, "")

            # Run findvideos, either the channel's own or the generic one.
            item_json.contentChannel = 'videolibrary'
            if hasattr(channel, 'findvideos'):
                from core import servertools
                if item_json.videolibray_emergency_urls:
                    del item_json.videolibray_emergency_urls
                list_servers = getattr(channel, 'findvideos')(item_json)
                list_servers = servertools.filter_servers(list_servers)
            elif item_json.action == 'play':
                # The stored item is directly playable: play and bail out.
                from platformcode import platformtools
                autoplay.set_status(True)
                item_json.contentChannel = item_json.channel
                item_json.channel = "videolibrary"
                platformtools.play_video(item_json)
                return ''
            else:
                from core import servertools
                list_servers = servertools.find_video_items(item_json)
        except Exception as ex:
            logger.error("Ha fallado la funcion findvideos para el canal %s" %
                         nom_canal)
            template = "An exception of type %s occured. Arguments:\n%r"
            message = template % (type(ex).__name__, ex.args)
            logger.error(message)
            logger.error(traceback.format_exc())

        # Retitle the servers (channel name prefix) and inherit the item's
        # infoLabels/artwork when the server lacks its own.
        for server in list_servers:
            #if not server.action:  # Ignorar/PERMITIR las etiquetas
            #    continue
            server.contentChannel = server.channel
            server.channel = "videolibrary"
            server.nfo = item.nfo
            server.strm_path = item.strm_path

            # Kodi 18 compatibility: stops the busy spinner on direct links.
            if server.action == 'play':
                server.folder = False

            # Prepend the channel name if configured to do so.
            if config.get_setting("quit_channel_name", "videolibrary") == 0:
                server.title = "%s: %s" % (nom_canal.capitalize(),
                                           server.title)

            #server.infoLabels = item_json.infoLabels
            if not server.thumbnail:
                server.thumbnail = item.thumbnail

            # logger.debug("server:\n%s" % server.tostring('\n'))
            itemlist.append(server)

    # return sorted(itemlist, key=lambda it: it.title.lower())
    autoplay.play_multi_channel(item, itemlist)

    from inspect import stack
    from channels import nextep
    # Only trigger the next-episode helper when called from the runner.
    if nextep.check(item) and stack()[1][3] == 'run':
        nextep.videolibrary(item)
    return itemlist
def verify_directories_created():
    """Test if all the required directories are created.

    Ensures the videolibrary/download/settings paths exist (creating them and
    persisting defaults when missing), creates the CINE/SERIES content
    folders, and mirrors the addon's 720p skin resources into the active
    skin's default resolution folder when that differs.
    """
    from platformcode import logger
    from core import filetools
    from platformcode import xbmc_videolibrary
    import time
    logger.info()
    time.sleep(1)

    config_paths = [["videolibrarypath", "videolibrary"],
                    ["downloadpath", "downloads"],
                    ["downloadlistpath", "downloads/list"],
                    ["settings_path", "settings_channels"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        # videolibrary: try to discover the path from Kodi itself first
        if path == "videolibrarypath":
            if not saved_path:
                saved_path = xbmc_videolibrary.search_library_path()
                if saved_path:
                    set_setting(path, saved_path)

        if not saved_path:
            # Fall back to the addon profile directory.
            saved_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/" + default
            set_setting(path, saved_path)

        saved_path = translatePath(saved_path)
        if not filetools.exists(saved_path):
            logger.debug("Creating %s: %s" % (path, saved_path))
            filetools.mkdir(saved_path)

    config_paths = [["folder_movies", "CINE"], ["folder_tvshows", "SERIES"]]
    for path, default in config_paths:
        saved_path = get_setting(path)
        if not saved_path:
            saved_path = default
            set_setting(path, saved_path)

        content_path = filetools.join(get_videolibrary_path(), saved_path)
        if not filetools.exists(content_path):
            logger.debug("Creating %s: %s" % (path, content_path))
            # create the directory
            filetools.mkdir(content_path)

    try:
        from core import scrapertools
        # Look for the addon.xml of the active skin.
        skindir = filetools.join("special://home", 'addons', xbmc.getSkinDir(),
                                 'addon.xml')
        # NOTE(review): isdir() on an .xml file path looks wrong -- presumably
        # intended as an existence check on the skin folder; confirm.
        if not os.path.isdir(skindir):
            return  # no need to log an error if the folder does not exist

        # Extract the name of the default resolution folder.
        folder = ""
        data = filetools.read(skindir)
        res = scrapertools.find_multiple_matches(data, '(<res .*?>)')
        for r in res:
            if 'default="true"' in r:
                folder = scrapertools.find_single_match(r, 'folder="([^"]+)"')
                break

        # Create that folder inside the addon's skin resources if missing.
        default = filetools.join(get_runtime_path(), 'resources', 'skins',
                                 'Default')
        if folder and not filetools.exists(filetools.join(default, folder)):
            filetools.mkdir(filetools.join(default, folder))

        # Copy each file over from 720p when missing or when sizes differ.
        if folder and folder != '720p':
            for root, folders, files in filetools.walk(
                    filetools.join(default, '720p')):
                for f in files:
                    if not filetools.exists(filetools.join(default, folder, f)) or \
                            (filetools.getsize(filetools.join(default, folder, f)) !=
                             filetools.getsize(filetools.join(default, '720p', f))):
                        filetools.copy(filetools.join(default, '720p', f),
                                       filetools.join(default, folder, f),
                                       True)
    except:
        import traceback
        logger.error("Al comprobar o crear la carpeta de resolución")
        logger.error(traceback.format_exc())
def list_movies(item, silent=False):
    """List every movie stored in the addon's videolibrary.

    Walks MOVIES_PATH for .nfo files, syncs watched state with Kodi, prunes
    links whose channel no longer exists (asking the user once per channel),
    and attaches the watched/delete context-menu entries.

    @param item: calling menu item (unused except for routing)
    @type item: Item
    @param silent: when True, build the list but return nothing
    @type silent: bool
    @return: movie Items sorted by title, or None when silent
    @rtype: list
    """
    logger.info()
    itemlist = []
    # Channels confirmed dead (links removed) / kept despite being missing.
    dead_list = []
    zombie_list = []

    for raiz, subcarpetas, ficheros in filetools.walk(
            videolibrarytools.MOVIES_PATH):
        for f in ficheros:
            if f.endswith(".nfo"):
                nfo_path = filetools.join(raiz, f)

                # Sync movies watched in the Kodi library back into ours.
                try:
                    if config.is_xbmc():  # only possible under Kodi
                        from platformcode import xbmc_videolibrary
                        xbmc_videolibrary.mark_content_as_watched_on_alfa(
                            nfo_path)
                except:
                    logger.error(traceback.format_exc())

                head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
                if not new_item:  # unreadable .nfo: skip to the next one
                    logger.error('.nfo erroneo en ' + str(nfo_path))
                    continue

                if len(new_item.library_urls) > 1:
                    multicanal = True
                else:
                    multicanal = False

                # Verify each linked channel still exists; if not, offer to
                # remove that channel's links.
                for canal_org in new_item.library_urls:
                    canal = generictools.verify_channel(canal_org)
                    try:
                        channel_verify = __import__(
                            'channels.%s' % canal,
                            fromlist=["channels.%s" % canal])
                        logger.debug('El canal %s parece correcto' %
                                     channel_verify)
                    except:
                        dead_item = Item(
                            multicanal=multicanal,
                            contentType='movie',
                            dead=canal,
                            path=raiz,
                            nfo=nfo_path,
                            library_urls=new_item.library_urls,
                            infoLabels={'title': new_item.contentTitle})
                        # Ask only once per channel; remember the answer.
                        if canal not in dead_list and canal not in zombie_list:
                            confirm = platformtools.dialog_yesno(
                                'Videoteca',
                                'Parece que el canal [COLOR red]%s[/COLOR] ya no existe.'
                                % canal.upper(),
                                'Deseas eliminar los enlaces de este canal?')
                        elif canal in zombie_list:
                            confirm = False
                        else:
                            confirm = True

                        if confirm:
                            delete(dead_item)
                            if canal not in dead_list:
                                dead_list.append(canal)
                            continue
                        else:
                            if canal not in zombie_list:
                                zombie_list.append(canal)

                if len(dead_list) > 0:
                    for canal in dead_list:
                        if canal in new_item.library_urls:
                            del new_item.library_urls[canal]

                ### continue loading the videolibrary entry
                new_item.nfo = nfo_path
                new_item.path = raiz
                new_item.thumbnail = new_item.contentThumbnail
                new_item.text_color = "blue"

                strm_path = new_item.strm_path.replace("\\", "/").rstrip("/")
                if '/' in new_item.path:
                    new_item.strm_path = strm_path
                if not filetools.exists(
                        filetools.join(new_item.path,
                                       filetools.basename(strm_path))):
                    # If the strm was removed from the Kodi library, hide it.
                    continue

                # Redirect to NewPct1.py if it is a clone, or to another channel/url on takedown
                try:
                    # NOTE(review): unpacking the first two results into the
                    # same name (new_item, new_item) discards one of them --
                    # compare with the tvshow variant which uses a second
                    # variable; confirm intended.
                    new_item, new_item, overwrite = generictools.redirect_clone_newpct1(
                        new_item, head_nfo, new_item, raiz)
                except:
                    logger.error(traceback.format_exc())

                # Context menu: mark as watched/unwatched
                visto = new_item.library_playcounts.get(
                    os.path.splitext(f)[0], 0)
                new_item.infoLabels["playcount"] = visto
                if visto > 0:
                    texto_visto = config.get_localized_string(60016)
                    contador = 0
                else:
                    texto_visto = config.get_localized_string(60017)
                    contador = 1

                # Context menu: delete movie/channel ('downloads' is not a
                # real channel, so it does not count).
                num_canales = len(new_item.library_urls)
                if "downloads" in new_item.library_urls:
                    num_canales -= 1
                if num_canales > 1:
                    texto_eliminar = config.get_localized_string(60018)
                else:
                    texto_eliminar = config.get_localized_string(60019)

                new_item.context = [{
                    "title": texto_visto,
                    "action": "mark_content_as_watched",
                    "channel": "videolibrary",
                    "playcount": contador
                }, {
                    "title": texto_eliminar,
                    "action": "delete",
                    "channel": "videolibrary",
                    "multicanal": multicanal
                }]
                # ,{"title": "Cambiar contenido (PENDIENTE)",
                #  "action": "",
                #  "channel": "videolibrary"}]
                # logger.debug("new_item: " + new_item.tostring('\n'))
                itemlist.append(new_item)

    if silent == False:
        return sorted(itemlist, key=lambda it: it.title.lower())
    else:
        return
def lista(item):
    """Scrape a divxatope-style listing page ("pelilist"/"buscar-list").

    Series entries (divxatope1.com/serie) are parsed for season/episode and
    routed to episodios; everything else goes to findvideos. Handles both
    link-based and form-based ("paginar") pagination.

    @param item: item whose url points at the listing (item.extra carries
        the POST body for form-based pages)
    @type item: Item
    @return: list of Item objects
    @rtype: list
    """
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url, post=item.extra).data
    # logger.info("data="+data)

    bloque = scrapertools.find_single_match(
        data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')

    patron = '<a href="([^"]+).*?'  # the url
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2[^>]*>(.*?)</h2.*?'  # the title
    patron += '<span>([^<].*?)<'  # the quality
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle, calidad in matches:
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        title = scrapedtitle.strip()
        if scrapertools.htmlclean(calidad):
            title += " (" + scrapertools.htmlclean(calidad) + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url +
                     "], thumbnail=[" + thumbnail + "]")

        # The release year is embedded in the thumbnail filename.
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        contentTitle = scrapertools.htmlclean(scrapedtitle).strip()

        # The quality <span> may pack quality/language/chapter, <br>-separated.
        patron = '([^<]+)<br>'
        matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
        idioma = ''

        if "divxatope1.com/serie" in url:
            contentTitle = re.sub('\s+-|\.{3}$', '', contentTitle)
            capitulo = ''
            temporada = 0
            episodio = 0
            if len(matches) == 3:
                calidad = matches[0].strip()
                idioma = matches[1].strip()
                # "Temp 3 Cap 01" -> "3x01" -> season/episode
                capitulo = matches[2].replace('Cap', 'x').replace(
                    'Temp', '').replace(' ', '')
                temporada, episodio = capitulo.strip().split('x')
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     folder=True,
                     contentTitle=contentTitle,
                     language=idioma,
                     contentSeason=int(temporada),
                     contentEpisodeNumber=int(episodio),
                     quality=calidad))
        else:
            if len(matches) == 2:
                calidad = matches[0].strip()
                idioma = matches[1].strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     folder=True,
                     contentTitle=contentTitle,
                     language=idioma,
                     contentThumbnail=thumbnail,
                     quality=calidad))

    # Pagination: plain "Next" link first, then the form-based variant.
    next_page_url = scrapertools.find_single_match(
        data, '<li><a href="([^"]+)">Next</a></li>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 folder=True))
    else:
        next_page_url = scrapertools.find_single_match(
            data,
            '<li><input type="button" class="btn-submit" value="Siguiente" onClick="paginar..(\d+)'
        )
        if next_page_url != "":
            itemlist.append(
                Item(channel=item.channel,
                     action="lista",
                     title=">> Página siguiente",
                     url=item.url,
                     extra=item.extra + "&pg=" + next_page_url,
                     folder=True))
    return itemlist
def novedades(item):
    """Collect the latest additions from every active channel.

    item.mode selects how the function runs:
      - 'normal' (default): interactive, with a progress dialog;
      - 'silent': pre-cache pass over the first two channels;
      - 'set_cache': finish the cache with the remaining channels;
      - 'get_cached': return the previously built cache, if present.
    Channels are queried sequentially or in parallel threads depending on
    the "multithread" setting (offered to the user when disabled).

    @param item: menu item; item.extra selects the content type
    @type item: Item
    @return: grouped list of new items, or None in cache / no-channel modes
    @rtype: list or None
    """
    logger.info()
    global list_newest
    threads = []
    list_newest = []
    start_time = time.time()

    mode = item.mode
    if mode == '':
        mode = 'normal'

    if mode == 'get_cached':
        if os.path.exists(menu_cache_path):
            return get_from_cache(item)

    multithread = config.get_setting("multithread", "news")
    logger.info("multithread= " + str(multithread))
    if not multithread:
        # Offer to enable the multi-threaded search.
        if platformtools.dialog_yesno(config.get_localized_string(60515),
                                      config.get_localized_string(60516),
                                      config.get_localized_string(60517),
                                      config.get_localized_string(60518)):
            if config.set_setting("multithread", True, "news"):
                multithread = True

    if mode == 'normal':
        progreso = platformtools.dialog_progress(
            item.category, config.get_localized_string(60519))

    list_canales, any_active = get_channels_list()

    if config.is_xbmc():
        from channels import side_menu
        if mode == 'silent' and any_active and len(
                list_canales[item.extra]) > 0:
            # Silent pre-cache: only process the first two channels now.
            side_menu.set_menu_settings(item)
            aux_list = []
            for canal in list_canales[item.extra]:
                if len(aux_list) < 2:
                    aux_list.append(canal)
            list_canales[item.extra] = aux_list
        if mode == 'set_cache':
            # Complete the cache with the channels skipped by 'silent'.
            list_canales[item.extra] = list_canales[item.extra][2:]

    if any_active and len(list_canales[item.extra]) > 0:
        import math
        # fix float porque la division se hace mal en python 2.x
        number_of_channels = float(100) / len(list_canales[item.extra])

        for index, channel in enumerate(list_canales[item.extra]):
            channel_id, channel_title = channel
            percentage = int(math.ceil((index + 1) * number_of_channels))

            # if progreso.iscanceled():
            #     progreso.close()
            #     logger.info("Búsqueda cancelada")
            #     return itemlist

            # Multi-thread mode
            if multithread:
                t = Thread(target=get_newest,
                           args=[channel_id, item.extra],
                           name=channel_title)
                t.start()
                threads.append(t)
                if mode == 'normal':
                    progreso.update(
                        percentage, "",
                        config.get_localized_string(60520) % channel_title)
            # Single-thread mode
            else:
                if mode == 'normal':
                    logger.info("Obteniendo novedades de channel_id=" +
                                channel_id)
                    progreso.update(
                        percentage, "",
                        config.get_localized_string(60520) % channel_title)
                get_newest(channel_id, item.extra)

        # Multi-thread mode: wait until every worker thread finishes.
        if multithread:
            # FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
            # exists on Python 2.6+ and all Python 3 versions.
            pendent = [a for a in threads if a.is_alive()]
            # FIX: guard against ZeroDivisionError when every thread already
            # finished before this point (t is unused when pendent is empty).
            t = float(100) / len(pendent) if pendent else 100.0
            while pendent:
                index = (len(threads) - len(pendent)) + 1
                percentage = int(math.ceil(index * t))

                list_pendent_names = [a.getName() for a in pendent]
                if mode == 'normal':
                    mensaje = config.get_localized_string(30994) % (
                        ", ".join(list_pendent_names))
                    progreso.update(
                        percentage,
                        config.get_localized_string(60521) %
                        (len(threads) - len(pendent), len(threads)), mensaje)
                    logger.debug(mensaje)

                    if progreso.iscanceled():
                        logger.info("Busqueda de novedades cancelada")
                        break

                time.sleep(0.5)
                pendent = [a for a in threads if a.is_alive()]

        if mode == 'normal':
            mensaje = config.get_localized_string(60522) % (
                len(list_newest), time.time() - start_time)
            progreso.update(100, mensaje, " ", " ")
            logger.info(mensaje)
            start_time = time.time()
            # logger.debug(start_time)

        result_mode = config.get_setting("result_mode", "news")
        if mode != 'normal':
            result_mode = 0
        if result_mode == 0:  # grouped by content
            ret = group_by_content(list_newest)
        elif result_mode == 1:  # grouped by channel
            ret = group_by_channel(list_newest)
        else:  # ungrouped
            ret = no_group(list_newest)

        # Keep the progress dialog visible for at least 2 seconds.
        while time.time() - start_time < 2:
            time.sleep(0.5)
        if mode == 'normal':
            progreso.close()

        if mode == 'silent':
            set_cache(item)
            item.mode = 'set_cache'
        ret = add_menu_items(item, ret)
        if mode != 'set_cache':
            return ret
    else:
        if mode != 'set_cache':
            no_channels = platformtools.dialog_ok(
                'Novedades - %s' % item.extra,
                'No se ha definido ningun canal para la '
                'busqueda.', 'Utilice el menu contextual '
                'para agregar al menos uno')
        return
from platformcode import config, logger, platformtools from core.item import Item from platformcode.config import WebErrorException logger.info('Starting with %s' % sys.argv[1]) # Obtener parámetros de lo que hay que ejecutar # --------------------------------------------- if sys.argv[2]: item = Item().fromurl(sys.argv[2]) else: item = Item(channel='mainmenu', action='mainlist') logger.debug(item) # Establecer si channel es un canal web o un módulo # ------------------------------------------------- tipo_channel = '' if item.channel == '' or item.action == '': logger.info('Empty channel/action, nothing to do') else: # channel puede ser un canal web o un módulo path = os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py") if os.path.exists(path): tipo_channel = 'channels.' else:
def start(self, data, caption="Información del vídeo", item=None, scraper=Tmdb): """ Muestra una ventana con la info del vídeo. Opcionalmente se puede indicar el titulo de la ventana mendiante el argumento 'caption'. Si se pasa un item como argumento 'data' usa el scrapper Tmdb para buscar la info del vídeo En caso de peliculas: Coge el titulo de los siguientes campos (en este orden) 1. contentTitle (este tiene prioridad 1) 2. fulltitle (este tiene prioridad 2) 3. title (este tiene prioridad 3) El primero que contenga "algo" lo interpreta como el titulo (es importante asegurarse que el titulo este en su sitio) En caso de series: 1. Busca la temporada y episodio en los campos contentSeason y contentEpisodeNumber 2. Intenta Sacarlo del titulo del video (formato: 1x01) Aqui hay dos opciones posibles: 1. Tenemos Temporada y episodio Muestra la información del capitulo concreto 2. NO Tenemos Temporada y episodio En este caso muestra la informacion generica de la serie Si se pasa como argumento 'data' un objeto InfoLabels(ver item.py) muestra en la ventana directamente la información pasada (sin usar el scrapper) Formato: En caso de peliculas: infoLabels({ "type" : "movie", "title" : "Titulo de la pelicula", "original_title" : "Titulo original de la pelicula", "date" : "Fecha de lanzamiento", "language" : "Idioma original de la pelicula", "rating" : "Puntuacion de la pelicula", "votes" : "Numero de votos", "genres" : "Generos de la pelicula", "thumbnail" : "Ruta para el thumbnail", "fanart" : "Ruta para el fanart", "plot" : "Sinopsis de la pelicula" } En caso de series: infoLabels({ "type" : "tv", "title" : "Titulo de la serie", "episode_title" : "Titulo del episodio", "date" : "Fecha de emision", "language" : "Idioma original de la serie", "rating" : "Puntuacion de la serie", "votes" : "Numero de votos", "genres" : "Generos de la serie", "thumbnail" : "Ruta para el thumbnail", "fanart" : "Ruta para el fanart", "plot" : "Sinopsis de la del episodio o de la serie", 
"seasons" : "Numero de Temporadas", "season" : "Temporada", "episodes" : "Numero de episodios de la temporada", "episode" : "Episodio" } Si se pasa como argumento 'data' un listado de InfoLabels() con la estructura anterior, muestra los botones 'Anterior' y 'Siguiente' para ir recorriendo la lista. Ademas muestra los botones 'Aceptar' y 'Cancelar' que llamaran a la funcion 'callback' del canal desde donde se realiza la llamada pasandole como parametros el elemento actual (InfoLabels()) o None respectivamente. @param data: información para obtener datos del scraper. @type data: item, InfoLabels, list(InfoLabels) @param caption: titulo de la ventana. @type caption: str @param item: elemento del que se va a mostrar la ventana de información @type item: Item @param scraper: scraper que tiene los datos de las peliculas o series a mostrar en la ventana. @type scraper: Scraper """ # Capturamos los parametros self.caption = caption self.item = item self.indexList = -1 self.listData = None self.return_value = None self.scraper = scraper logger.debug(data) if type(data) == list: self.listData = data self.indexList = 0 data = self.listData[self.indexList] self.get_scraper_data(data) # Muestra la ventana self.doModal() return self.return_value
def test_video_exists(page_url): global headers real_host = '116.202.226.34' headers = [[ 'User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0' ], ['Host', scrapertools.get_domain_from_url(page_url)]] logger.debug("(page_url='%s')" % page_url) if 'wstream' in page_url: resp = httptools.downloadpage(page_url.replace(headers[1][1], real_host), headers=headers, verify=False) else: resp = httptools.downloadpage(page_url, headers=headers, verify=False) global data, real_url data = resp.data page_url = resp.url.replace(headers[1][1], real_host) if '/streaming.php' in page_url in page_url: code = httptools.downloadpage( page_url, headers=headers, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace( '.html', '') # logger.debug('WCODE=' + code) page_url = 'https://' + real_host + '/video.php?file_code=' + code data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data if 'nored.icu' in str(headers): var = scrapertools.find_single_match( data, r'var [a-zA-Z0-9]+ = \[([^\]]+).*?') value = scrapertools.find_single_match( data, r'String\.fromCharCode\(parseInt\(value\) \D (\d+)') if var and value: dec = '' for v in var.split(','): dec += chr(int(v) - int(value)) page_url = 'https://' + real_host + '/video.php?file_code=' + scrapertools.find_single_match( dec, "src='([^']+)").split('/')[-1].replace('.html', '') headers = [[ 'User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0' ], ['Host', 'wstream.video']] new_data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data logger.debug('NEW DATA: \n' + new_data) if new_data: data = new_data real_url = page_url for e in errorsStr: if e in data: return False, config.get_localized_string(70449) % 'Wstream' return True, ""
def title_format(item):
    """Apply the unified title/plot formatting to an Item before display.

    Adds colors, language flags, quality, rating, year and server/channel
    tags to item.title (and mirrors language/quality into item.contentPlot),
    depending on the item's content type and the unify settings.

    @param item: the element to format (modified in place).
    @type item: Item
    @return: the same item, with title/plot rewritten.
    """
    lang = False
    valid = True
    simple_language = ''

    # TODO anything that is not a link should be removed from the findvideos
    # itemlist so this blacklist becomes unnecessary.
    # "Forbidden" words: titles containing any of these are never processed.
    excluded_words = [
        'online', 'descarga', 'downloads', 'trailer', 'videoteca', 'gb',
        'autoplay'
    ]
    # Excluded (channel, action) pairs: those items are not processed either.
    excluded_actions = [('videolibrary', 'get_episodes')]

    # Check that the item is suitable for unify formatting.
    if item.channel == 'trailertools' or (item.channel.lower(), item.action.lower()) in excluded_actions or \
            item.action == '':
        valid = False
    else:
        for word in excluded_words:
            if word in item.title.lower():
                valid = False
                break
    if not valid:
        return item

    # Detect a previous trakt "watched" mark so it can be re-applied at the end.
    visto = False
    if '[[I]v[/I]]' in item.title or '[COLOR limegreen][v][/COLOR]' in item.title:
        visto = True

    # Strip any previous formatting from the title.
    if item.action != '' and item.action != 'mainlist' and item.channel != 'downloads' and item.unify:
        item.title = remove_format(item.title)

    # Avoid showing languages on each channel's mainlist.
    if item.action == 'mainlist':
        item.language = ''

    info = item.infoLabels

    if hasattr(item, 'text_color'):
        item.text_color = ''

    if valid and item.unify != False:
        # Series title: the channel must set contentSerieName (or show).
        if item.contentSerieName:
            # Use the infoLabels data when available.
            if item.contentType == 'episode' and info['episode'] != '':
                if info['title'] == '':
                    info['title'] = '%s - Episodio %s' % (info['tvshowtitle'], info['episode'])
                elif item.channel == 'downloads':
                    pass  # keep the stored title untouched (was: item.title = item.title)
                elif 'Episode' in info['title']:
                    episode = info['title'].lower().replace('episode', 'episodio')
                    info['title'] = '%s - %s' % (info['tvshowtitle'], episode.capitalize())
                elif info['episodio_titulo'] != '':
                    if 'episode' in info['episodio_titulo'].lower():
                        episode = info['episodio_titulo'].lower().replace('episode', 'episodio')
                        item.title = '%sx%s - %s' % (info['season'], info['episode'], episode.capitalize())
                    else:
                        item.title = '%sx%s - %s' % (info['season'], info['episode'],
                                                     info['episodio_titulo'].capitalize())
                else:
                    item.title = '%sx%s - %s' % (info['season'], info['episode'], info['title'])
                item.title = set_color(item.title, 'tvshow')
            else:
                # Otherwise use the title the channel provided.
                item.title = '%s' % set_color(item.title, 'tvshow')
        elif item.contentTitle:
            # No contentSerieName: format as a movie.
            if 'saga' in item.title.lower():
                item.title = '%s [Saga]' % set_color(item.contentTitle, 'movie')
            elif 'miniserie' in item.title.lower():
                item.title = '%s [Miniserie]' % set_color(item.contentTitle, 'movie')
            elif 'extend' in item.title.lower():
                item.title = '%s [V.Extend.]' % set_color(item.contentTitle, 'movie')
            elif item.channel == 'downloads' or item.from_channel == 'news':
                item.title = '%s' % set_color(item.title, 'movie')
            else:
                item.title = '%s' % set_color(item.contentTitle, 'movie')
            if item.contentType == 'movie':
                if item.context:
                    if isinstance(item.context, list):
                        item.context.append('Buscar esta pelicula en otros canales')

        # Tag news results with their source channel.
        # NOTE(review): exact nesting reconstructed from collapsed source —
        # verify against upstream unify.py.
        if 'Novedades' in item.category and item.from_channel == 'news' and item.channel not in item.title.lower():
            item.title = '%s [%s]' % (item.title, item.channel)

        # Normalize item.language; when it is a list, normalize every value.
        if hasattr(item, 'language') and item.language != '':
            if isinstance(item.language, list):
                language_list = []
                for language in item.language:
                    if language != '':
                        lang = True
                        language_list.append(set_lang(remove_format(language)).upper())
                simple_language = language_list
            else:
                # A plain string is normalized directly.
                if item.language != '':
                    lang = True
                    simple_language = set_lang(item.language).upper()
                else:
                    simple_language = ''

        # Append the year, except for episodes/seasons.
        if info and info.get("year", "") not in ["", " "] and item.contentType != 'episode' and not info['season']:
            try:
                year = '%s' % set_color(info['year'], 'year')
                # FIX: was a redundant double assignment (item.title = item.title = ...)
                item.title = '%s %s' % (item.title, year)
            except:
                logger.debug('infoLabels: %s' % info)

        # Append the rating, colored by score (bad / good / very good).
        if info and info['rating'] and info['rating'] != '0.0' and not info['season']:
            rating_value = check_rating(info['rating'])
            if rating_value:
                value = float(rating_value)
                if value <= 3:
                    color_rating = 'rating_1'
                elif value > 3 and value <= 7:
                    color_rating = 'rating_2'
                else:
                    color_rating = 'rating_3'
                rating = '%s' % rating_value
            else:
                rating = ''
                color_rating = 'otro'
            item.title = '%s %s' % (item.title, set_color(rating, color_rating))

        # Pick up the quality, when present.
        if item.quality and isinstance(item.quality, str):
            quality = item.quality.strip()
        else:
            quality = ''

        # Add language/quality to the title (play items) or the plot (lists).
        quality_ = set_color(quality, 'quality')
        if (lang or quality) and item.action == "play":
            if hasattr(item, "clean_plot"):
                # FIX: was `item.contentPlot = item.clear_plot` — `clear_plot`
                # is never assigned anywhere (clean_plot is), so the plot was
                # wiped instead of restored.
                item.contentPlot = item.clean_plot
            if lang:
                item.title = add_languages(item.title, simple_language)
            if quality:
                item.title = '%s %s' % (item.title, quality_)
        elif (lang or quality) and item.action != "play":
            if item.contentPlot:
                item.clean_plot = item.contentPlot
                plot_ = add_info_plot(item.contentPlot, simple_language, quality_)
                item.contentPlot = plot_
            else:
                item.clean_plot = None
                plot_ = add_info_plot('', simple_language, quality_)
                item.contentPlot = plot_

        # Per-channel searches: tag the result with the source channel title.
        if item.from_channel != '' and item.from_channel != 'news':
            from core import channeltools
            channel_parameters = channeltools.get_channel_parameters(item.from_channel)
            logger.debug(channel_parameters)
            item.title = '%s [%s]' % (item.title, channel_parameters['title'])

        # Videolibrary show-update state overrides the previous colors.
        if item.channel == 'videolibrary' and item.context != '':
            if item.action == 'get_seasons':
                if 'Desactivar' in item.context[1]['title']:
                    item.title = '%s' % (set_color(item.title, 'update'))
                if 'Activar' in item.context[1]['title']:
                    item.title = '%s' % (set_color(item.title, 'no_update'))

        # Format the server name, when present.
        if item.server:
            server = '%s' % set_color(item.server.strip().capitalize(), 'server')

        # In findvideos with a server: show the server instead of the title.
        if item.action != 'play' and item.server:
            item.title = '%s %s' % (item.title, server.strip())
        elif item.action == 'play' and item.server:
            if hasattr(item, "clean_plot"):
                item.contentPlot = item.clean_plot
            if item.quality == 'default':
                quality = ''
            item.title = '%s %s' % (server, set_color(quality, 'quality'))
            if lang:
                item.title = add_languages(item.title, simple_language)
            # Torrent_info
            if item.server == 'torrent' and item.torrent_info != '':
                item.title = '%s [%s]' % (item.title, item.torrent_info)
            if item.channel == 'videolibrary':
                item.title += ' [%s]' % item.contentChannel
            # Link-alive verification marks.
            if item.alive != '':
                if item.alive.lower() == 'no':
                    item.title = '[[COLOR red][B]X[/B][/COLOR]] %s' % item.title
                elif item.alive == '??':
                    item.title = '[[COLOR yellow][B]?[/B][/COLOR]] %s' % item.title
                else:
                    item.title = '%s' % item.title

        if item.channel == 'downloads' and item.contentChannel and item.contentAction:
            serie = '-serie-'
            if item.contentType != 'tvshow':
                serie = ''
            item.title = '%s [%s%s]' % (item.title, item.contentChannel, serie)
    elif 'library' in item.action:
        item.title = '%s' % set_color(item.title, 'library')
    elif item.action == '' and item.title != '':
        item.title = '**- %s -**' % item.title
    elif item.unify:
        item.title = '%s' % set_color(item.title, 'otro')

    # Re-apply the trakt "watched" check mark detected at the beginning.
    if visto:
        try:
            check = u'\u221a'
            title = '[B][COLOR limegreen][%s][/COLOR][/B] %s' % (
                check, item.title.decode('utf-8'))
            item.title = title.encode('utf-8')
            if PY3:
                item.title = item.title.decode('utf-8')
        except:
            check = 'v'
            title = '[B][COLOR limegreen][%s][/COLOR][/B] %s' % (
                check, item.title.decode('utf-8'))
            item.title = title.encode('utf-8')
            if PY3:
                item.title = item.title.decode('utf-8')
    return item
def get_trakt_watched(id_type, mediatype, update=False): logger.debug() id_list = [] id_dict = dict() token_auth = config.get_setting("token_trakt", "trakt") if token_auth: sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt') if os.path.exists(sync_path) and not update: trakt_node = jsontools.get_node_from_file('trakt', "TRAKT") if mediatype == 'shows': return trakt_node['shows'] if mediatype == 'movies': return trakt_node['movies'] else: token_auth = config.get_setting("token_trakt", "trakt") if token_auth: try: token_auth = config.get_setting("token_trakt", "trakt") headers = [['Content-Type', 'application/json'], ['trakt-api-key', client_id], ['trakt-api-version', '2']] if token_auth: headers.append( ['Authorization', "Bearer %s" % token_auth]) url = "https://api.trakt.tv/sync/watched/%s" % mediatype data = httptools.downloadpage(url, headers=headers) if data.code == 401: token_trakt(Item(extra="renew")) return get_trakt_watched(id_type, mediatype, update) watched_dict = jsontools.load(data.data) if mediatype == 'shows': dict_show = dict() for item in watched_dict: temp = [] id_ = str(item['show']['ids']['tmdb']) season_dict = dict() for season in item['seasons']: ep = [] number = str(season['number']) # season_dict = dict() for episode in season['episodes']: ep.append(str(episode['number'])) season_dict[number] = ep temp.append(season_dict) dict_show[id_] = season_dict id_dict = dict_show return id_dict elif mediatype == 'movies': for item in watched_dict: id_list.append( str(item['movie']['ids'][id_type])) except: pass return id_list
def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.debug("url=" + page_url) global data # logger.debug(data) video_urls = support.get_jwplayer_mediaurl(data, 'Onlystream') return video_urls
def update(path, p_dialog, i, t, serie, overwrite):
    """Update one saved TV show of the local video library.

    Re-runs 'episodios' on every channel the show was saved from, writes the
    new episode files (or rewrites everything when overwrite == 3) and
    optionally queues automatic downloads of the new episodes.

    @param path: filesystem path of the show's folder (contains tvshow.nfo).
    @param p_dialog: progress dialog to keep updated.
    @param i: index of this show within the overall update run.
    @param t: percentage step per show (100 / total shows).
    @param serie: Item describing the show, with library_urls per channel.
    @param overwrite: overwrite mode; 3 means rewrite all files.
    @return: True when at least one new episode was inserted.
    """
    logger.info("Actualizando " + path)
    from core import filetools
    from core import channeltools, videolibrarytools
    from platformcode import platformtools
    from channels import videolibrary
    from lib import generictools
    if config.is_xbmc():
        from platformcode import xbmc_videolibrary

    insertados_total = 0
    insertados = 0
    sobreescritos = 0
    fallidos = 0
    overwrite_back = overwrite

    head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo')
    category = serie.category
    # logger.debug("%s: %s" %(serie.contentSerieName,str(list_canales) ))

    # One pass per channel the show was saved from.
    for channel, url in list(serie.library_urls.items()):
        serie.channel = channel
        serie.url = url

        ###### Redirect to the NewPct1.py channel when it is a clone, or to
        ###### another channel/url when there was a judicial takedown.
        try:
            head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo')  # Re-read the .nfo to pick up updates
            if not it:
                logger.error('.nfo erroneo en ' + str(path))
                continue
            if it.emergency_urls:
                serie.emergency_urls = it.emergency_urls
            serie.category = category
            serie, it, overwrite = generictools.redirect_clone_newpct1(serie, head_nfo, it, path, overwrite)
        except:
            logger.error(traceback.format_exc())

        channel_enabled = channeltools.is_enabled(serie.channel)

        if channel_enabled:
            heading = config.get_localized_string(60389)
            p_dialog.update(int(math.ceil((i + 1) * t)), heading,
                            "%s: %s" % (serie.contentSerieName, serie.channel.capitalize()))
            try:
                pathchannels = filetools.join(config.get_runtime_path(), "channels", serie.channel + '.py')
                logger.info("Cargando canal: " + pathchannels)
                if serie.library_filter_show:
                    serie.show = serie.library_filter_show.get(serie.channel, serie.contentSerieName)

                obj = __import__('channels.%s' % serie.channel, fromlist=["channels.%s" % serie.channel])
                itemlist = getattr(obj, 'episodios')(serie)  # ... run 'episodios' for this channel

                try:
                    if int(overwrite_back) == 3:
                        # Overwrite every file (tvshow.nfo, 1x01.nfo, 1x01 [channel].json, 1x01.strm, etc...)
                        insertados, sobreescritos, fallidos, notusedpath = videolibrarytools.save_tvshow(serie, itemlist, silent=True, overwrite=overwrite_back)
                        #serie= videolibrary.check_season_playcount(serie, serie.contentSeason)
                        #if videolibrarytools.write_nfo(path + '/tvshow.nfo', head_nfo, it):
                        #    serie.infoLabels['playcount'] = serie.playcount
                    else:
                        insertados, sobreescritos, fallidos = videolibrarytools.save_episodes(path, itemlist, serie, silent=True, overwrite=overwrite)
                        #it = videolibrary.check_season_playcount(it, it.contentSeason)
                        #if videolibrarytools.write_nfo(path + '/tvshow.nfo', head_nfo, it):
                        #    serie.infoLabels['playcount'] = serie.playcount
                    insertados_total += insertados
                except Exception as ex:
                    logger.error("Error al guardar los capitulos de la serie")
                    template = "An exception of type %s occured. Arguments:\n%r"
                    message = template % (type(ex).__name__, ex.args)
                    logger.error(message)
                    logger.error(traceback.format_exc())
                    continue
            except Exception as ex:
                logger.error("Error al obtener los episodios de: %s" % serie.show)
                template = "An exception of type %s occured. Arguments:\n%r"
                message = template % (type(ex).__name__, ex.args)
                logger.error(message)
                logger.error(traceback.format_exc())
                continue

            # When the channel allows it, queue the download of the newly
            # inserted episodes.
            serie.channel = generictools.verify_channel(serie.channel)
            if insertados > 0 and config.get_setting('auto_download_new', serie.channel, default=False) and int(overwrite_back) != 3:
                config.set_setting("search_new_content", 1, "videolibrary")  # Scan all the shows at the end
                serie.sub_action = 'auto'
                serie.category = itemlist[0].category
                from channels import downloads
                downloads.save_download(serie, silent=True)
                if serie.sub_action:
                    del serie.sub_action
        else:
            logger.debug("Canal %s no activo no se actualiza" % serie.channel)

    # Sync the episodes watched in the Kodi video library back into ours.
    try:
        if config.is_xbmc() and not config.get_setting('cleanlibrary', 'videolibrary', default=False) \
                and int(overwrite_back) != 3:  # Only on Kodi
            xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo')
    except:
        logger.error(traceback.format_exc())

    return insertados_total > 0
def mainlist(item): logger.info() itemlist = [] if item.url == "": item.url = "http://www.ecartelera.com/videos/" # ------------------------------------------------------ # Descarga la página # ------------------------------------------------------ data = httptools.downloadpage(item.url).data # ------------------------------------------------------ # Extrae las películas # ------------------------------------------------------ patron = '<div class="viditem"[^<]+' patron += '<div class="fimg"><a href="([^"]+)"><img alt="([^"]+)"\s*src="([^"]+)"\s*/><p class="length">([^<]+)</p></a></div[^<]+' patron += '<div class="fcnt"[^<]+' patron += '<h4><a[^<]+</a></h4[^<]+' patron += '<p class="desc">([^<]+)</p>' #logger.info(patron) #logger.info(data) matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedthumbnail, duration, scrapedplot in matches: title = scrapertools.htmlclean(scrapedtitle + " (" + duration + ")") url = scrapedurl thumbnail = scrapedthumbnail #mejora imagen thumbnail = re.sub('/(\d+)_th.jpg', '/f\\1.jpg', thumbnail) plot = scrapedplot.strip() logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot, folder=False)) # ------------------------------------------------------ # Extrae la página siguiente # ------------------------------------------------------ patron = '<a href="([^"]+)">Siguiente</a>' matches = re.compile(patron, re.DOTALL).findall(data) for match in matches: scrapedtitle = "Pagina siguiente" scrapedurl = match scrapedthumbnail = "" scrapeddescription = "" # Añade al listado de XBMC itemlist.append( Item(channel=item.channel, action="mainlist", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, server="directo", folder=True, viewmode="movie_with_plot")) return itemlist
def episodios(item):
    """Build the episode list for a show page.

    Parses the JSON "items" blocks embedded in the page, follows the
    "load more" url and every extra season page, then creates one episode
    Item per entry (sorted by season/episode).

    @param item: show item whose url points at the episodes page.
    @type item: Item
    @return: itemlist wrapped by support.videolibrary().
    """
    logger.debug()

    def load_more(url):
        # Fetch one extra JSON "items" page; urls come escaped in the markup.
        # NOTE(review): '\u002F' in a non-raw literal is just '/', so that
        # replace is a no-op as written — if the page embeds literal "\u002F"
        # sequences, r'\u002F' was probably intended. TODO confirm.
        path = url.replace('\u002F', '/').replace('%5C', '/')
        # FIX: the original parsed as
        #     host if url.startswith('/') else ('' + url.replace(...))
        # so a root-relative url produced a host+host request. Prefix the
        # host only when the cleaned url is not already absolute.
        full_url = path if path.startswith('http') else host + path
        new_data = support.match(full_url).data
        match = support.scrapertools.decodeHtmlentities(
            support.match(new_data,
                          headers=headers,
                          patron=r'"items":([^\]]+])').match.replace(
                              '\x01', 'l').replace('\x02', 'a'))
        return jsontools.load(match)

    itemlist = []
    data = []
    page_data = support.match(item.url).data

    seasons = support.match(
        page_data, patron=r'href="([^"]+)"[^>]+>Stagione\s*\d+').matches
    more = support.match(page_data,
                         patron=r'loadingTitle":[^,]+,"url":"([^"]+)"').match
    data = jsontools.load(
        support.scrapertools.decodeHtmlentities(
            support.match(
                page_data,
                patron=r'"isEpisodes":[^,]+,"items":(.*?),"isKidsUI"').match))

    if data:
        if more:
            data += load_more(more)
        if seasons:
            for url in seasons:
                new_data = support.match(host + url).data
                data += jsontools.load(
                    support.scrapertools.decodeHtmlentities(
                        support.match(
                            new_data,
                            patron=r'isEpisodes":[^,]+,"items":(.*?),"isKidsUI"'
                        ).match.replace('\x01', 'l').replace('\x02', 'a')))
                match = support.match(
                    new_data,
                    patron=r'loadingTitle":[^,]+,"url":"([^"]+)"').match
                # FIX: was `match != load_more`, which compared the url string
                # against the helper *function* (always true). Compare against
                # the "load more" url already consumed for the main page.
                if match and match != more:
                    data += load_more(match)

    for it in data:
        if 'text' in it['meta']['header']['title']:
            # Header like "S1 E3": extract season/episode numbers.
            se = it['meta']['header']['title']['text']
            s = support.match(se, patron=r'S\s*(?P<season>\d+)').match
            e = support.match(se, patron=r'E\s*(?P<episode>\d+)').match
            if not e:
                e = support.match(it['meta']['subHeader'],
                                  patron=r'(\d+)').match
            title = support.typo(
                (s + 'x' if s else 'Episodio ') + e.zfill(2) + ' - ' +
                it['meta']['subHeader'], 'bold')
        else:
            s = e = '0'
            title = support.typo(it['meta']['header']['title'], 'bold')
        itemlist.append(
            item.clone(
                title=title,
                season=int(s) if s else '',
                episode=int(e),
                url=host + it['url'] if it['url'].startswith('/') else it['url'],
                thumbnail=it['media']['image']['url'],
                fanart=it['media']['image']['url'],
                plot=it['meta']['description'],
                contentType='episode',
                action='findvideos'))

    itemlist.sort(key=lambda ep: (ep.season, ep.episode))

    if inspect.stack()[1][3] not in ['find_episodes']:
        autorenumber.start(itemlist, item)
    return support.videolibrary(itemlist, item)
def new_search(item): logger.debug() temp_search_file = config.get_temp_file('temp-search') if filetools.isfile(temp_search_file): filetools.remove(temp_search_file) itemlist = [] if config.get_setting('last_search'): last_search = channeltools.get_channel_setting('Last_searched', 'search', '') else: last_search = '' if item.search_text: searched_text = item.search_text else: searched_text = platformtools.dialog_input(default=last_search, heading='') save_search(searched_text) if not searched_text: return channeltools.set_channel_setting('Last_searched', searched_text, 'search') searched_text = searched_text.replace("+", " ") if item.mode == 'person': item.searched_text = searched_text return actor_list(item) if item.mode != 'all': tmdb_info = tmdb.Tmdb(texto_buscado=searched_text, tipo=item.mode.replace('show', '')) results = tmdb_info.results for result in results: result = tmdb_info.get_infoLabels(result, origen=result) if item.mode == 'movie': title = result['title'] else: title = result['name'] item.mode = 'tvshow' thumbnail = result.get('thumbnail', '') fanart = result.get('fanart', '') new_item = Item(channel=item.channel, action='channel_search', title=title, text=searched_text, thumbnail=thumbnail, fanart=fanart, mode=item.mode, contentType=item.mode, infoLabels=result) if item.mode == 'movie': new_item.contentTitle = result['title'] else: new_item.contentSerieName = result['name'] itemlist.append(new_item) if item.mode == 'all' or not itemlist: return channel_search( Item(channel=item.channel, title=searched_text, text=searched_text, mode='all', infoLabels={})) return itemlist