def episodesxseason(item):
    """Builds the episode list for one season from the JSON stored in the item.

    Expects item.json_episodios (list of episode dicts) and returns a list of
    playable Items; a single movie "episode" is sent straight to findvideos().
    """
    logger.info()
    itemlist = []

    for ep in item.json_episodios:
        # Language: keep the parent's language when set; otherwise default to
        # subtitled ('VOSE') — the 'estado' flag can only confirm that default.
        if item.language:
            lang = item.language
        else:
            lang = 'VOSE'
            if ep.get('estado') and 'SUBT' in ep.get('estado'):
                lang = 'VOSE'

        ep_item = Item(action='findvideos',
                       channel=item.channel,
                       infoLabels=item.infoLabels,
                       language=lang,
                       thumbnail=item.thumbnail,
                       title=ep['capitulo'],
                       urls=ep['opciones'],
                       url=item.url)

        # Series get season/episode numbering; movies do not.
        if item.contentType != 'movie':
            ep_item.contentSeason = item.contentSeason
            ep_item.contentEpisodeNumber = ep['capitulo']
        itemlist.append(ep_item)

    # Fill infoLabels from TMDB unless we come from the videolibrary.
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True)

    # Format final titles once infoLabels are in place.
    for ep_item in itemlist:
        ep_item.title = unify.add_languages(
            '{}x{}: {}'.format(ep_item.contentSeason,
                               ep_item.contentEpisodeNumber,
                               ep_item.contentTitle), ep_item.language)

    # A single movie "episode" goes straight to the server list.
    if len(itemlist) == 1 and item.contentType == 'movie':
        return findvideos(itemlist[0])
    return itemlist
def findvideos(item):
    """Lists playable server Items for the given content.

    When item.urls is missing, scrapes the page's __NEXT_DATA__ JSON and
    takes the players of the first episode of the first season.
    """
    logger.info()
    itemlist = []

    if not item.urls:
        page = get_source(item.url, soup=True)
        next_data = jsontools.load(page.find('script', id='__NEXT_DATA__').text)
        series_data = next_data['props']['pageProps']['data']
        item.urls = series_data['seasons'][0]['episodes'][0]['players']

    # Walk the list of server options
    for opt in item.urls:
        srv = server_list.get(opt['name'].lower())
        # Unknown/new server -> skip it
        if not srv:
            continue

        play_url = '{}{}'.format(server_urls.get(srv, ''), opt['id'])
        srv_name = servertools.get_server_name(srv)
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=srv,
                        thumbnail=item.thumbnail,
                        title='{}: {} {}'.format(
                            config.get_localized_string(60335),
                            srv_name.title(),
                            unify.add_languages('', item.language)),
                        url=play_url)

        # Propagate fanart/plot only when the new item lacks them
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if item.contentType != 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to videolibrary" entry when supported
    if itemlist and config.get_videolibrary_support() \
            and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_pelicula_to_library",
                 channel=item.channel,
                 contentType="movie",
                 contentTitle=item.contentTitle,
                 extra="findvideos",
                 infoLabels={'year': item.infoLabels.get('year')},
                 title="[COLOR yellow]{}[/COLOR]".format(
                     config.get_localized_string(60353)),
                 url=item.url,
                 videolibrary=True))
    return itemlist
def findvideos(item):
    """Lists playable server Items from the option dicts in item.urls."""
    logger.info()
    itemlist = []

    if item.videolibrary:
        return seasons(item)

    # Strip falsy values from every option dict and drop empty options.
    cleaned_options = []
    for sub in item.urls:
        cleaned = {key: val for key, val in sub.items() if val}
        if cleaned:
            cleaned_options.append(cleaned)

    # Walk the list of server options
    for option in cleaned_options:
        server = server_list.get(option['opcion'].lower())
        # Unknown/new server -> skip it
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), option['url'])
        serv_name = servertools.get_server_name(server)
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=unify.add_languages(
                            '{}: {}'.format(config.get_localized_string(60335),
                                            serv_name.title()),
                            item.language),
                        url=url)

        # Propagate fanart/plot and format the title where needed
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if item.contentType != 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to videolibrary" entry when supported
    if itemlist and config.get_videolibrary_support() \
            and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_pelicula_to_library",
                 channel=item.channel,
                 contentType="movie",
                 contentTitle=item.contentTitle,
                 extra="findvideos",
                 infoLabels={'year': item.infoLabels.get('year')},
                 title="[COLOR yellow]{}[/COLOR]".format(
                     config.get_localized_string(60353)),
                 url=item.url,
                 videolibrary=True))
    return itemlist
def episodesxseason(item):
    """Returns the episode Items of one season (episodes come as JSON in the item)."""
    logger.info()
    itemlist = []
    episodes = item.json_episodios

    for episode in episodes:
        labels = item.infoLabels

        # Language selection: inherit from the item when available, else 'VOSE'.
        language = item.language or 'VOSE'
        if not item.language and episode.get('estado'):
            if 'SUBT' in episode.get('estado'):
                language = 'VOSE'

        new_it = Item(action='findvideos',
                      channel=item.channel,
                      infoLabels=labels,
                      language=language,
                      thumbnail=item.thumbnail,
                      title=episode['capitulo'],
                      urls=episode['opciones'],
                      url=item.url)

        # Only non-movie content carries season/episode numbers.
        if not item.contentType == 'movie':
            new_it.contentSeason = item.contentSeason
            new_it.contentEpisodeNumber = episode['capitulo']
        itemlist.append(new_it)

    # infoLabels assignment (skipped when coming from the videolibrary)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True)

    # Title formatting
    for new_it in itemlist:
        new_it.title = unify.add_languages(
            '{}x{}: {}'.format(new_it.contentSeason,
                               new_it.contentEpisodeNumber,
                               new_it.contentTitle), new_it.language)

    # A movie goes straight to findvideos
    if len(itemlist) == 1 and item.contentType == 'movie':
        return findvideos(itemlist[0])
    else:
        return itemlist
def findvideos(item):
    """Lists playable server Items from the option dicts in item.urls.

    Fixes vs. previous version:
    - the play URL is now built only after the server is validated (it was
      computed before the `continue` guard, doing useless work for unknown
      servers), and the dead `url = ''` initializer is gone;
    - fanart/plot propagation matches the sibling findvideos() variants:
      copy only when the source has a value and the new item lacks one (the
      old `hasattr(item, 'fanart')` check unconditionally clobbered fanart).
    """
    logger.info()
    itemlist = []

    if item.videolibrary:
        return seasons(item)

    for option in item.urls:
        server = server_list.get(option['opcion'].lower())
        # Unknown/new server -> skip before doing any more work
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), option['url'])
        serv_name = servertools.get_server_name(server)
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=unify.add_languages(
                            '{}: {}'.format(config.get_localized_string(60335),
                                            serv_name.title()),
                            item.language),
                        url=url)

        # Propagate fanart/plot only when missing on the new item
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if not item.contentType == 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to videolibrary" entry when supported
    if len(itemlist) > 0 and config.get_videolibrary_support() \
            and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_pelicula_to_library",
                 channel=item.channel,
                 contentType="movie",
                 contentTitle=item.contentTitle,
                 extra="findvideos",
                 infoLabels={'year': item.infoLabels.get('year')},
                 title="[COLOR yellow]{}[/COLOR]".format(
                     config.get_localized_string(60353)),
                 url=item.url,
                 videolibrary=True))
    return itemlist
def findvideos(item):
    """Scrapes the page for player entries and returns playable Items.

    Fixes vs. previous version:
    - when an <li> has no quality attribute AND no 'meta' block, the entry is
      now skipped (previously `server`/`url` were left undefined, or stale
      from a prior iteration, causing a NameError or a wrong link);
    - the captured traceback is now actually logged (it was formatted and
      then discarded);
    - removed the dead `title = item.title` assignment (it was unconditionally
      overwritten right below).
    """
    logger.info()
    itemlist = []

    soup = get_source(item.url, soup=True)
    if not soup:
        return []

    items = []
    linklists = soup.findAll('div', class_='linkSorter')
    items.extend(
        soup.find('div', class_='contEP contepID_1 contEP_A').find(
            'div', class_='innerSelector').find_all('div', class_="playerItem"))
    for lst in linklists:
        items.extend(lst.find_all('li'))

    for li in items:
        language = IDIOMAS.get(li.get('data-lang', '').lower(), '')
        quality = li.get('data-quality', '')
        if quality:
            server = SERVIDORES.get(li.get('data-provider', '').lower(), '')
            url = li.find('a')['href']
        else:
            data = li.find('div', class_='meta')
            if not data:
                # No metadata block -> nothing usable in this entry
                continue
            quality = data.p.span.text
            server = data.find('h3').text
            url = '%sajax.php' % host

        if not server:
            server = servertools.get_server_from_url(url)
            if server == 'directo':
                continue
        title = "{}".format(server.title())
        if language:
            try:
                title = unify.add_languages(title, language)
            except Exception:
                import traceback
                logger.error(traceback.format_exc())
        if quality:
            title += ' [COLOR=cyan][{}][/COLOR]'.format(quality.upper())

        itemlist.append(
            item.clone(action='play',
                       language=language,
                       player=li.get('data-loadplayer', ''),
                       quality=quality,
                       server=server,
                       title=title,
                       url=url))
    return itemlist
def list_all(item):
    """Lists titles coming either from the API (plain JSON) or from the page's
    embedded __NEXT_DATA__ JSON.

    Fix vs. previous version: the unsupported-URL branch now returns an empty
    list instead of a bare `return` (None), matching what callers expect.
    """
    logger.info()
    itemlist = []
    matches = []

    if apihost in item.url:
        # The JSON comes from the API; most info is already in the JSON
        data = get_source(item.url, json=True)
        for j in data:
            contentType = 'tvshow' if j.get('extras', {}).get('ultimoCap') else 'movie'
            title, language = set_lang(j['uniqid'].split('-', 1)[1])
            _id = j['uniqid'].split('-', 1)
            action = 'seasons' if contentType == 'tvshow' else 'findvideos'
            contentSerieName = title if contentType == 'tvshow' else None
            contentTitle = title
            status = j['estado']
            thumb = 'https://img.{}/{}-medium.webp'.format(domain, j['img'])
            url = '{}v/{}/{}'.format(host, _id[0], _id[1].replace('-', ' '))
            viewType = 'episodes' if contentType == 'tvshow' else 'files'
            matches.append([
                action, contentSerieName, contentTitle, contentType, language,
                status, title, thumb, url, viewType
            ])
    elif host in item.url:
        # The JSON is embedded in the page as __NEXT_DATA__
        soup = get_source(item.url, soup=True)
        data = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)
        data = data['props']['pageProps']['data']['sections'][0]['data']
        for j in data:
            contentType = 'tvshow' if j.get('lastEpisodeEdited') else 'movie'
            title, language = set_lang(j['title'])
            action = 'seasons' if contentType == 'tvshow' else 'findvideos'
            contentSerieName = title if contentType == 'tvshow' else None
            contentTitle = title
            thumb = 'https://img.{}/{}-medium.webp'.format(
                domain, j['img']['vertical'])
            url = '{}v/{}/{}/'.format(host, j['_id'], j['title'])
            url = '{}{}'.format(
                url, j['lastEpisodeEdited']) if contentType == 'tvshow' else url
            viewType = 'episodes' if contentType == 'tvshow' else 'files'
            status = []
            if j['status']['isOnAir']:
                status.append('En emisión')
            # NOTE(review): same condition as above — this second check was
            # probably meant to test a different status flag (e.g. one for
            # subtitling). Kept as-is pending confirmation against the API.
            if j['status']['isOnAir']:
                status.append('Subtitulando')
            status = ", ".join(status)
            matches.append([
                action, contentSerieName, contentTitle, contentType, language,
                status, title, thumb, url, viewType
            ])
    else:
        # The section changed drastically; this needs a rebuild
        soup = get_source(item.url, soup=True)
        logger.debug("\n" + str(soup.prettify()))
        return []

    # Walk the collected matches
    for action, contentSerieName, contentTitle, contentType, language, status, \
            title, thumb, url, viewType in matches:
        it = Item(action=action,
                  contentType=contentType,
                  channel=item.channel,
                  language=language,
                  title=unify.add_languages(title, language),
                  thumbnail=thumb,
                  url=url,
                  viewType=viewType)
        # Dynamic contentType assignment
        if contentType == 'tvshow':
            it.contentSerieName = contentSerieName
        elif contentTitle:
            it.contentTitle = contentTitle
        itemlist.append(it)
    return itemlist
def seasons(item):
    """Builds the season list for a title, grouping the API's flat episode
    batch by season number."""
    logger.info()
    itemlist = []

    # Load the page and its embedded __NEXT_DATA__ JSON
    page = create_soup(item.url)
    next_data = jsontools.load(page.find('script', id='__NEXT_DATA__').text)

    # The content id is needed to query the site's API
    content_id = next_data['props']['pageProps'].get('id')
    if not content_id:
        parts = item.url.replace(host, '').split('/')[2].split('-', 1)
        content_id = '{}-{}'.format(parts[0], parts[1].replace('-', '%20'))

    episodios = httptools.downloadpage(
        'https://fapi.comamosramen.com/api/byUniqId/{}'.format(content_id)).json

    # Group the flat episode list into seasons
    grouped = []
    for ep in episodios['temporadas']:
        season_no = int(ep['temporada'])
        if grouped and grouped[-1]['temporada'] == season_no:
            grouped[-1]['episodios'].append(ep)
        else:
            grouped.append({'temporada': season_no, 'episodios': [ep]})

    for season in grouped:
        title, language = set_lang(episodios.get('titulo'))
        ogtitle = title

        # Language overrides: explicit item language first, then categories
        if item.language:
            language = item.language
        if episodios.get('categorias') and 'Audio Latino' in episodios.get('categorias'):
            language = 'LAT'

        # Content type from the API's 'tipo' field
        tipo = episodios.get('tipo')
        if tipo:
            contentType = 'movie' if tipo in ['pelicula'] else 'tvshow'
        else:
            contentType = ''

        season_item = Item(
            action='episodesxseason',
            channel=item.channel,
            contentType=contentType,
            infoLabels={'year': episodios.get('año')},
            json_episodios=season['episodios'],
            language=language,
            plot=episodios.get('descripcion'),
            thumbnail=item.thumbnail,
            title=unify.add_languages(
                (config.get_localized_string(60027) % str(season['temporada'])),
                language),
            url=item.url)

        if contentType == 'movie':
            season_item.contentTitle = ogtitle
        else:
            season_item.contentSeason = season['temporada']
            season_item.contentSerieName = ogtitle
        itemlist.append(season_item)

    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True, force_no_year=True)

    # With a single season, return its episodes directly
    if len(itemlist) == 1:
        itemlist = episodesxseason(itemlist[0])

    # Offer "add series to videolibrary" when it applies
    if len(itemlist) > 0 and config.get_videolibrary_support() \
            and not itemlist[0].contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_serie_to_library",
                 channel=item.channel,
                 contentType='tvshow',
                 contentSerieName=item.contentSerieName,
                 extra="episodios",
                 title='[COLOR yellow]{}[/COLOR]'.format(
                     config.get_localized_string(60352)),
                 url=item.url))
    return itemlist
def list_all(item):
    """Lists titles either from the page's __NEXT_DATA__ JSON (html listings)
    or straight from the search/news API.

    Fixes vs. previous version:
    - the unsupported list_type branch referenced `soup` before assignment
      (a NameError masked the intended Exception); it now logs the offending
      list_type instead;
    - removed the unreachable `return` that followed the `raise`.
    """
    logger.info()
    itemlist = []
    matches = []

    if item.list_type in ['pais', 'pelicula', 'categorias', 'data', 'buscar', 'novedades']:
        # Listing pages: the embedded JSON carries episode/image/url info
        if item.list_type in ['pais', 'pelicula', 'categorias', 'data']:
            soup = create_soup(item.url)
            data = jsontools.load(
                soup.find('script', id='__NEXT_DATA__').text
            )['props']['pageProps']['data']

            # contentType follows the kind of listing
            if item.list_type in ['pelicula']:
                contentType = 'movie'
            elif item.list_type in ['categorias', 'pais', 'data']:
                contentType = 'tvshow'

            container = soup.find('div', class_='container wrapper').find('div', class_='row')
            items = container.find_all('a', class_='mb-3')

            for i, it in enumerate(items):
                j = data[i]  # matching element in the JSON
                action = 'seasons'
                status = j['estado']
                title, language = set_lang(it.find('span', class_='text-dark').text)
                if contentType == 'movie':
                    contentSerieName = None
                    contentTitle = title
                else:
                    contentSerieName = title
                    contentTitle = None
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(j['img'])
                url = '{}{}'.format(host, it['href'])
                matches.append([action, contentSerieName, contentTitle,
                                contentType, language, status, title, thumb, url])
        # API pages: everything comes in the JSON
        elif item.list_type in ['buscar', 'novedades']:
            data = httptools.downloadpage(item.url).json
            contentType = ''
            for j in data:
                action = 'seasons'
                status = j['estado']
                title, language = set_lang(j['uniqid'].split('-', 1)[1])
                if contentType == 'movie':
                    contentSerieName = None
                    contentTitle = title
                else:
                    contentSerieName = title
                    contentTitle = None
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(j['img'])
                id_ = j['uniqid'].split('-', 1)
                url = '{}/ver/{}'.format(
                    host, '{}-{}'.format(id_[0], id_[1].replace('-', '%20')))
                matches.append([action, contentSerieName, contentTitle,
                                contentType, language, status, title, thumb, url])
    else:
        # Unsupported listing (structure change?)
        logger.debug('list_type no soportado: {}'.format(item.list_type))
        raise Exception('No soportado (¿Cambio de estructura?)')

    for action, contentSerieName, contentTitle, contentType, language, status, \
            title, thumb, url in matches:
        it = Item(action=action,
                  contentType=contentType,
                  channel=item.channel,
                  language=language,
                  title=unify.add_languages(title, language),
                  thumbnail=thumb,
                  url=url)
        if contentSerieName:
            it.contentSerieName = contentSerieName
        elif contentTitle:
            it.contentTitle = contentTitle
        itemlist.append(it)
    return itemlist
def findvideos(item):
    """Lists playable server Items, handling both the plain per-server entries
    and the 'saidochesto.top' multiserver option.

    Fixes vs. previous version:
    - the normalized server name is no longer clobbered by a re-read of the
      raw <span class="server"> text: the mapped name is kept for the Item,
      while the raw text is used only to detect the multiserver provider;
    - `doo_url` is built for every entry before branching, so the
      non-multiserver branch no longer references a variable that was only
      defined inside the multiserver branch (NameError/stale value);
    - removed a duplicated `resultset = ...` line and renamed the inner loop
      variables so they no longer shadow the outer `elem`/`server`.
    """
    logger.info()
    itemlist = list()
    itemlist2 = list()
    servers = {'fcom': 'fembed', 'dood': 'doodstream', 'hqq': '',
               'youtube': '', 'saruch': '', 'supervideo': '',
               'aparat': 'aparatcam'}
    headers = {"Referer": host}

    soup = get_source(item.url, soup=True)
    matches = soup.find("ul", id="playeroptionsul")
    if not matches:
        return itemlist

    for elem in matches.find_all("li"):
        raw_server = elem.find("span", class_="server")
        raw_server = raw_server.text if raw_server else ''

        # Normalized server name (domain suffix stripped, alias-mapped)
        server = re.sub(r"\.\w{2,4}", "", raw_server.lower())
        server = servers.get(server, server)
        if not server:
            continue

        eplang = elem.find('span', class_='title').text
        eplang = re.sub(r'SERVER \d+ ', '', eplang)
        language = IDIOMAS.get(eplang.lower(), "VOSE")
        title = '%s [%s]' % (server.capitalize(), language)

        # Dooplayer API endpoint for this entry.
        # NOTE: the site occasionally switches between the WordPress REST API
        # and an iframe-based system; keep the commented alternative around.
        # players = soup.find("li", id=re.compile(r"player-option-\d+"))
        # doo_url = players.find("iframe")["src"]
        doo_url = "{}wp-json/dooplayer/v1/post/{}?type={}&source={}".format(
            host, elem["data-post"], elem["data-type"], elem["data-nume"])

        # "movidy" multiserver system
        if raw_server == 'saidochesto.top':
            data = get_source(doo_url, json=True, headers=headers)
            url = data.get("embed_url", "")
            new_soup = get_source(url, soup=True).find("div", class_="OptionsLangDisp")
            resultset = new_soup.find_all("li") if new_soup else []
            for sub_elem in resultset:
                url = sub_elem["onclick"]
                url = scrapertools.find_single_match(url, r"\('([^']+)")
                if "cloudemb.com" in url:
                    continue
                sub_server = sub_elem.find("span").text
                lang = sub_elem.find("p").text
                sub_server = re.sub(r"\.\w{2,4}", "", sub_server.lower())
                sub_server = servers.get(sub_server, sub_server)
                if not sub_server:
                    continue
                lang = re.sub(' -.*', '', lang)
                language = IDIOMAS.get(lang.lower(), "VOSE")
                stitle = unify.add_languages("", language)
                if "multiserver" not in eplang.lower():
                    stitle = ": %s %s" % (eplang.title(), stitle)
                if url:
                    itemlist2.append(
                        Item(action="play",
                             channel=item.channel,
                             infoLabels=item.infoLabels,
                             language=language,
                             title='%s' + stitle,
                             url=url))
        else:
            itemlist.append(
                Item(action="play",
                     channel=item.channel,
                     headers=headers,
                     infoLabels=item.infoLabels,
                     language=language,
                     server=server,
                     title=title,
                     url=doo_url))

    if itemlist2:
        itemlist = servertools.get_servers_itemlist(
            itemlist2, lambda x: x.title % x.server.capitalize())
    else:
        itemlist.sort(key=lambda i: (i.language, i.server))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != "episode":
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != "findvideos":
            itemlist.append(
                Item(action="add_pelicula_to_library",
                     channel=item.channel,
                     contentTitle=item.contentTitle,
                     extra="findvideos",
                     text_color="yellow",
                     title="Añadir esta pelicula a la videoteca",
                     url=item.url))
    # To see where the "directo" entries come from and fix/discard them:
    # logger.debug([f"url: {x.url}\nserver: {x.server}\n\n" for x in itemlist])
    return itemlist
def findvideos(item):
    """Lists playable server Items, handling the 'multiserver' (movidy) option.

    Fixes vs. previous version:
    - `infoLabels=item.infoLabelss` typo (the multiserver Items were built
      without the real infoLabels);
    - `doo_url` is built for every entry before branching, so the
      non-multiserver branch no longer references a variable that was only
      defined inside the multiserver branch (NameError/stale value);
    - the multiserver sub-page lookup is guarded (a missing
      "OptionsLangDisp" block used to raise AttributeError) and no longer
      clobbers the outer page's `soup`.
    """
    logger.info()
    itemlist = list()
    itemlist2 = list()
    servers = {
        'fcom': 'fembed',
        'dood': 'doodstream',
        'hqq': '',
        'youtube': '',
        'saruch': '',
        'supervideo': '',
        'aparat': 'aparatcam'
    }
    headers = {"Referer": item.url}

    soup = create_soup(item.url)
    matches = soup.find("ul", id="playeroptionsul")
    if not matches:
        return itemlist

    for elem in matches.find_all("li"):
        server = elem.find("span", class_="server").text
        server = re.sub(r"\.\w{2,4}", "", server.lower())
        server = servers.get(server, server)
        if not server:
            continue

        lang = elem.find("span", class_="title").text
        lang = re.sub(r'SERVER \d+ ', '', lang)
        language = IDIOMAS.get(lang.lower(), "VOSE")
        title = '%s [%s]' % (server.capitalize(), language)

        # Dooplayer API endpoint for this entry
        # players = soup.find_all("div", id=re.compile(r"^source-player-\d+"))
        # doo_url = players.find("iframe")["src"]
        doo_url = "%swp-json/dooplayer/v1/post/%s?type=%s&source=%s" % \
            (host, elem["data-post"], elem["data-type"], elem["data-nume"])

        # "movidy" multiserver system
        if lang.lower() == 'multiserver':
            data = httptools.downloadpage(doo_url, headers=headers).json
            url = data.get("embed_url", "")
            lang_block = create_soup(url).find("div", class_="OptionsLangDisp")
            sub_elems = lang_block.find_all("li") if lang_block else []
            for sub_elem in sub_elems:
                url = sub_elem["onclick"]
                url = scrapertools.find_single_match(url, r"\('([^']+)")
                if "cloudemb.com" in url:
                    continue
                sub_server = sub_elem.find("span").text
                sub_lang = sub_elem.find("p").text
                sub_server = re.sub(r"\.\w{2,4}", "", sub_server.lower())
                sub_server = servers.get(sub_server, sub_server)
                if not sub_server:
                    continue
                sub_lang = re.sub(' -.*', '', sub_lang)
                language = IDIOMAS.get(sub_lang.lower(), "VOSE")
                stitle = unify.add_languages("", language)
                if url:
                    itemlist2.append(
                        Item(channel=item.channel,
                             title='%s' + stitle,
                             action="play",
                             url=url,
                             language=language,
                             infoLabels=item.infoLabels))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     title=title,
                     action="play",
                     language=language,
                     infoLabels=item.infoLabels,
                     server=server,
                     headers=headers,
                     url=doo_url))

    if itemlist2:
        itemlist = servertools.get_servers_itemlist(
            itemlist2, lambda x: x.title % x.server.capitalize())
    else:
        itemlist.sort(key=lambda i: (i.language, i.server))

    for i in itemlist:
        logger.info(i)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != "episode":
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != "findvideos":
            itemlist.append(
                Item(channel=item.channel,
                     title="[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]",
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
def seasons(item):
    """Returns one Item per season for a title, querying the site's API for
    the full episode batch and splitting it by season number."""
    logger.info()
    itemlist = []
    season_groups = []

    # Page HTML + embedded __NEXT_DATA__ JSON
    soup = create_soup(item.url)
    next_data = jsontools.load(soup.find('script', id='__NEXT_DATA__').text)

    # "content_id" is required to query the page's API
    content_id = next_data['props']['pageProps'].get('id')
    if not content_id:
        id_ = item.url.replace(host, '').split('/')[2].split('-', 1)
        content_id = '{}-{}'.format(id_[0], id_[1].replace('-', '%20'))

    # Episodes arrive as one flat batch from the API
    episodios = httptools.downloadpage(
        'https://fapi.comamosramen.com/api/byUniqId/{}'.format(content_id)).json

    # Start a new group whenever the season number changes
    for episodio in episodios['temporadas']:
        current = int(episodio['temporada'])
        if not season_groups or season_groups[-1]['temporada'] != current:
            season_groups.append({'temporada': current, 'episodios': []})
        season_groups[-1]['episodios'].append(episodio)

    for group in season_groups:
        title, language = set_lang(episodios.get('titulo'))
        infoLabels = {'year': episodios.get('año')}
        ogtitle = title

        # Language: the item wins, then the 'Audio Latino' category
        if item.language:
            language = item.language
        if episodios.get('categorias'):
            if 'Audio Latino' in episodios.get('categorias'):
                language = 'LAT'

        # contentType derived from the API's 'tipo'
        if episodios.get('tipo'):
            contentType = 'movie' if episodios.get('tipo') in ['pelicula'] else 'tvshow'
        else:
            contentType = ''

        it = Item(action='episodesxseason',
                  channel=item.channel,
                  contentType=contentType,
                  infoLabels=infoLabels,
                  json_episodios=group['episodios'],
                  language=language,
                  plot=episodios.get('descripcion'),
                  thumbnail=item.thumbnail,
                  title=unify.add_languages(
                      (config.get_localized_string(60027) % str(group['temporada'])),
                      language),
                  url=item.url)

        # Per-contentType fields
        if contentType == 'movie':
            it.contentTitle = ogtitle
        else:
            it.contentSeason = group['temporada']
            it.contentSerieName = ogtitle
        itemlist.append(it)

    # infoLabels (when not coming from the videolibrary)
    if not item.videolibrary:
        tmdb.set_infoLabels(itemlist, True, force_no_year=True)

    # Single season -> return the episodes directly
    if len(itemlist) == 1:
        itemlist = episodesxseason(itemlist[0])

    # "Add to videolibrary" entry
    if len(itemlist) > 0 and config.get_videolibrary_support() \
            and not itemlist[0].contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_serie_to_library",
                 channel=item.channel,
                 contentType='tvshow',
                 contentSerieName=item.contentSerieName,
                 extra="episodios",
                 title='[COLOR yellow]{}[/COLOR]'.format(
                     config.get_localized_string(60352)),
                 url=item.url))
    return itemlist
def list_all(item):
    """Lists titles from html listings (with embedded JSON) or from the
    search/news API.

    Fixes vs. previous version:
    - the invalid-list_type branch referenced `soup` before assignment
      (a NameError masked the intended Exception); it now logs the offending
      list_type instead;
    - removed the unreachable `return` that followed the `raise`.
    """
    logger.info()
    itemlist = []
    matches = []

    if item.list_type in [
            'pais', 'pelicula', 'categorias', 'data', 'buscar', 'novedades'
    ]:
        # Listing pages: titles live in the html, extra data in the JSON
        if item.list_type in ['pais', 'pelicula', 'categorias', 'data']:
            # Download the page (it contains the JSON)
            soup = create_soup(item.url)
            # Load the JSON (episode/image/url info)
            data = jsontools.load(
                soup.find('script',
                          id='__NEXT_DATA__').text)['props']['pageProps']['data']

            # contentType determination
            if item.list_type in ['pelicula']:
                contentType = 'movie'
            elif item.list_type in ['categorias', 'pais', 'data']:
                contentType = 'tvshow'

            # Element list (holds the visible titles)
            container = soup.find('div', class_='container wrapper').find(
                'div', class_='row')
            if item.list_type in ['categorias', 'pais', 'data']:
                items = container.find_all('div', class_='mb-3')
            else:
                items = container.find_all('a', class_='mb-3')

            for i, it in enumerate(items):
                j = data[i]  # matching element in the JSON
                action = 'seasons'
                status = j['estado']
                title, language = set_lang(
                    it.find('span', class_='text-dark').text)
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(
                    j['img'])
                url = '{}/v/{}'.format(host, j.get('uniqid', ''))

                # contentType determination, part 2
                if contentType == 'movie':
                    contentSerieName = None
                    contentTitle = title
                else:
                    contentSerieName = title
                    contentTitle = None
                matches.append([
                    action, contentSerieName, contentTitle, contentType,
                    language, status, title, thumb, url
                ])
        # API pages: most info already comes in the JSON
        elif item.list_type in ['buscar', 'novedades']:
            data = httptools.downloadpage(item.url).json
            for j in data:
                action = 'seasons'
                status = j['estado']
                title, language = set_lang(j['uniqid'].split('-', 1)[1])
                contentSerieName = title
                contentTitle = None
                contentType = ''
                thumb = 'https://img.comamosramen.com/{}-high.webp'.format(
                    j['img'])
                id_ = j['uniqid'].split('-', 1)
                url = '{}/v/{}'.format(
                    host, '{}-{}'.format(id_[0], id_[1].replace('-', '%20')))
                matches.append([
                    action, contentSerieName, contentTitle, contentType,
                    language, status, title, thumb, url
                ])
    else:
        # The section changed drastically or the item is malformed
        logger.debug('list_type no válido: {}'.format(item.list_type))
        raise Exception('Item malformado, list_type no válido')

    # Walk the collected matches
    for action, contentSerieName, contentTitle, contentType, language, status, \
            title, thumb, url in matches:
        it = Item(action=action,
                  contentType=contentType,
                  channel=item.channel,
                  language=language,
                  title=unify.add_languages(title, language),
                  thumbnail=thumb,
                  url=url)
        # Dynamic contentType assignment
        if contentSerieName:
            it.contentSerieName = contentSerieName
        elif contentTitle:
            it.contentTitle = contentTitle
        itemlist.append(it)
    return itemlist