def list_all(item):
    """List shows/movies from the 'listados' container, 25 per page.

    BUG FIX: the old next-page condition (``len(matches) > 26``) never offered
    a next page when 26 or fewer items remained, silently dropping the tail of
    the catalogue; we now paginate whenever unshown items remain.
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    match = soup.find("div", class_="listados")

    context = renumbertools.context(item)
    context2 = autoplay.context
    context.extend(context2)

    # Slice the current 25-item window out of the full match list.
    first = item.first
    last = first + 25
    matches = match.find_all("div", class_="serie")
    if last >= len(matches):
        last = len(matches)

    for elem in matches[first:last]:
        scrapedurl = elem.a["href"]
        scrapedthumbnail = elem.img["data-src"]
        scrapedtitle = elem.a.p.text
        scrapedplot = elem.find("span", class_="mini").text
        # Series get a seasons listing; anything else goes straight to videos.
        if item.category == "series":
            action = "seasons"
        else:
            action = "findvideos"
        itemlist.append(Item(channel=item.channel, title=scrapedtitle,
                             contentSerieName=scrapedtitle, url=host + scrapedurl,
                             plot=scrapedplot, thumbnail=scrapedthumbnail,
                             action=action, context=context,
                             category=item.category))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    url_next_page = item.url
    first = last
    # Paginate whenever there are items beyond the window just shown.
    if url_next_page and last < len(matches):
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                             url=url_next_page, action='list_all', first=first))
    return itemlist
def series(item):
    """Build the list of shows on the current catalogue page, plus an
    optional next-page entry."""
    logger.info()
    html = get_url_contents(item.url)
    found = __find_series(html)
    itemlist = []
    for title, url, thumbnail, plot in found:
        logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
            title, url, thumbnail))
        itemlist.append(Item(channel=item.channel, action="episodios",
                             title=title, url=url, thumbnail=thumbnail,
                             plot=plot, show=title,
                             viewmode="movies_with_plot",
                             context=renumbertools.context(item)))
    next_url = __find_next_page(html)
    if next_url:
        itemlist.append(Item(channel=item.channel, action="series",
                             title=">> Página Siguiente", url=next_url,
                             thumbnail="", plot="", folder=True,
                             viewmode="movies_with_plot"))
    return itemlist
def lista(item):
    """Paginated series listing, 30 items per page.

    BUG FIX: the page window used ``max = min + 30 - 1`` with an exclusive
    slice end, so only 29 items were shown and every 30th match (indexes 29,
    59, ...) was silently skipped. The end index is now ``min + 30``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<a href="([^"]+)" '
    patron += 'class="link">.+?<img src="([^"]+)".*?'
    patron += 'title="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    # Paginacion
    num_items_x_pagina = 30
    min = item.page * num_items_x_pagina
    max = min + num_items_x_pagina  # slice end is exclusive
    for link, img, name in matches[min:max]:
        title = name
        url = host + link
        scrapedthumbnail = host + img
        itemlist.append(item.clone(title=title, url=url, action="episodios",
                                   thumbnail=scrapedthumbnail, show=title,
                                   context=renumbertools.context(item)))
    itemlist.append(
        Item(channel=item.channel, title="Página Siguiente >>", url=item.url,
             action="lista", page=item.page + 1))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def search(item, texto):
    """Search handler; returns the matching shows or [] on any scraping error."""
    logger.info()
    item.url = "{0}{1}".format(item.url, texto.replace(" ", "%20"))
    html = get_url_contents(item.url)
    try:
        # A single hit redirects straight to the serie's own page.
        if html.find('<title>Ver') >= 0:
            series = [__extract_info_from_serie(html)]
        else:
            # Regular multi-result listing.
            series = __find_series(html)
        items = []
        for title, url, thumbnail, plot in series:
            logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
                title, url, thumbnail))
            items.append(Item(channel=item.channel, action="episodios",
                              title=title, url=url, thumbnail=thumbnail,
                              plot=plot, show=title,
                              viewmode="movies_with_plot",
                              context=renumbertools.context(item)))
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return items
def sub_search(item):
    """POST search against the site's JSON endpoint; yields season entries."""
    logger.info()
    itemlist = []
    logger.info(item.url)
    post = "k=" + item.texto
    logger.info(post)
    raw = httptools.downloadpage(item.url, post=post, canonical=canonical).data #.json
    parsed = json.loads(raw)
    if not parsed:
        return itemlist
    for row in parsed["dt"]:
        thumb = "{}tb/{}.jpg".format(host, row[0])
        name = row[1]
        link = urlparse.urljoin(host, row[2])
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        # The year column is optional in the API response.
        try:
            year = row[3]
        except:
            year = ''
        tmdb_filter = list({"first_air_date": year}.items())
        itemlist.append(
            item.clone(action="seasons", title=name, thumbnail=thumb, url=link,
                       context=ctx, contentSerieName=name, page=0,
                       infoLabels={'filtro': tmdb_filter}))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def novedades_anime(item):
    """Latest additions; movies and series are tagged differently."""
    logger.info()
    itemlist = []
    patr = '<ul class="ListAnimes[^>]+>(.*?)</ul>'
    list_html = get_source(item.url, patron=patr)
    patron = 'href="([^"]+)".+?<img src="([^"]+)".+?'
    patron += '<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
    patron += '(?:</p><p>(.*?)</p>.+?)?</article></li>'
    for path, thumb, _type, name, plot in re.compile(patron, re.DOTALL).findall(list_html):
        entry = Item(channel=item.channel, action="episodios", title=name,
                     url=urlparse.urljoin(HOST, path),
                     thumbnail=urlparse.urljoin(HOST, thumb), plot=plot)
        if _type == "Película":
            entry.contentType = "movie"
            entry.contentTitle = name
        else:
            entry.contentSerieName = name
            entry.context = renumbertools.context(item)
        itemlist.append(entry)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Search via the 'search_suggest' JSON endpoint; [] on error."""
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "search_suggest")
    post = "value=%s" % texto.replace(" ", "+")
    try:
        for entry in httptools.downloadpage(item.url, post=post).json:
            name = clean_title(scrapertools.htmlclean(entry["name"]))
            new_item = item.clone(action="episodios", title=name,
                                  url=entry["url"], plot=entry["description"],
                                  thumbnail=entry["thumb"])
            if "Pelicula" in entry["genre"]:
                new_item.contentType = "movie"
                new_item.contentTitle = name
            else:
                new_item.contentSerieName = name
                new_item.context = renumbertools.context(item)
            itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def series(item):
    """One page of the series catalogue plus an optional next-page entry."""
    logger.info()
    html = get_url_contents(item.url)
    itemlist = []
    for title, url, thumbnail, plot in __find_series(html):
        logger.debug("title=[%s], url=[%s], thumbnail=[%s]"
                     % (title, url, thumbnail))
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title,
                 url=url, thumbnail=thumbnail, plot=plot, show=title,
                 viewmode="movies_with_plot",
                 context=renumbertools.context(item)))
    next_url = __find_next_page(html)
    if next_url:
        itemlist.append(
            Item(channel=item.channel, action="series",
                 title=">> Página Siguiente", url=next_url))
    return itemlist
def novedades_anime(item):
    """Latest anime additions parsed out of the ListAnimes block."""
    logger.info()
    itemlist = []
    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t|\s{2}|-\s", "", html)
    html = scrapertools.find_single_match(
        html, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    entries = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>',
                         re.DOTALL).findall(html)
    for thumb, path, raw_title in entries:
        show_title = clean_title(raw_title)
        entry = Item(channel=item.channel, action="episodios",
                     title=show_title, url=urlparse.urljoin(HOST, path),
                     thumbnail=urlparse.urljoin(HOST, thumb),
                     contentTitle=show_title)
        entry.contentSerieName = show_title
        entry.context = renumbertools.context(item)
        itemlist.append(entry)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def search_results(item):
    """Parse the search-results block into episode items."""
    logger.info()
    itemlist = []
    page = get_source(item.url)
    block = scrapertools.find_single_match(
        page, '<div class="search-results">(.*?)<h4')
    pattern = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    for link, raw_title, thumb in re.compile(pattern, re.DOTALL).findall(block):
        # Strip site decorations and surrounding whitespace from the title.
        name = re.sub('online|Audio|Latino', '', raw_title).strip()
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=name,
                 contentSerieName=name, url=link, context=ctx,
                 thumbnail=thumb))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def series(item):
    """Alphabetically sorted series listing with best-effort pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    pattern = '(?s)<article class="item"[^<]+'
    pattern += '<a href="([^"]+)"[^<]+<header>([^<]+)</header[^<]+.*?'
    pattern += 'src="([^"]+)".*?<p>(.*?)<'
    for link, name, thumb, plot in scrapertools.find_multiple_matches(data, pattern):
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        itemlist.append(Item(channel=item.channel, action="episodios",
                             title=name, url=urlparse.urljoin(item.url, link),
                             thumbnail=thumb, plot=plot, show=name,
                             context=ctx, viewmode="movie_with_plot"))
    itemlist.sort(key=lambda it: it.title)
    # Pagination link may be absent; ignore failures.
    try:
        page_url = scrapertools.find_single_match(
            data, '<li><a href="([^"]+)">></a></li>')
        itemlist.append(Item(channel=item.channel, action="series",
                             title="[COLOR cyan]>> Página siguiente[/COLOR]",
                             url=urlparse.urljoin(item.url, page_url),
                             viewmode="movie_with_plot", thumbnail="",
                             plot=""))
    except:
        pass
    return itemlist
def search(item, texto):
    """Search; a single hit redirects to the serie page itself."""
    logger.info()
    item.url = "%s%s" % (item.url, texto.replace(" ", "%20"))
    html = get_url_contents(item.url)
    try:
        if html.find('<title>Ver') >= 0:
            # Single result: the site redirected to the serie page.
            show_list = [__extract_info_from_serie(html)]
        else:
            # Normal results listing.
            show_list = __find_series(html)
        items = []
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        for title, url, thumbnail, plot in show_list:
            items.append(Item(channel=item.channel, action="episodios",
                              title=title, url=url, thumbnail=thumbnail,
                              plot=plot, show=title,
                              viewmode="movies_with_plot", context=ctx))
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return items
def search(item, texto):
    """Search via the 'search_suggest' endpoint.

    BUG FIX: the POST payload was built but never sent — the page was fetched
    with a plain GET (``downloadpage(item.url)``), so the endpoint never saw
    the query. The request now posts ``value=<texto>``, matching the sibling
    search() implementations in this file.
    """
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "search_suggest")
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    try:
        dict_data = jsontools.load(data)
        for e in dict_data:
            title = clean_title(scrapertools.htmlclean(e["name"]))
            url = e["url"]
            plot = e["description"]
            thumbnail = e["thumb"]
            new_item = item.clone(action="episodios", title=title, url=url,
                                  plot=plot, thumbnail=thumbnail)
            if "Pelicula" in e["genre"]:
                new_item.contentType = "movie"
                new_item.contentTitle = title
            else:
                new_item.show = title
                new_item.context = renumbertools.context(item)
            itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
def listado(item):
    """Full catalogue listing with pagination-link detection."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(
        data, '<li class="active">.*?</li><li><a href="([^"]+)">')
    blocks = scrapertools.find_multiple_matches(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    joined = "".join(blocks)
    pattern = ('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
               '.*?</p><p>(.*?)</p>')
    itemlist = []
    for path, thumb, _type, name, plot in re.compile(pattern, re.DOTALL).findall(joined):
        entry = Item(channel=item.channel, action="episodios", title=name,
                     url=urlparse.urljoin(HOST, path),
                     thumbnail=urlparse.urljoin(HOST, thumb),
                     fulltitle=name, plot=plot)
        if _type == "Anime":
            entry.show = name
            entry.context = renumbertools.context(item)
        else:
            entry.contentType = "movie"
            entry.contentTitle = name
        itemlist.append(entry)
    if url_pagination:
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Pagina Siguiente",
                             url=urlparse.urljoin(HOST, url_pagination)))
    return itemlist
def sub_search(item):
    """POST search against the JSON endpoint; yields episode entries.

    FIX: ``dict.items()`` returns a view object under Python 3; the sibling
    sub_search() in this file wraps it in ``list()`` before storing it in
    infoLabels, so the same is done here for consistency (and so the filter
    survives serialization / repeated iteration).
    """
    logger.info()
    itemlist = []
    post = "k=" + item.texto
    results = httptools.downloadpage(item.url, post=post).json
    if not results:
        return itemlist
    for result in results:
        scrapedthumbnail = host + "/tb/" + result[0] + ".jpg"
        scrapedtitle = result[1]
        scrapedurl = host + "/" + result[2]
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        # The year column is optional in the API response.
        try:
            scrapedyear = result[3]
        except:
            scrapedyear = ''
        filtro_tmdb = list({"first_air_date": scrapedyear}.items())
        itemlist.append(
            item.clone(action="episodios", title=scrapedtitle,
                       thumbnail=scrapedthumbnail, url=scrapedurl,
                       context=context, contentSerieName=scrapedtitle,
                       infoLabels={'filtro': filtro_tmdb}))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def lista_gen(item):
    """Listing inside <section class="content">, with nextpostslink paging."""
    logger.info()
    itemlist = []
    raw = httptools.downloadpage(item.url).data
    raw = re.sub(r"\n|\r|\t|\s{2}| ", "", raw)
    data = scrapertools.find_single_match(
        raw, '<section class="content">.+?<\/section>')
    patron = '<article id=.+? class=.+?><div.+?>'
    patron += '<a href="([^"]+)" title="([^"]+)'      # url, title
    patron += ' Capítulos Completos ([^"]+)">'        # language
    patron += '<img.+? data-src=.+? data-lazy-src="([^"]+)"'  # thumbnail
    entries = scrapertools.find_multiple_matches(data, patron)
    count = 0
    for link, name, lang, thumb in entries:
        count += 1
        if 'HD' in lang:
            lang = lang.replace('HD', '')
        itemlist.append(
            Item(channel=item.channel, title=name + " [ " + lang + "]",
                 url=link, thumbnail=thumb, action="episodios", show=name,
                 context=renumbertools.context(item)))
    tmdb.set_infoLabels(itemlist)
    # Paginacion
    next_page_url = scrapertools.find_single_match(
        data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page_url != "" and count != 1:
        item.url = next_page_url
        itemlist.append(Item(channel=item.channel, action="lista_gen",
                             title=">> Página siguiente", url=next_page_url,
                             thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
    return itemlist
def search(item, texto):
    """Search via the animes JSON API; [] on error."""
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "api/animes/search")
    post = "value=%s" % texto.replace(" ", "+")
    data = httptools.downloadpage(item.url, post=post).data
    try:
        for entry in jsontools.load(data):
            # Use last_id when it differs from id.
            _id = entry["last_id"] if entry["id"] != entry["last_id"] else entry["id"]
            url = "%sanime/%s/%s" % (HOST, _id, entry["slug"])
            title = entry["title"]
            thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, entry["id"])
            new_item = item.clone(action="episodios", title=title, url=url,
                                  thumbnail=thumbnail)
            if entry["type"] == "movie":
                new_item.contentType = "movie"
                new_item.contentTitle = title
            else:
                new_item.show = title
                new_item.context = renumbertools.context(item)
            itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist
def list_all(item):
    """Series cards from the 'list-series' container."""
    logger.info()
    results = []
    container = create_soup(item.url).find("div", class_="list-series")
    for card in container.find_all("article", class_="serie-card"):
        link = card.a["href"]
        name = card.a["title"]
        picture = card.img["src"]
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        results.append(
            Item(channel=item.channel, title=name, url=link,
                 action='episodios', thumbnail=picture,
                 contentSerieName=name, context=ctx))
    tmdb.set_infoLabels_itemlist(results, True)
    return results
def listado(item):
    """Catalogue listing; anime movies and series are told apart by genre."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(
        data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
    data = scrapertools.find_single_match(
        data, '</div><div class="full">(.*?)<div class="pagination')
    pattern = ('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
               '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>')
    itemlist = []
    for thumb, path, raw_title, genres, plot in re.compile(pattern, re.DOTALL).findall(data):
        name = clean_title(raw_title)
        entry = Item(channel=item.channel, action="episodios", title=name,
                     url=urlparse.urljoin(HOST, path),
                     thumbnail=urlparse.urljoin(HOST, thumb),
                     contentTitle=name, plot=plot, )
        if "Pelicula Anime" in genres:
            entry.contentType = "movie"
            entry.contentTitle = name
        else:
            entry.contentSerieName = name
            entry.context = renumbertools.context(item)
        itemlist.append(entry)
    if url_pagination:
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Pagina Siguiente",
                             url=urlparse.urljoin(HOST, url_pagination)))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def peliculas(item):
    """Movie listing. Page windows deliberately overlap by one item, so the
    effective step between pages is 29; a next-page entry is only offered
    when the current window was (nearly) full."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<div class="pel play" dt="(.+?)" .+?><img src="(.+?)" .+? title="(.*?)"><span class=".+?">(.+?)<\/span><a href="(.+?)" class.+?>'
    matches = scrapertools.find_multiple_matches(data, patron)
    # Paginacion: start = 30*page - page = 29*page; window is 29 items wide.
    per_page = 30
    start = item.page * per_page - item.page
    stop = start + per_page - 1
    shown = 0
    for plot, thumb, name, year, path in matches[start:stop]:
        shown += 1
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        itemlist.append(item.clone(title=name + "-" + year, url=host + path,
                                   action="findvideos", thumbnail=host + thumb,
                                   plot=plot, show=name, contentSerieName=name,
                                   context=ctx))
    if shown >= 29:
        itemlist.append(
            Item(channel=item.channel, contentSerieName=item.contentSerieName,
                 title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                 url=item.url, action="peliculas", page=item.page + 1))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def series(item):
    """Series catalogue page; the context list is built once and shared."""
    logger.info()
    html = get_url_contents(item.url)
    itemlist = []
    ctx = renumbertools.context(item)
    ctx.extend(autoplay.context)
    for title, url, thumbnail, plot in __find_series(html):
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title,
                 url=url, thumbnail=thumbnail, contentSerieName=title,
                 plot=plot, show=title, viewmode="movies_with_plot",
                 context=ctx))
    next_path = scrapertools.find_single_match(html, REGEX_NEXT_PAGE)
    if next_path:
        itemlist.append(
            Item(channel=item.channel, action="series",
                 title=">> Página Siguiente", url=CHANNEL_HOST + next_path))
    return itemlist
def novedades_anime(item):
    """Latest additions (variant without a TMDB lookup)."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    pattern = ('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
               '(?:</p><p>(.*?)</p>.+?)?</article></li>')
    itemlist = []
    for path, thumb, _type, name, plot in re.compile(pattern, re.DOTALL).findall(data):
        entry = Item(channel=item.channel, action="episodios", title=name,
                     url=urlparse.urljoin(HOST, path),
                     thumbnail=urlparse.urljoin(HOST, thumb),
                     fulltitle=name, plot=plot)
        if _type == "Película":
            entry.contentType = "movie"
            entry.contentTitle = name
        else:
            entry.show = name
            entry.context = renumbertools.context(item)
        itemlist.append(entry)
    return itemlist
def list_all(item):
    """Generic catalogue listing decorated with year and language tags."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in re.compile(patron, re.DOTALL).findall(data):
        type = type.strip().lower()
        # The site serves small covers; swap in the full-size asset path.
        thumbnail = re.sub("image/imagen/160/224/", "assets/img/serie/imagen/",
                           scrapedthumbnail)
        lang, title = clear_title(scrapedtitle)
        stitle = title
        if not config.get_setting('unify'):
            stitle += ' [COLOR lightsteelblue](%s)[/COLOR]' % year
            if lang != 'VOSE':
                stitle += ' [COLOR gold][%s][/COLOR]' % lang
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        new_item = Item(channel=item.channel, action='folders', title=stitle,
                        url=scrapedurl, plot=type.capitalize(),
                        type=item.type, thumbnail=thumbnail, language=lang,
                        infoLabels={'year': year})
        if type in ('anime', 'ova'):
            new_item.contentSerieName = title
            new_item.context = ctx
        else:
            new_item.contentTitle = title
        itemlist.append(new_item)
    # Paginacion
    next_page = scrapertools.find_single_match(
        data, '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')
    if next_page:
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(
            Item(channel=item.channel, action="list_all",
                 title=">> Página siguiente", url=actual_page + next_page,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def listado(item):
    # Catalogue listing fetched from the site's JSON API; paginates through an
    # "offset" query parameter already embedded in item.url.
    logger.info()
    itemlist = []
    data = jsontools.load(httptools.downloadpage(item.url).data)
    status = data.get('status')
    data = data.get('result')
    for it in data.get("items", []):
        scrapedtitle = it["title"]
        url = "%s/%s/" % (host, it["slug"])
        thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it[
            'id']
        # Strip format/language suffixes so the cleaned title matches on TMDB.
        title = re.sub(
            r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino",
            "", scrapedtitle)
        tipo = "tvshow"
        show = title
        action = "episodios"
        # Movie entries are recognised by their URL suffix.
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo = "movie"
            show = ""
            action = "peliculas"
        # Restrict TMDB matching to Japanese-language originals.
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(
            item.clone(action=action, title=scrapedtitle, url=url,
                       thumbnail=thumb, text_color=color3, contentTitle=title,
                       contentSerieName=show, infoLabels=infoLabels,
                       context=renumbertools.context(item), contentType=tipo))
    # Best-effort metadata enrichment; failures are ignored.
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    if status and itemlist:
        # NOTE(review): if item.url carries no "offset=" yet, the re.sub below
        # is a no-op and the next-page link repeats the same page — confirm
        # callers always seed an offset parameter.
        offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
        if offset:
            offset = int(offset) + 2
        else:
            offset = 0
        url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
        itemlist.append(
            Item(channel=item.channel, action="listado", url=url,
                 title=">> Página Siguiente", thumbnail=item.thumbnail,
                 text_color=color2))
    return itemlist
def lista(item):
    """Paginated listing (25 per page) with in-page and cross-page paging.

    BUG FIX: the "next page" anchor found by ``soup.find`` is a BeautifulSoup
    Tag; the old code concatenated ``host`` with the Tag itself, which
    serializes the whole ``<a ...>`` element into the URL. The anchor's
    ``href`` attribute is now used instead.
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    classitems = "link" if item.category == "series" else "min-la"
    matches = soup.find_all("a", class_=classitems)
    next_page = soup.find("a", class_="sa fr")
    num_items_x_pagina = 25
    min = item.page
    max = item.page + num_items_x_pagina
    f_page = item.page + num_items_x_pagina
    for elem in matches[min:max]:
        scrapedurl = elem["href"]
        scrapedthumbnail = elem.find("img")["src"]
        scrapedtitle = elem.find(
            "h3").text if item.category == "series" else elem.find("div").text
        title = scrapedtitle.replace(
            " y ", " & ") if " y " in scrapedtitle else scrapedtitle
        url = urlparse.urljoin(host, scrapedurl)
        thumbnail = urlparse.urljoin(host, scrapedthumbnail)
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        itemlist.append(
            Item(channel=item.channel, title=title, url=url, action="seasons",
                 thumbnail=thumbnail, contentSerieName=title, context=context))
    if f_page < len(matches):
        # More matches on this same HTML page: advance the local window.
        itemlist.append(
            item.clone(title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                       page=f_page))
    elif next_page:
        # BUG FIX: use the anchor's href, not the serialized Tag.
        next_page = "{}{}".format(host, next_page["href"])
        itemlist.append(
            Item(channel=item.channel, url=next_page, action="lista",
                 title="[COLOR cyan]Página Siguiente >>[/COLOR]", page=0,
                 tipo=item.tipo))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def list_all(item):
    """Catalogue listing; language is inferred from the raw title."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    for link, thumb, raw_title, year, kind in re.compile(patron, re.DOTALL).findall(data):
        kind = kind.strip().lower()
        lowered = raw_title.lower()
        if 'latino' in lowered:
            lang = 'Latino'
        elif 'castellano' in lowered:
            lang = 'Castellano'
        else:
            lang = 'VOSE'
        title = re.sub('Audio|Latino|Castellano', '', raw_title)
        ctx = renumbertools.context(item)
        ctx.extend(autoplay.context)
        new_item = Item(channel=item.channel, action='episodios', title=title,
                        url=link, thumbnail=thumb, language=lang,
                        infoLabels={'year': year})
        if kind == 'anime':
            new_item.plot = kind
            new_item.contentSerieName = title
            new_item.context = ctx
        else:
            new_item.contentTitle = title
        itemlist.append(new_item)
    # Paginacion
    next_page = scrapertools.find_single_match(
        data, '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')
    if next_page:
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(
            Item(channel=item.channel, action="list_all",
                 title=">> Página siguiente", url=actual_page + next_page,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def list_by_json(item):
    # JSON-backed listing. When no URL is given it queries the "buscador" API
    # once per value of "dia" (0..5) and aggregates all results in one list.
    logger.info()
    itemlist = []
    repeat = 1
    status = False
    if item.url == '':
        item.url = host + "/api/buscador?limit=30&estado=1&dia=%s"
        repeat = 6
    for element in range(0, repeat):
        # The templated URL is only formatted in multi-request mode.
        if repeat != 1:
            data = jsontools.load(
                httptools.downloadpage(item.url % element).data)
        else:
            data = jsontools.load(httptools.downloadpage(item.url).data)
        status = data.get('status')
        json_data = data.get('result')
        elem_data = json_data['items']
        for item_data in elem_data:
            url = '%s/%s/' % (host, item_data['slug'])
            title = item_data['title']
            # Drop format/language suffixes before the TMDB lookup.
            title = re.sub(
                r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas",
                "", title)
            thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data[
                'id']
            # Restrict TMDB matching to Japanese-language originals.
            infoLabels = {'filtro': {"original_language": "ja"}.items()}
            itemlist.append(
                item.clone(action="episodios", title=title, url=url,
                           thumbnail=thumb, text_color=color3,
                           contentTitle=title, contentSerieName=title,
                           extra="recientes",
                           context=renumbertools.context(item),
                           infoLabels=infoLabels))
    if status and itemlist:
        # NOTE(review): pagination mirrors listado(); if item.url carries no
        # "offset=", the re.sub below is a no-op — confirm callers seed it.
        offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
        if offset:
            offset = int(offset) + 2
        else:
            offset = 0
        url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
        itemlist.append(
            Item(channel=item.channel, action="listado", url=url,
                 title=">> Página Siguiente", thumbnail=item.thumbnail,
                 text_color=color2))
    return itemlist
def list_all(item):
    """List all serie cards; paginates via the last entry of the pagination bar.

    BUG FIX: ``thumb`` was only assigned inside the card loop, so a page with
    an empty card list but a visible pagination bar raised NameError when
    building the "next page" item. It is now initialised up front.
    """
    logger.info()
    itemlist = list()
    thumb = ""  # defined even when no cards are found
    soup = get_source(item.url, soup=True)
    matches = soup.find("div", class_="list-series").find_all("article", class_="serie-card")
    for elem in matches:
        url = elem.a["href"]
        title = elem.a["title"]
        thumb = elem.img["src"]
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        itemlist.append(
            Item(
                action = 'episodios',
                channel = item.channel,
                contentSerieName = title,
                context = context,
                thumbnail = thumb,
                title = title,
                url = url
            )
        )
    tmdb.set_infoLabels_itemlist(itemlist, True)
    next_page = soup.find('div', class_='pagination')
    if next_page and len(next_page.find_all('li')) > 0:
        try:
            # The last <li> of the pagination bar holds the "next" link.
            next_page = next_page.find_all('li')[-1]
            next_url = next_page.find('a')['href'] if next_page.find('a') and next_page.find('a').get('href') else ''
            base_url = scrapertools.find_single_match(item.url, '(.+?)\?')
            if next_url:
                itemlist.append(
                    Item(
                        action = item.action,
                        channel = item.channel,
                        thumbnail = thumb,
                        title = 'Siguiente página >',
                        url = "%s%s" % (base_url, next_url) if not base_url in next_url else next_url
                    )
                )
        except:
            import traceback
            logger.error(traceback.format_exc())
    return itemlist
def lista(item):
    # Paginated series / live-action listing. Pagination is two-layered:
    # item.page slices the matches of the current HTML page (windows overlap
    # by one item, effective step 29), and "/pag-N" URLs advance to the next
    # HTML page once the local slice is exhausted.
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<a href="([^"]+)" '
    if item.title == "Series":
        patron += 'class="link">.+?<img src="([^"]+)".*?'
    else:
        patron += 'class="link-la">.+?<img src="([^"]+)".*?'
    patron += 'title="([^"]+)">'
    # Current HTML page number: 1 on a section root, otherwise parsed from
    # the "-N" segment of the URL.
    if item.url == host or item.url == host + "/liveaction":
        a = 1
    else:
        num = (item.url).split('-')
        a = int(num[1])
    matches = scrapertools.find_multiple_matches(data, patron)
    # Paginacion
    num_items_x_pagina = 30
    min = item.page * num_items_x_pagina
    min = min - item.page  # overlap windows by one item -> step of 29
    max = min + num_items_x_pagina - 1
    b = 0  # number of items actually emitted from this window
    for link, img, name in matches[min:max]:
        b = b + 1
        if " y " in name:
            title = name.replace(" y ", " & ")
        else:
            title = name
        url = host + link
        scrapedthumbnail = host + img
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        itemlist.append(item.clone(title=title, url=url, action="episodios",
                                   thumbnail=scrapedthumbnail, show=title,
                                   contentSerieName=title, context=context))
    if b < 29:
        # Local slice exhausted: jump to the next HTML page (page reset to 0),
        # but only when this window produced a meaningful number of results.
        a = a + 1
        url = host + "/pag-" + str(a)
        if b > 10:
            itemlist.append(
                Item(channel=item.channel,
                     title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                     url=url, action="lista", page=0))
    else:
        # Full window: keep the same URL and advance the local page index.
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                 url=item.url, action="lista", page=item.page + 1))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def search_section(item):
    """Build section entries from the HTML <select> whose id matches item.extra."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    select_html = scrapertools.find_single_match(
        data, 'id="%s_select"[^>]+>(.*?)</select>' % item.extra)
    options = re.compile('<option value="([^"]+)">(.*?)</option>',
                         re.DOTALL).findall(select_html)
    for value, label in options:
        section_url = "%s?%s=%s&order=title" % (item.url, item.extra, value)
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=label, url=section_url,
                             context=renumbertools.context(item)))
    return itemlist