Example #1
def novedades_anime(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<p>(.*?)</p>.+?'
                         '<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
    itemlist = []

    for thumbnail, _type, plot, url, title in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    return itemlist
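A note on context: these channel functions assume a shared set of module-level imports and constants. A minimal sketch of that context, using the pelisalacarta/alfa-style layout (HOST and the exact import paths here are assumptions, not taken from the original source):

# Sketch of the module-level context these channel functions assume (illustrative only)
import re
import urlparse  # Python 2; on Python 3 this becomes urllib.parse

from core import httptools, scrapertools, tmdb
from core.item import Item
from channels import renumbertools
from platformcode import config, logger

HOST = "https://example-site.com/"  # placeholder: each channel defines its own base URL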
Example #2
def novedades_episodios(item):
    logger.info("pelisalacarta.channels.animeflv novedades")

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)

    '''
    <div class="not">
        <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
        <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
        <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
        </a>
    </div>
    '''

    patronvideos = '<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" ' \
                   'src="([^"]+)"[^<]+<span class="tit_ep"><span class="tit">([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    itemlist = []

    for match in matches:
        scrapedtitle = scrapertools.entityunescape(match[3])
        fulltitle = scrapedtitle
        # directory = match[1]
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedthumbnail = urlparse.urljoin(item.url, match[2].replace("mini", "portada"))
        scrapedplot = ""
        #if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(scrapedtitle, scrapedurl, scrapedthumbnail))

        new_item = Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                        thumbnail=scrapedthumbnail, plot=scrapedplot, fulltitle=fulltitle)

        content_title = scrapertools.entityunescape(match[1])
        if content_title:
            episode = scrapertools.get_match(content_title, '\s+(\d+)$')
            content_title = content_title.replace(episode, '')
            season, episode = numbered_for_tratk(content_title, 1, episode)
            new_item.hasContentDetails = "true"
            new_item.contentTitle = content_title
            new_item.contentSeason = season
            new_item.contentEpisodeNumber = int(episode)

        itemlist.append(new_item)

    return itemlist
Example #3
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    logger.debug("datito %s" % data)

    url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')

    data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')

    matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                         '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                         re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, url, title, genres, plot in matches:

        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
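Example #3 calls a clean_title helper that is not shown. A minimal sketch, assuming it only needs to unescape HTML entities and trim whitespace (hypothetical; the real helper may normalize further):

def clean_title(title):
    # Hypothetical helper: unescape HTML entities and strip stray whitespace
    return scrapertools.entityunescape(title).strip()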
Example #4
def listado(item):
    logger.info()
    itemlist = []
    
    data = get_source(item.url)

    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')
    
    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)
    
    matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
                         '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)
    
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        plot=plot)
        
        if _type == "Anime":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)
        
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        
        itemlist.append(new_item)
    
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
    
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    
    return itemlist
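Example #4 fetches pages through a get_source helper instead of calling httptools directly. In these channels it is typically a thin wrapper that downloads the page and collapses whitespace, in the same way Examples #1 and #3 do inline; a sketch (some channels add an optional patron argument to pre-filter the page, as in Example #32):

def get_source(url):
    # Typical per-channel helper: download the page and strip newlines/tabs/double spaces
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2,}", "", data)
    return data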
Example #5
def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    logger.debug(data)
    patron = '<article class=Items>.*?href=(.*?)>.*?typeContent>(.*?)<.*?'
    patron += '<img src=(.*?) />.*?<h2>(.*?)</h2><p>(.*?)</p><span>(\d{4})<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, content_type, scrapedthumb, scrapedtitle, scrapedplot, year in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        if content_type != 'Serie':
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        action=action,
                        type=content_type,
                        infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #6
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    data = scrapertools.find_single_match(
        data,
        '<h3 class="widgetitulo">Resultados</h3>.*?<div id="sidebar-wrapper">')

    patron = '<div.*?<a href="(.*?)"><img src="(.*?)" alt="(.*?)".*?</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = re.sub('^Pelicula ', '', scrapedtitle)
        new_item = Item(channel=item.channel,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail)
        if scrapedtitle.startswith("Pelicula") or item.type == "movie":
            new_item.action = 'findvideos'
            new_item.contentTitle = title
        else:
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'episodios'
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    patron = '<a class="nextpostslink" rel="next" href="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="list_all",
                 title='Siguiente >>>',
                 url=matches[0],
                 type=item.type))
    return itemlist
Example #7
def find_video_items(item=None, data=None):
    """
    Función genérica para buscar vídeos en una página, devolviendo un itemlist con los items listos para usar.
     - Si se pasa un Item como argumento, a los items resultantes mantienen los parametros del item pasado
     - Si no se pasa un Item, se crea uno nuevo, pero no contendra ningun parametro mas que los propios del servidor.

    @param item: Item al cual se quieren buscar vídeos, este debe contener la url válida
    @type item: Item
    @param data: Cadena con el contendio de la página ya descargado (si no se pasa item)
    @type data: str

    @return: devuelve el itemlist con los resultados
    @rtype: list
    """
    logger.info()
    itemlist = []

    # Download the page
    if data is None:
        data = httptools.downloadpage(item.url).data

    data = zcrypt.get_video_url(data)

    # Create an item if none was passed
    if item is None:
        item = Item()
    # Copy the thumbnail and title fields to contentThumbnail and contentTitle
    else:
        if not item.contentThumbnail:
            item.contentThumbnail = item.thumbnail
        if not item.contentTitle:
            item.contentTitle = item.title

    # Find the links to the videos
    for label, url, server, thumbnail in findvideos(data):
        title = config.get_localized_string(70206) % label
        itemlist.append(
            item.clone(title=title, action="play", url=url, thumbnail=thumbnail, server=server, folder=False))

    return itemlist
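A hypothetical usage of find_video_items, assuming a channel item whose url points at a page with embedded video links (the url and channel name are placeholders):

# Hypothetical usage: turn a page of embedded players into playable items
page_item = Item(channel="somechannel", url="https://example.com/ver/pelicula-123")
for video in find_video_items(page_item):
    logger.info(video.title)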
Example #8
def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = '<a class="ah-imagge" href="([^"]+)">.*?src="([^"]+).*?title="([^"]+)".*?Calificacion(.*?)ratebox'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year_data in matches:
        url = scrapedurl
        year = scrapertools.find_single_match(year_data, '>Año<.*?>(\d{4})')
        if not year:
            year = '-'
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel,
                        title=scrapedtitle,
                        url=url,
                        action='findvideos',
                        thumbnail=thumbnail,
                        infoLabels={'year': year})

        new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination

    next_page = scrapertools.find_single_match(
        data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action="list_all",
                 title='Siguiente >>>',
                 url=next_page))

    return itemlist
Example #9
def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?"meta".*?'
    patron += '"year">([^<]+)<(.*?)<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        language = get_language(lang_data)
        if language:
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        action=action,
                        language=language,
                        infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #10
def find_video_items(item=None, data=None):
    """
    Generic function to search for videos on a page, returning an itemlist with the ready-to-use items.
     - If an Item is passed as an argument, the resulting items keep the parameters of the passed item
     - If an Item is not passed, a new one is created, but it will not contain any parameters other than those of the server.

    @param item: Item to which you want to search for videos, this must contain the valid url
    @type item: Item
    @param data: String with the page content already downloaded (if item is not passed)
    @type data: str

    @return: returns the itemlist with the results
    @rtype: list
    """
    logger.debug()
    itemlist = []

    # Download the page
    if data is None:
        data = httptools.downloadpage(item.url).data

    data = unshortenit.findlinks(data)

    # Create an item if there is no item
    if item is None:
        item = Item()
    # Pass the thumbnail and title fields to contentThumbnail and contentTitle
    else:
        if not item.contentThumbnail:
            item.contentThumbnail = item.thumbnail
        if not item.contentTitle:
            item.contentTitle = item.title

    # Find the links to the videos
    for label, url, server, thumbnail in findvideos(data):
        title = config.get_localized_string(70206) % label
        itemlist.append(
            item.clone(title=title, action="play", url=url, thumbnail=thumbnail, server=server, folder=False))

    return itemlist
Example #11
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<article class="Item"><a href="([^>]+)"><div class="Poster"><img src="([^"]+)".*?'
    patron += '<h2>([^>]+)</h2>.*?</article>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapedtitle
        thumbnail = scrapedthumbnail.strip()
        url = scrapedurl
        filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "")
        filter_list = {"poster_path": filter_thumb}
        filter_list = filter_list.items()
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'filtro':filter_list})

        if item.type == 'peliculas' or 'peliculas' in url:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        else:
            new_item.action = 'seasons'
            new_item.contentSerieName = scrapedtitle

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
Example #12
def list_all(item):
    logger.info()

    itemlist = list()
    base_url = "%s/%s" % (item.url, item.s_type)
    headers = {"referer": item.url.replace("api/", ""), "x-requested-with": "XMLHttpRequest"}
    matches = httptools.downloadpage(base_url, headers=headers).json

    for elem in matches["data"]:
        url = elem["url"]
        title = elem["title"]
        lang = "VOSE"
        if "latino" in title.lower():
            title = title.replace(" Latino", "")
            lang = "LAT"
        elif "castellano" in title.lower():
            title = title.replace(" Castellano", "")
            lang = "CAST"
        thumb = host.replace("api/", "") + elem["imgPoster"]
        plot = elem["synopsis"]

        new_item = Item(channel=item.channel, title=title, url=url, action='episodios', plot=plot,
                        thumbnail=thumb, language=lang, infoLabels={"year": "-"})

        if "MOVIE" in base_url:
            new_item.contentTitle = title
        else:
            new_item.contentSerieName = title

        itemlist.append(new_item)


    tmdb.set_infoLabels_itemlist(itemlist, True)

    # renamed from next to avoid shadowing the builtin
    next_page = int(matches["meta"]["current_page"]) + 1
    if next_page <= matches["meta"]["last_page"]:
        s_type = "%s&page=%s" % (item.s_type, next_page)
        itemlist.append(item.clone(title="Siguiente >>", s_type=s_type))

    return itemlist
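The JSON shape this parser expects can be read off the code itself; the field names below come from Example #12, the values are illustrative:

# Illustrative response consumed by list_all above
response = {
    "data": [
        {"url": "/anime/ejemplo", "title": "Ejemplo Latino",
         "imgPoster": "img/poster.jpg", "synopsis": "..."}
    ],
    "meta": {"current_page": 1, "last_page": 10}
}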
Example #13
def discover_list(item):
    from platformcode import unify
    itemlist = []

    result = tmdb.discovery(item)

    tvshow = False

    logger.debug(item)

    for elem in result:
        elem['tmdb_id'] = elem['id']
        if 'title' in elem:
            title = unify.normalize(elem['title']).capitalize()
            elem['year'] = scrapertools.find_single_match(elem['release_date'], '(\d{4})-\d+-\d+')
        else:
            title = unify.normalize(elem['name']).capitalize()
            tvshow = True

        new_item = Item(channel='search', title=title, infoLabels=elem, action='do_search', extra=title,
                        category=config.get_localized_string(70695), context='')

        if tvshow:
            new_item.contentSerieName = title
        else:
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if item.page != '' and len(itemlist) > 0:
        next_page = str(int(item.page) + 1)
        #if not 'similar' in item.list_type:
        #    itemlist.append(item.clone(title='Pagina Siguente', page=next_page))
        #else:
        itemlist.append(Item(channel=item.channel, action='discover_list', title=typo(config.get_localized_string(30992), 'color kod bold'),
                             search_type=item.search_type, list_type=item.list_type, type=item.type, page=next_page))

    return itemlist
Example #14
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find("div", class_="content").find_all("article", id=re.compile(r"^post-\d+"))

    for elem in matches:
        url = elem.a["href"]
        title = elem.img["alt"]
        thumb = elem.img["data-srcset"]
        try:
            year = elem.p.text
        except:
            year = '-'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if "series/" in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("div", class_="pagMovidy")[-1].a["href"]
    except:
        return itemlist

    if url_next_page and len(matches) > 16:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
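Example #14 and several later examples parse pages with a create_soup helper. A minimal sketch, assuming BeautifulSoup is available; the real helper may pass headers, handle encoding, or pick a different parser:

from bs4 import BeautifulSoup

def create_soup(url):
    # Typical helper: download the page and return a parsed BeautifulSoup tree
    data = httptools.downloadpage(url).data
    return BeautifulSoup(data, "html.parser")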
Example #15
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<a href="([^"]+)">(.*?)</a>.+?'
                         'class="Desc ScrlV"><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, _type, url, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #16
def search_results(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)

    for elem in soup.find_all("div", class_="result-item"):

        url = elem.a["href"]
        thumb = elem.img["src"]
        title = elem.img["alt"]
        year = elem.find("span", class_="year").text

        language = get_language(elem)

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb,
                        language=language, infoLabels={'year': year})

        if "movies" in url:
            new_item.action = "findvideos"
            new_item.contentTitle = new_item.title
        else:
            new_item.action = "seasons"
            new_item.contentSerieName = new_item.title
            new_item.context = filtertools.context(item, list_language, list_quality)

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("a", class_="arrow_pag")[-1]["href"]
    except:
        return itemlist

    itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='search_results'))

    return itemlist
Example #17
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find("div", class_="content")
    for elem in matches.find_all("article", id=re.compile(r"^post-\d+")):

        info_1 = elem.find("div", class_="poster")
        info_2 = elem.find("div", class_="data")

        thumb = info_1.img["src"]
        title = info_1.img["alt"]
        title = re.sub("VOSE", "", title)
        url = info_1.a["href"]
        try:
            year = info_2.find("span", text=re.compile(r"\d{4}")).text.split(",")[-1]
        except:
            # default the year so year.strip() below never hits an undefined name
            year = "-"
        new_item = Item(channel=item.channel, url=url, title=title, thumbnail=thumb, infoLabels={"year": year.strip()})

        if "serie" in url:
            new_item.action = "seasons"
            new_item.contentSerieName = title
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)
    try:
        next_page = soup.find_all("a", class_="arrow_pag")[-1]["href"]
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=next_page, action="list_all"))
    except:
        pass

    return itemlist
Example #18
def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)

    patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
    patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    media_type = item.type
    for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
                        thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
        if media_type != 'dorama':
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
            new_item.type = item.type

        else:
            new_item.contentSerieName=scrapedtitle
            new_item.action = 'episodios'
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination

    if itemlist != []:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                                 url=host+'catalogue'+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 type=item.type))
    return itemlist
Example #19
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
                         '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for url, thumbnail, _type, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #20
def list_all(item):
    logger.info()

    itemlist = list()
    soup = create_soup(item.url)
    matches = soup.find("ul", class_="MovieList NoLmtxt Rows AX A06 B04 C03 E20")

    if not matches:
        return itemlist

    # iterate only the articles inside the matched list (cf. Example #24)
    for elem in matches.find_all("article"):

        url = elem.a["href"]
        title = elem.a.h3.text
        thumb = elem.find("img")
        thumb = thumb["data-src"] if thumb.has_attr("data-src") else thumb["src"]
        year = elem.find("span", class_="Year").text

        new_item = Item(channel=item.channel, url=url, title=title, thumbnail=thumb, infoLabels={"year": year})

        if "movie" not in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        next_page = soup.find("a", class_="next page-numbers")["href"]
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=next_page, action='list_all'))
    except:
        pass

    return itemlist
Example #21
def list_all(item):
    logger.info()

    itemlist = list()

    matches = alfaresolver.get_data_zw(host, item)

    for elem in matches.get("matches", []):

        title = elem.get("a2", "").split("-")[0].strip()
        v_id = elem.get("a1", "0")
        plot = elem.get("a100", "")
        thumb = "%swp/wp-content/uploads/%s" % (host, elem.get("a8", ""))

        new_item = Item(channel=item.channel,
                        title=title,
                        v_id=v_id,
                        thumbnail=thumb,
                        plot=plot,
                        infoLabels={"year": elem.get("a4", "-")})

        new_item.contentTitle = title
        new_item.action = "findvideos"
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)
    if matches.get("pagination", False):
        url_next_page = item.url
        itemlist.append(
            Item(channel=item.channel,
                 title="Siguiente >>",
                 url=url_next_page,
                 start=item.start + 20,
                 search=item.search,
                 genre=item.genre,
                 action='list_all'))

    return itemlist
Example #22
def novedades_anime(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    matches = re.compile(
        'href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
        '(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
    itemlist = []

    for url, thumbnail, _type, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        fulltitle=title,
                        plot=plot)
        if _type != "Película":
            if config.is_xbmc():
                new_item.show = title
                new_item.context = renumbertools.context
            if not config.is_xbmc():
                new_item.show = title
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    return itemlist
Example #23
def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches:

        title = scrapedtitle
        if len(year) == 0:
            year = '-'
        url = scrapedurl
        thumbnail = scrapedthumb
        if not '/serie' in url:
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        action=action,
                        infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #24
def list_all(item):
    logger.info()

    itemlist = list()
    soup = create_soup(item.url)
    matches = soup.find("ul", class_="MovieList NoLmtxt Rows AX A06 B04 C03 E20")
    if not matches:
        return itemlist

    for elem in matches.find_all("article"):
        url = elem.a["href"]
        title = elem.a.h3.text
        thumb = elem.img
        if thumb.has_attr("data-lazy-src"):
            thumb = thumb["data-lazy-src"]
        else:
            thumb = thumb["src"]
        try:
            year = elem.find("span", class_="Year").text
        except:
            year = '-'
        new_item = Item(channel=item.channel, url=url, title=title, thumbnail=thumb, infoLabels={'year': year})

        if elem.figcaption or elem.find("span", class_="TpTv BgA"):
            new_item.action = "seasons"
            new_item.contentSerieName = title
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = title
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    next_page = soup.find("a", class_="next page-numbers")["href"]

    itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=next_page, action='list_all'))

    return itemlist
Example #25
def sub_search(item):
    logger.info()
    itemlist = []
    headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'}
    dict_data = httptools.downloadpage(item.url, headers=headers, post="query=%s" % item.query).json
    list = dict_data["data"][item.type]

    for dict in list:
        title = re.sub(r" (\([^\)]+\))", "", dict["title"])
        new_item = Item(channel=item.channel, thumbnail="https://static.noimg.net/movie/" + dict["cover"],
                        title=title + " (" + dict["release_year"] + ")", url=host + dict["slug"],
                        infoLabels={"year": dict["release_year"]})

        if item.type == "m":
            new_item.action = "findvideos"
            new_item.contentTitle = title
        else:
            new_item.action = "seasons"
            new_item.contentSerieName = title
        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
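A hypothetical call to sub_search, assuming host points at the site root and a search endpoint that accepts the query POST shown above; type "m" selects the movie list in the response:

# Hypothetical usage: query the AJAX search endpoint for movies
search_item = Item(channel="somechannel", url=host + "search", query="matrix", type="m")
results = sub_search(search_item)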
Example #26
def search_results(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
    patron += ">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle, scrapedurl in matches:
        title = "%s [%s]" % (scrapedtitle, scrapedyear)
        new_item = Item(channel=item.channel,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail)
        if scrapedtype.strip() == 'Serie':
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'episodios'
            new_item.type = 'sr'
        else:
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'
            new_item.type = 'pl'
        itemlist.append(new_item)
    return itemlist
Example #27
def list_all(item):
    logger.info()
    itemlist = list()
    soup = create_soup(item.url)
    matches = soup.find("div", class_="cf")
    for elem in matches.find_all("article", class_=re.compile("shortstory cf")):
        url = elem.a["href"]
        if elem.find('div', class_='short_header'):
            title = elem.find('div', class_='short_header').text.strip()
        else:
            title = elem.a.text.strip()
        thumb = elem.find("img")["src"]
        # extra_info is never defined in this function, so the original try/except
        # always fell through; leave the year as "-" for tmdb to fill in
        year = "-"
        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})
        if "/peliculas/" in url:
            new_item.contentTitle = title
            new_item.action = "findvideos"
        else:
            new_item.contentSerieName = title
            new_item.action = "seasons"
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("a", class_="page-numbers")[-1]["href"]

        if url_next_page:
            itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
                                 section=item.section))
    except:
        pass

    return itemlist
Example #28
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find_all("div", class_="main-peliculas")[-1].find_all("div", class_="movie-box-1")

    for elem in matches:

        thumb = elem.a.figure.img["src"]
        title = elem.a.p.text
        url = elem.a["href"]
        year = elem.a.span.text

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if "serie/" in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find("ul", class_="pagination").find_all("li")[-1].a["href"]
    except:
        return itemlist

    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
Example #29
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", id="content")
    matches = soup.find_all("div", class_="swiper-container")[item.pos].find_all("div", class_="swiper-slide")

    for elem in matches:
        url = elem.a["href"]
        title = elem.find("div", class_="card-title").text.strip()
        year = elem.find("div", class_="card-subtitle").text.strip()
        if item.pos == 1:
            content_title = title
            title = "%s - %s" % (title, year)
        thumb = elem.img["src"]

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if item.pos != 4:

            if item.pos == 1:
                new_item.contentSerieName = content_title
                new_item.action = "findvideos"
            else:
                new_item.contentSerieName = title
                new_item.action = "episodios"
            new_item.context = filtertools.context(item, list_language, list_quality)
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #30
def listado(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(
        data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
    data = scrapertools.find_single_match(
        data, '</div><div class="full">(.*?)<div class="pagination')
    matches = re.compile(
        '<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
        '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
        re.DOTALL).findall(data)
    itemlist = []
    for thumbnail, url, title, genres, plot in matches:
        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        fulltitle=title,
                        plot=plot)
        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context(item)
        itemlist.append(new_item)
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(
            Item(channel=item.channel, action="listado", title=title, url=url))
    return itemlist
Example #31
def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="year">(\d+)</span>.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
        title = '%s (%s)' % (scrapedtitle, scrapedyear)
        url = scrapedurl
        new_item = Item(channel=item.channel,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        infoLabels={'year': scrapedyear})

        if '/serie/' in url:
            new_item.action = 'temporadas'
            new_item.contentSerieName = scrapedtitle
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)
    tmdb.set_infoLabels(itemlist, True)
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">Siguiente</a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(
            item.clone(channel=item.channel,
                       action="lista",
                       title="Next page >>",
                       text_color="blue",
                       url=next_page_url))
    return itemlist
Example #32
def novedades_anime(item):
    logger.info()
    itemlist = []

    patr = '<ul class="ListAnimes[^>]+>(.*?)</ul>'
    data = get_source(item.url, patron=patr)

    patron = 'href="([^"]+)".+?<img src="([^"]+)".+?'
    patron += '<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
    patron += '(?:</p><p>(.*?)</p>.+?)?</article></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot)

        if _type != "Película":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)

        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
Example #33
def search_results(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)

    for elem in soup.find_all("div", class_="result-item"):

        url = elem.a["href"]
        thumb, o = get_tmdb_thumb(elem.img["src"])
        title = elem.img["alt"]
        year = elem.find("span", class_="year").text

        ctitle = title.partition(' | ')[0]

        if not unify:
            title += ' [COLOR darkgrey](%s)[/COLOR]' % year

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        action='findvideos',
                        thumbnail=thumb,
                        infoLabels={'year': year})

        if '/pelicula' in url:
            new_item.contentTitle = ctitle
        else:
            new_item.contentSerieName = ctitle
            new_item.action = 'seasons'

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)
    return itemlist
Example #34
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find("div", class_="dt-css-grid").find_all("article")

    for elem in matches:
        url = elem.h3.a["href"]
        title = elem.h3.a.text
        thumb = elem.img["src"]
        year = "-"

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if "serie-" in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find("div", class_="pagination").find("span", class_="current").next_sibling["href"]
    except:
        return itemlist

    if url_next_page and len(matches) > 16:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
Example #35
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # Return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                    data_dict["result"].remove(i)


    if not item.page:
        item.page = 0

    offset = int(item.page) * 30
    limit = offset + 30
       
    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action= "findvideos"
            #viewcontent = 'movies'
            infoLabels["title"]= i["title"]
            title= '%s (%s)' % (i["title"], i['year'] )
            url= urlparse.urljoin(__url_base__,"ver-pelicula-online/" + str(i["id"]))

        elif item.extra=="series": 
            action="get_temporadas"
            #viewcontent = 'seasons'
            title= i["title"]
            infoLabels['tvshowtitle']= i["title"]
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))

        else: #item.extra=="series_novedades": 
            action="findvideos"
            #viewcontent = 'episodes'
            infoLabels['tvshowtitle'] = i["title"]
            infoLabels['season']=i['season']
            infoLabels['episode']=i['episode'].zfill(2)
            flag= scrapertools.find_single_match(i["label"],'(\s*\<img src=.*\>)')
            idioma=i["label"].replace(flag,"")
            title = '%s %sx%s (%s)' %(i["title"], infoLabels["season"], infoLabels["episode"], idioma)
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))


        if i.has_key("poster") and i["poster"]:
            thumbnail=re.compile("/w\d{3}/").sub("/w500/",i["poster"])
        else:
            thumbnail= item.thumbnail
        if i.has_key("background") and i["background"]: 
            fanart= i["background"]
        else:
            fanart= item.fanart
        
        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: id of the movie/show on pepecine.com
        if i['genre']: infoLabels['genre'] = i['genre']
        if i['year']: infoLabels['year'] = i['year']
        #if i['tagline']: infoLabels['plotoutline'] = i['tagline']
        if i['plot']:
            infoLabels['plot'] = i['plot']
        else:
            infoLabels['plot'] = ""
        if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
        if i['imdb_rating']:
            infoLabels['rating'] = i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating'] = i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']


        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                         fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", #viewcontent=viewcontent,
                         language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year = i['year']
        newItem.contentTitle = i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)

    # Fetch the basic metadata using multiple threads
    tmdb.set_infoLabels(itemlist)
    
    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1) )
    
    return itemlist      
Example #36
def listado(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    patron  = '<div class="MiniFicha">.*?'
    patron += '<img src="([^"]+).*?'
    patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?'
    patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'

    matches = re.compile(patron,re.DOTALL).findall(data)

    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        #logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
        # Extract the year from the title and strip the leftovers
        patron = '\d{4}$'
        year = scrapertools.find_single_match(title,patron)
        if year:
            title = re.sub(patron, "", title)
        patron = '\s?-?\s?(line)?\s?-\s?$'
        title = re.sub(patron, "", title,flags=re.IGNORECASE)

        # Use the 'b' image because it is larger
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,
                            thumbnail=thumbnail, url=url_ver,
                            infoLabels={"year":year}, text_color = color1)

            cat_padres = cat_padres.strip()
            if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]:
                #if item.extra == 'movie':
                new_item.contentTitle = title
                new_item.extra = "movie"
                # Keep only valid names for the quality
                patron = ("rip|dvd|screener|hd|ts|Telesync")
                if re.search(patron,cat_hijos,flags=re.IGNORECASE):
                    new_item.contentQuality = cat_hijos
                    new_item.title = "%s [%s]" % (title, cat_hijos)
                elif cat_padres == "peliculas-dvdrip":
                    new_item.contentQuality = "DVDRIP"
                    new_item.title = "%s [DVDRIP]" % title
                elif cat_padres == "HDRIP":
                    new_item.contentQuality = "HDRIP"
                    new_item.title = "%s [HDRIP]" % title

            elif cat_padres == "series":
                new_item.contentSerieName = cat_hijos
                patron = re.compile('(\d+)x(\d+)')
                matches = patron.findall(title)
                if len(matches) == 1:
                    new_item.contentSeason = matches[0][0]
                    new_item.contentEpisodeNumber = matches[0][1].zfill(2)
                    new_item.extra = "episodie"
                else:
                    # matches == [('1', '01'), ('1', '02'), ('1', '03')]
                    new_item.extra = "multi-episodie"

            else:  # Other categories that do not interest us for now
                continue

            ''' Optionally we could also extract the torrent and direct-download links
            patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)'
            new_item.url_descarga = scrapertools.find_single_match(opciones,patron)
            patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?'
            new_item.url_torrent = scrapertools.find_single_match(opciones,patron)'''

            itemlist.append(new_item)

    if itemlist:
        # Fetch the basic metadata for all the movies using multiple threads
        tmdb.set_infoLabels(itemlist)

        # Add pagination if needed
        patron = '<div class="sPages">.*?'
        patron += '<a href="([^"]+)">Siguiente'
        url_next_page = scrapertools.find_single_match(data,patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=HOST + url_next_page, folder=True,
                                 text_color = color3, text_blod=True))

    return itemlist
Example #37
def save_library_episodes(path, episodelist, serie, silent=False, overwrite=True):
    """
    guarda en la ruta indicada todos los capitulos incluidos en la lista episodelist
    @type path: str
    @param path: ruta donde guardar los episodios
    @type episodelist: list
    @param episodelist: listado de items que representan los episodios que se van a guardar.
    @type serie: item
    @param serie: serie de la que se van a guardar los episodios
    @type silent: bool
    @param silent: establece si se muestra la notificación
    @param overwrite: permite sobreescribir los ficheros existentes
    @type overwrite: bool
    @rtype insertados: int
    @return:  el número de episodios insertados
    @rtype sobreescritos: int
    @return:  el número de episodios sobreescritos
    @rtype fallidos: int
    @return:  el número de episodios fallidos
    """
    logger.info()

    # No episode list, nothing to save
    if not len(episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    insertados = 0
    sobreescritos = 0
    fallidos = 0
    news_in_playcounts = {}

    # List all the show's files up front so we do not have to check each one for existence
    raiz, carpetas_series, ficheros = filetools.walk(path).next()
    ficheros = [filetools.join(path, f) for f in ficheros]

    # silent suppresses the progress dialog (used by library_service)
    if not silent:
        # progress dialog
        p_dialog = platformtools.dialog_progress('streamondemand', 'Aggiunta episodi...')
        p_dialog.update(0, 'Aggiunta episodio...')

    new_episodelist = []
    # Get the season and episode number and discard items that lack them
    for e in episodelist:
        try:
            season_episode = scrapertools.get_season_and_episode(e.title)

            e.infoLabels = serie.infoLabels
            e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
            new_episodelist.append(e)
        except:
            continue

    # No valid episodes, nothing to save
    if not len(new_episodelist):
        logger.info("No hay lista de episodios, salimos sin crear strm")
        return 0, 0, 0

    # use float because integer division truncates in Python 2.x
    t = float(100) / len(new_episodelist)

    for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
        if not silent:
            p_dialog.update(int(math.ceil((i + 1) * t)), 'Aggiunta episodio...', e.title)


        season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
        strm_path = filetools.join(path, "%s.strm" % season_episode)
        nfo_path = filetools.join(path, "%s.nfo" % season_episode)
        json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

        strm_exists = strm_path in ficheros
        nfo_exists = nfo_path in ficheros
        json_exists = json_path in ficheros

        if not strm_exists:
            # If season_episode.strm does not exist, add it
            item_strm = Item(action='play_from_library', channel='biblioteca',
                             strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={})
            item_strm.contentSeason = e.contentSeason
            item_strm.contentEpisodeNumber = e.contentEpisodeNumber
            item_strm.contentType = e.contentType
            item_strm.contentTitle = season_episode

            # FILTERTOOLS
            if item_strm.list_idiomas:
                # if tvshow.nfo has a filter, pass it on to the item_strm being generated
                if "library_filter_show" in serie:
                    item_strm.library_filter_show = serie.library_filter_show

                if item_strm.library_filter_show == "":
                    logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar")

            # logger.debug("item_strm" + item_strm.tostring('\n'))
            # logger.debug("serie " + serie.tostring('\n'))
            strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl()))

        item_nfo = None
        if not nfo_exists and e.infoLabels["code"]:
            # If season_episode.nfo does not exist, create it
            scraper.find_and_set_infoLabels(e)
            head_nfo = scraper.get_nfo(e)

            item_nfo = e.clone(channel="biblioteca", url="", action='findvideos',
                               strm_path=strm_path.replace(TVSHOWS_PATH, ""))

            nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson())

        # Continue only if both season_episode.nfo and season_episode.strm exist
        if nfo_exists and strm_exists:
            if not json_exists or overwrite:
                # Get the episode's infoLabels
                if not item_nfo:
                    head_nfo, item_nfo = read_nfo(nfo_path)

                e.infoLabels = item_nfo.infoLabels

                if filetools.write(json_path, e.tojson()):
                    if not json_exists:
                        logger.info("Insertado: %s" % json_path)
                        insertados += 1
                        # Marcamos episodio como no visto
                        news_in_playcounts[season_episode] = 0
                        # Marcamos la temporada como no vista
                        news_in_playcounts["season %s" % e.contentSeason] = 0
                        # Marcamos la serie como no vista
                        # logger.debug("serie " + serie.tostring('\n'))
                        news_in_playcounts[serie.contentTitle] = 0

                    else:
                        logger.info("Sobreescrito: %s" % json_path)
                        sobreescritos += 1
                else:
                    logger.info("Fallido: %s" % json_path)
                    fallidos += 1

        else:
            logger.info("Fallido: %s" % json_path)
            fallidos += 1

        if not silent and p_dialog.iscanceled():
            break

    if not silent:
        p_dialog.close()

    if news_in_playcounts:
        # If there are new episodes, mark them as unwatched in tvshow.nfo ...
        tvshow_path = filetools.join(path, "tvshow.nfo")
        try:
            import datetime
            head_nfo, tvshow_item = read_nfo(tvshow_path)
            tvshow_item.library_playcounts.update(news_in_playcounts)

            if tvshow_item.active == 30:
                tvshow_item.active = 1
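            # 'active' holds the update interval in days: record today and schedule the next check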
            update_last = datetime.date.today()
            tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
            update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
            tvshow_item.update_next = update_next.strftime('%Y-%m-%d')

            filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
        except:
            logger.error("Error al actualizar tvshow.nfo")
            fallidos = -1
        else:
            # ... and if that went well, refresh the Kodi library
            if config.is_xbmc() and not silent:
                from platformcode import xbmc_library
                xbmc_library.update(FOLDER_TVSHOWS, filetools.basename(path))

    if fallidos == len(episodelist):
        fallidos = -1

    logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" %
                 (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos))
    return insertados, sobreescritos, fallidos
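
A minimal usage sketch for the example above, under stated assumptions: in the pelisalacarta/streamondemand forks this helper is usually named save_library_episodes(path, episodelist, serie, silent=False, overwrite=True); that name and its arguments are presumed here, since the signature is not shown in this listing.

# Hypothetical caller (names assumed, not taken from this listing)
inserted, overwritten, failed = save_library_episodes(path, episodelist, serie)
if failed == -1:
    # either tvshow.nfo could not be rewritten or every single episode failed
    logger.error("library update for %s did not complete cleanly" % serie.contentSerieName)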
Example #38
0
def listado(item):
    logger.info("[pepecine.py] listado")
    itemlist = []
    
    try:
        data_dict = jsontools.load_json(scrapertools.cache_page(item.url))
    except:
        return itemlist # Return an empty list
        
    offset= scrapertools.get_match(item.url,'offset=(\d*)')
    limit= scrapertools.get_match(item.url,'limit=(\d*)')
       
    for i in data_dict["result"]:
        infoLabels={}
        idioma =''
        
        if item.extra=="movie":
            action="get_movie"
            title= i["title"] + ' (' + i['year'] + ')'
            url= urlparse.urljoin(__url_base__,"peliculas-online/" + str(i["id"])) #+"-"+i["title"]).lower().replace(" ","-")))
        elif item.extra=="series": 
            action="get_temporadas"
            title= i["title"]
            infoLabels['tvshowtitle']= i["title"]
            url= urlparse.urljoin(__url_base__,"series-online/" + str(i["id"]))
        else: #item.extra=="series_novedades": 
            action="get_only_episodio"
            infoLabels['season']=i['season']
            infoLabels['episode']=i['episode'].zfill(2)
            item.extra=infoLabels["season"] + "x" + infoLabels["episode"]
            infoLabels['tvshowtitle']= i["title"]
            flag= scrapertools.find_single_match(i["label"],'(\s*\<img src=.*\>)')
            idioma=i["label"].replace(flag,"")
            title=i["title"] + ' '  + item.extra + ' (' + idioma + ')'
            url= urlparse.urljoin(__url_base__,"series-online/" + str(i["id"]))
        
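        # the poster URL appears to embed a TMDb-style width segment (e.g. /w342/); request /w500/ instead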
        if i.has_key("poster") and i["poster"]: 
            thumbnail=re.compile("/w\d{3}/").sub("/w500/",i["poster"])
        else:
            thumbnail= item.thumbnail
        if i.has_key("background") and i["background"]: 
            fanart= i["background"]
        else:
            fanart= item.fanart
        
        # Fill in the infoLabels dictionary
        infoLabels['title_id']=i['id'] # title_id: identifier of the movie/series on pepecine.com
        infoLabels['titleraw']= i["title"] # titleraw: raw title of the movie/series, without formatting
        if i['genre']: infoLabels['genre']=i['genre']
        if i['year']: infoLabels['year']=i['year']
        if i['tagline']: infoLabels['plotoutline']=i['tagline']
        if i['plot']: 
            infoLabels['plot']=i['plot']
        else:
            infoLabels['plot']=""
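        # runtime comes in minutes; convert it to seconds for the duration infoLabel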
        if i['runtime']: infoLabels['duration']=int(i['runtime'])*60
        if i['imdb_rating']:
            infoLabels['rating']=i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating']=i['tmdb_rating']

        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                         fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot",
                         language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year=i['year']
        newItem.contentTitle=i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)
    
    # Pagination: a full page (total_results == limit) suggests there are more results to fetch
    if int(data_dict["total_results"]) == int(limit):
        url=item.url.replace('offset='+offset,'offset='+ str(int(offset)+ int(limit)))
        itemlist.append( Item( channel=item.channel, action="listado", text_color="0xFF994D00",
                               title=">> Pagina siguiente >>", thumbnail=item.thumbnail,
                               url=url, extra=item.extra, fanart=fanart_host) )
    
    return itemlist
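
A minimal usage sketch, with assumptions flagged: listado() only requires item.url to be a JSON endpoint whose query string carries offset and limit, plus item.extra set to "movie", "series" or anything else for the new-episodes branch. The endpoint path below is illustrative, not taken from the channel.

# Hypothetical entry item; only the offset/limit query parameters are actually parsed by listado()
item = Item(channel="pepecine", action="listado", extra="movie",
            url=__url_base__ + "titles.json?offset=0&limit=24")
itemlist = listado(item)  # one Item per result, plus a ">> Pagina siguiente >>" Item when the page is full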