Example #1
def novedades_anime(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
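    # Each match is a (thumbnail, url, title) tuple for one entry in the list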
    itemlist = []

    for thumbnail, url, title in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        title = clean_title(title)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title)

        new_item.show = title
        new_item.context = renumbertools.context

        itemlist.append(new_item)

    return itemlist
Example #2
def novedades_anime(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<p>(.*?)</p>.+?'
                         '<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
    itemlist = []

    for thumbnail, _type, plot, url, title in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
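        # Series keep the renumbering context; films are tagged as movie content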
        if _type != "Película":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    return itemlist
Example #3
def mainlist(item):
    logger.info("pelisalacarta.core.favoritos mainlist")
    itemlist = []
    # bookmarkpath = config.get_setting("bookmarkpath")  # TODO: if it is only used for this we could remove it

    for name,thumb,data in read_favourites():
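        # Only keep favourites that point back to this plugin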
        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)'  % config.PLUGIN_NAME)\
                .replace("&quot", "")

            item = Item().fromurl(url)
            item.title = name
            item.thumbnail = thumb
            item.isFavourite = True

            item.context = [{"title": config.get_localized_string(30154), #"Quitar de favoritos"
                             "action": "delFavourite",
                             "channel": "favoritos",
                             "from_title": item.title},
                            {"title": "Renombrar",
                             "action": "renameFavourite",
                             "channel": "favoritos",
                             "from_title": item.title}
                            ]
            #logger.debug(item.tostring('\n'))
            itemlist.append(item)

    return itemlist
Example #4
def mainlist(item):
    logger.info()
    itemlist = []

    for name, thumb, data in read_favourites():
        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME)\
                .replace("&quot", "")

            item = Item().fromurl(url)
            item.title = name
            item.thumbnail = thumb
            item.isFavourite = True

            item.context = [{"title": config.get_localized_string(30154),  # "Quitar de favoritos"
                             "action": "delFavourite",
                             "channel": "favoritos",
                             "from_title": item.title},
                            {"title": "Rinomina",
                             "action": "renameFavourite",
                             "channel": "favoritos",
                             "from_title": item.title}
                            ]
            # logger.debug(item.tostring('\n'))
            itemlist.append(item)

    return itemlist
Example #5
def mainlist(item):
    logger.info()
    itemlist = []

    for name, thumb, data in read_favourites():
        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME)\
                .replace("&quot", "")

            item = Item().fromurl(url)
            item.title = name
            item.thumbnail = thumb
            item.isFavourite = True

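            # Normalise any previous context value (string or other) into a list before extending it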
            if type(item.context) == str:
                item.context = item.context.split("|")
            elif type(item.context) != list:
                item.context = []

            item.context.extend([
                {
                    "title": config.get_localized_string(
                        30154),  # "Quitar de favoritos"
                    "action": "delFavourite",
                    "channel": "favoritos",
                    "from_title": item.title
                },
                {
                    "title": "Rinomina",
                    "action": "renameFavourite",
                    "channel": "favoritos",
                    "from_title": item.title
                }
            ])
            # logger.debug(item.tostring('\n'))
            itemlist.append(item)

    return itemlist
Example #6
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
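    # URL of the next results page, if the pagination bar is present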
    url_pagination = scrapertools.find_single_match(
        data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile(
        '<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
        '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for url, thumbnail, _type, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        fulltitle=title,
                        plot=plot)

        if _type == "Anime":

            new_item.show = title
            if config.is_xbmc():
                new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(
            Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #7
def search_results(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
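    # Every search result is rendered as an <article class="item movies"> card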

    for elem in soup.find_all("article", class_="item movies"):
        url = elem.a["href"]
        thumb = elem.img["src"]
        title = elem.img["alt"]
        try:
            year = elem.find("span", class_="year").text
        except:
            year = '-'

        language = get_language(elem)

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumb,
                        language=language,
                        infoLabels={'year': year})

        if "/serie" in url:
            new_item.action = "seasons"
            new_item.contentSerieName = new_item.title
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("a", class_="arrow_pag")[-1]["href"]
    except:
        return itemlist

    itemlist.append(
        Item(channel=item.channel,
             title="Siguiente >>",
             url=url_next_page,
             action='search_results'))

    return itemlist
Example #8
def mostrar_perfil(item):
    logger.info()
    alfav = AlfavoritesData()

    itemlist = []

    i_perfil = item.i_perfil
    if not alfav.user_favorites[i_perfil]: return itemlist
    last_i = len(alfav.user_favorites[i_perfil]['items']) - 1

    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(alfav.user_favorites[i_perfil]['items']):

        it = Item().fromurl(enlace)
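        # Context menu entry to modify this stored link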
        it.context = [{
            'title': '[COLOR blue]Modificar enlace[/COLOR]',
            'channel': item.channel,
            'action': 'acciones_enlace',
            'i_enlace': i_enlace,
            'i_perfil': i_perfil
        }]

        it.plot += '[CR][CR][COLOR blue]Canal:[/COLOR] ' + it.channel + ' [COLOR blue]Action:[/COLOR] ' + it.action
        if it.extra != '': it.plot += ' [COLOR blue]Extra:[/COLOR] ' + it.extra
        it.plot += '[CR][COLOR blue]Url:[/COLOR] ' + it.url if isinstance(
            it.url, str) else '...'
        if it.date_added != '':
            it.plot += '[CR][COLOR blue]Added:[/COLOR] ' + it.date_added

        # If it is not a url, nor does it have the system path, convert the path since it will have been copied from another device.
        # It would be more optimal if the conversion was done with an import menu, but for now it is handled at run-time.
        if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(
                ruta_runtime):
            ruta, fichero = filetools.split(it.thumbnail)
            if ruta == '' and fichero == it.thumbnail:  # in linux the split with a windows path does not separate correctly
                ruta, fichero = filetools.split(it.thumbnail.replace(
                    '\\', '/'))
            if 'channels' in ruta and 'thumb' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources',
                                              'media', 'channels', 'thumb',
                                              fichero)
            elif 'themes' in ruta and 'default' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources',
                                              'media', 'themes', 'default',
                                              fichero)

        itemlist.append(it)

    return itemlist
Example #9
def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
        type = type.strip().lower()
        url = scrapedurl
        thumbnail = scrapedthumbnail
        lang = 'VOSE'
        title = scrapedtitle
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        new_item = Item(channel=item.channel,
                        action='episodios',
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        language=lang,
                        infoLabels={'year': year})
        if type != 'anime':
            new_item.contentTitle = title
        else:
            new_item.plot = type
            new_item.contentSerieName = title
            new_item.context = context
        itemlist.append(new_item)

    # Pagination
    next_page = scrapertools.find_single_match(
        data,
        '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')

    if next_page != "":
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(
            Item(channel=item.channel,
                 action="list_all",
                 title=">> Página siguiente",
                 url=actual_page + next_page,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
Example #10
def listado(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)

    url_pagination = scrapertools.find_single_match(
        data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile(
        '<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
        '.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)

    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot)

        if _type == "Anime":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)

        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(
            Item(channel=item.channel, action="listado", title=title, url=url))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
Example #11
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find("ul", class_="list-movie").find_all("li",
                                                            class_="item")

    for elem in matches:
        url = elem.a["href"]
        title = elem.h2.text
        year = scrapertools.find_single_match(elem.h4.text, "(\d{4})")
        thumb = elem.a.img["src"]

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumb,
                        infoLabels={'year': year})

        if "/serie" in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find("a", class_="nextpostslink")["href"]

        if url_next_page:
            itemlist.append(
                Item(channel=item.channel,
                     title="Siguiente >>",
                     url=url_next_page,
                     action='list_all',
                     section=item.section))
    except:
        pass

    return itemlist
Example #12
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find_all("ul", class_="ListAnimes")

    if not matches:
        return itemlist

    for block in matches:
        data = block.find_all("li")
        for elem in data:
            url = elem.a["href"]
            if "juegos-" in url:
                continue
            info = elem.find("div", class_="Description")
            thumb = elem.img.get("src", "")
            title = info.strong.text
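            # The year only appears in the free-text description, either as "del año YYYY" or as part of a full date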
            year = scrapertools.find_single_match(codecs.encode(info.text, "utf-8"), "del año (\d{4})")
            if not year:
                try:
                    year = scrapertools.find_single_match(codecs.encode(info.text, "utf-8"), "\d+ de \w+ de (\d{4})")
                except:
                    year = "-"
            new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

            if "serie" in url:
                new_item.contentSerieName = title
                new_item.action = "seasons"
                new_item.context = filtertools.context(item, list_language, list_quality)
            else:
                new_item.contentTitle = title
                new_item.action = "findvideos"

            itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find("a", rel="next")["href"]
    except:
        return itemlist

    itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
Example #13
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    url_pagination = scrapertools.find_single_match(
        data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')

    data = scrapertools.find_single_match(
        data, '</div><div class="full">(.*?)<div class="pagination')

    matches = re.compile(
        '<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
        '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
        re.DOTALL).findall(data)
    itemlist = []
    for thumbnail, url, title, genres, plot in matches:
        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(
            channel=item.channel,
            action="episodios",
            title=title,
            url=url,
            thumbnail=thumbnail,
            contentTitle=title,
            plot=plot,
        )
        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)
        itemlist.append(new_item)
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(
            Item(channel=item.channel, action="listado", title=title, url=url))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
Example #14
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", id="content")
    matches = soup.find_all("div",
                            class_="swiper-container")[item.pos].find_all(
                                "div", class_="swiper-slide")

    for elem in matches:
        url = elem.a["href"]
        title = elem.find("div", class_="card-title").text.strip()
        year = elem.find("div", class_="card-subtitle").text.strip()
        if item.pos == 1:
            content_title = title
            title = "%s - %s" % (title, year)
        thumb = elem.img["src"]

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumb,
                        infoLabels={"year": year})

        if item.pos != 4:

            if item.pos == 1:
                new_item.contentSerieName = content_title
                new_item.action = "findvideos"
            else:
                new_item.contentSerieName = title
                new_item.action = "episodios"
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #15
def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        scrapedtitle = scrapedtitle.replace('&#215;','x')

        contentSerieName = scrapedtitle
        action = 'seasons'

        if 'episode' in item.url:
            scrapedtitle, season, episode = scrapertools.find_single_match(scrapedtitle, '(.*?) (\d+)x(\d+)')
            contentSerieName = scrapedtitle
            scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
            action='findvideos'

        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
                        thumbnail=thumbnail, contentSerieName=contentSerieName, action=action,
                        context=filtertools.context(item, list_language, list_quality))

        if 'episode' in item.url:
            new_item.contentSeasonNumber = season
            new_item.contentEpisodeNumber = episode
            new_item.context = []

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>»</a>')
    if next_page != '':
        itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                             url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                             type=item.type))
    return itemlist
Example #16
def search_results(item):
    logger.info()

    itemlist = list()
    url = "%swp-content/plugins/ajax-search-pro/ajax_search.php" % host
    post = {
        "action": "ajaxsearchpro_search",
        "aspp": item.text,
        "asid": "1",
        "asp_inst_id": "1_1",
        "options": "current_page_id=9&qtranslate_lang=0&filters_changed=0&filters_initial=1&asp_gen%5B%5D=title&asp_gen%5B%5D=content&asp_gen%5B%5D=excerpt&customset%5B%5D=post"
    }
    matches = create_soup(url, post=post).find_all("div", class_="asp_content")
    for elem in matches:
        url = elem.a["href"]
        thumb = elem.find("div", class_="asp_image asp_lazy")["data-src"]
        title = elem.h3.text.strip()

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumb,
                        infoLabels={'year': '-'})

        if "serie-" in url:
            new_item.action = "seasons"
            new_item.contentSerieName = new_item.title
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)
        else:
            new_item.action = "findvideos"
            new_item.contentTitle = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #17
def mostrar_perfil(item):
    logger.debug()
    alfav = KodfavouritesData()

    itemlist = []

    i_perfil = item.i_perfil
    if not alfav.user_favorites[i_perfil]: return itemlist
    last_i = len(alfav.user_favorites[i_perfil]['items']) - 1

    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(alfav.user_favorites[i_perfil]['items']):

        it = Item().fromurl(enlace)
        it.from_channel = 'kodfavorites'
        it.context = [ {'title': config.get_localized_string(70617), 'channel': item.channel, 'action': 'acciones_enlace',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil} ]

        it.plot += '[CR][CR]' + config.get_localized_string(70724) + ': ' + it.channel + ' ' + config.get_localized_string(60266) + ': ' + it.action
        if it.extra != '': it.plot += ' Extra: ' + it.extra
        it.plot += '[CR]Url: ' + it.url if isinstance(it.url, str) else '...'
        if it.date_added != '': it.plot += '[CR]' + config.get_localized_string(70469) + ': ' + it.date_added

        if it.server:
            it.thumbnail = it.contentThumbnail
            it.title += ' [{}]'.format(it.serverName)

        # If it is not a url, nor does it have the system path, convert the path since it will have been copied from another device.
        # It would be more optimal if the conversion was done with an import menu, but at the moment it is controlled in run-time.
        if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(ruta_runtime):
            ruta, fichero = filetools.split(it.thumbnail)
            if ruta == '' and fichero == it.thumbnail: # in linux the split with a windows path does not separate correctly
                ruta, fichero = filetools.split(it.thumbnail.replace('\\','/'))
            if 'channels' in ruta and 'thumb' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'channels', 'thumb', fichero)
            elif 'themes' in ruta and 'default' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'themes', 'default', fichero)

        itemlist.append(it)

    return itemlist
Example #18
def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
    patron += 'font-weight-500">([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = urlparse.urljoin(host, scrapedurl)
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel,
                        title=scrapedtitle,
                        url=url,
                        thumbnail=thumbnail)

        new_item.contentSerieName = scrapedtitle
        new_item.action = 'seasons'
        new_item.context = filtertools.context(item, list_language,
                                               list_quality)
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination

    if itemlist != []:

        page_base = host + 'catalogue'
        next_page = scrapertools.find_single_match(
            data, '<a href="([^ ]+)" aria-label="Netx">')
        if next_page != '':
            itemlist.append(
                Item(channel=item.channel,
                     action="list_all",
                     title='Siguiente >>>',
                     url=page_base + next_page,
                     thumbnail=get_thumb("more.png"),
                     type=item.type))
    return itemlist
Example #19
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    logger.debug("datito %s" % data)

    url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')

    data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')

    matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                         '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                         re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, url, title, genres, plot in matches:

        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if "Pelicula Anime" in genres:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #20
def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?meta.*?year">([^<]+)<(.*?)<p>(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        language = get_language(lang_data)
        if language:
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        action=action,
                        language=language,
                        infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #21
def section(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", class_="sidebar left")
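    # The sidebar contains both the "top" ranking blocks and the alphabetical index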

    if "Alfabético" not in item.title:
        value = soup.find_all("div",
                              class_=lambda x: x and x.startswith("top-"))
    else:
        value = soup.find("div", id="letters").find_all("a")

    for elem in value:

        action = "alpha_list"
        if "Alfabético" not in item.title:
            elem_data = elem.find_all("a")
            elem = elem_data[0] if len(elem_data) == 1 else elem_data[1]
            action = "seasons"
            url = elem["href"]
        else:
            url = urlparse.urljoin(host, elem["href"])

        title = elem.text

        new_item = Item(channel=item.channel,
                        title=title,
                        action=action,
                        url=url)

        if "letra" not in url:
            new_item.contentSerieName = title
            new_item.context = filtertools.context(item, list_idiomas,
                                                   list_quality)

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #22
def novedades_anime(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    matches = re.compile('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
                         '(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)
        if _type != "Película":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    return itemlist
Example #23
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find("div", class_="content").find_all("article", id=re.compile(r"^post-\d+"))

    for elem in matches:
        url = elem.a["href"]
        title = elem.img["alt"]
        thumb = elem.img["data-srcset"]
        try:
            year = elem.p.text
        except:
            year = '-'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if "series/" in url:
            new_item.contentSerieName = title
            new_item.action = "seasons"
            new_item.context = filtertools.context(item, list_language, list_quality)
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("div", class_="pagMovidy")[-1].a["href"]
    except:
        return itemlist

    if url_next_page and len(matches) > 16:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist
Example #24
def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')

    data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    data = "".join(data)

    matches = re.compile('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<a href="([^"]+)">(.*?)</a>.+?'
                         'class="Desc ScrlV"><p>(.*?)</p>', re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, _type, url, title, plot in matches:

        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist
Example #25
def novedades_anime(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(
        data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
    matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>',
                         re.DOTALL).findall(data)
    itemlist = []
    for thumbnail, url, title in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        title = clean_title(title)
        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        fulltitle=title)
        new_item.show = title
        new_item.context = renumbertools.context(item)
        itemlist.append(new_item)
    return itemlist
Example #26
def novedades_anime(item):
    logger.info()
    itemlist = []

    patr = '<ul class="ListAnimes[^>]+>(.*?)</ul>'
    data = get_source(item.url, patron=patr)

    patron = 'href="([^"]+)".+?<img src="([^"]+)".+?'
    patron += '<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
    patron += '(?:</p><p>(.*?)</p>.+?)?</article></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot)

        if _type != "Película":
            new_item.contentSerieName = title
            new_item.context = renumbertools.context(item)

        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
Example #27
def mainlist(item):
    logger.info()

    itemlist = []
    list_canales = get_channels_list()

    thumbnail = ""  # defensive default (assumed fallback) so the first Item below always has a thumbnail value
    if list_canales['peliculas']:
        thumbnail = get_thumb("channels_movie.png")
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="peliculas",
                    title="Películas",
                    thumbnail=thumbnail)

    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['infantiles']:
        thumbnail = get_thumb("channels_children.png")
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="infantiles",
                    title="Para niños",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['series']:
        thumbnail = get_thumb("channels_tvshow.png")
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="series",
                    title="Episodios de series",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['anime']:
        thumbnail = get_thumb("channels_anime.png")
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="anime",
                    title="Episodios de anime",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['documentales']:
        thumbnail = get_thumb("channels_documentary.png")
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="documentales",
                    title="Documentales",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    return itemlist
Example #28
def new_episodes(item):
    logger.info()

    itemlist = list()

    if not item.post:
        soup = create_soup(item.url)
    else:
        soup = create_soup(item.url, post={"searchquery": item.post})

    language = item.extra_lang

    matches = soup.find("div", class_="cajita").find_all("div", class_="capitulo-caja")

    next = True
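    # Paginate locally over the scraped list, 20 entries at a time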
    first = item.first
    last = first + 20

    if last > len(matches):
        last = len(matches)
        next = False

    for elem in matches[first:last]:

        url = host + scrapertools.find_single_match(elem.get("onclick", ""), "'/([^']+)'")
        if not item.post:
            info = elem.find("div", class_="preopacidad")
            action = "findvideos"
            contentType = 'episode'
        else:
            info = elem.find("div", class_="capitulo-info")
            action = "seasons"
            contentType = 'tvshow'

        img_info = elem.find("div", class_="capitulo-imagen").get("style", "")
        title = re.sub(r'[\ \n]{2,}|\(.*\)', "", info.text).replace('"', " ").strip()
        if scrapertools.find_single_match(title, r"(\d+)x(\d+)"):
            season, episode = scrapertools.find_single_match(title, r"(\d+)x(\d+)")
        else:
            season = 1
            episode = 1
        c_title = scrapertools.find_single_match(title, r"\d+x\d+ ([^$]+)")
        if not c_title:
            c_title = title
        thumb = host + scrapertools.find_single_match(img_info, "url\('([^']+)")
        
        if action == "findvideos":
            new_item = Item(channel=item.channel, action=action, title=title, url=url,
                            thumbnail=thumb, language=language, contentSerieName=c_title, 
                            contentType=contentType, infoLabels={'season': season, 'episode': episode})
        else:
            new_item = Item(channel=item.channel, action=action, title=title, url=url,
                            thumbnail=thumb, language=language, contentSerieName=c_title, 
                            contentType=contentType)

        if item.post:
            new_item.context = filtertools.context(item, list_idiomas, list_quality)

        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if next:
        url_next_page = item.url
        first = last
    else:
        return itemlist

    if len(matches) > 20:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='new_episodes',
                             first=first))
    return itemlist
Example #29
def mainlist(item, thumbnail_type="squares"):
    logger.info()

    itemlist = []
    list_canales = get_list_canales()

    thumbnail_base = "http://media.tvalacarta.info/pelisalacarta/" + thumbnail_type + "/"
    thumbnail = thumbnail_base + '/disabled'

    if list_canales['peliculas']:
        thumbnail = thumbnail_base + "/thumb_canales_peliculas.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="peliculas",
                    title="Film",
                    thumbnail=thumbnail)

    new_item.context = [{
        "title": "Canali inclusi in: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "settingCanal",
        "channel": new_item.channel
    }]
    new_item.category = "Novità in %s" % new_item.title
    itemlist.append(new_item)
    '''
    if list_canales['infantiles']:
        thumbnail = thumbnail_base + "/thumb_canales_infantiles.png"
    new_item = Item(channel=item.channel, action="novedades", extra="infantiles", title="Cartoni Animati",
                    thumbnail=thumbnail)
    new_item.context = [{"title": "Canali inclusi in: %s" %new_item.title,
                         "extra": new_item.extra,
                         "action": "settingCanal",
                         "channel": new_item.channel}]
    new_item.category = "Novità in %s" % new_item.title
    itemlist.append(new_item)
    '''
    if list_canales['series']:
        thumbnail = thumbnail_base + "/thumb_canales_series.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="series",
                    title="Episodi Serie TV",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canali inclusi in: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "settingCanal",
        "channel": new_item.channel
    }]
    new_item.category = "Novità in %s" % new_item.title
    itemlist.append(new_item)

    if list_canales['anime']:
        thumbnail = thumbnail_base + "/thumb_canales_anime.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="anime",
                    title="Episodi Anime",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canali inclusi in: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "settingCanal",
        "channel": new_item.channel
    }]
    new_item.category = "Novità in %s" % new_item.title
    itemlist.append(new_item)

    if list_canales['documentales']:
        thumbnail = thumbnail_base + "/thumb_canales_documentales.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="documentales",
                    title="Documentari",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canali inclusi in: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "settingCanal",
        "channel": new_item.channel
    }]
    new_item.category = "Novità in %s" % new_item.title
    itemlist.append(new_item)

    # itemlist.append(Item(channel=item.channel, action="menu_opciones", title="Opciones", viewmode="list",
    #                     thumbnail=thumbnail_base + "/thumb_configuracion_0.png"))

    return itemlist
Example #30
def mainlist(item, thumbnail_type="squares"):
    logger.info()

    itemlist = []
    list_canales = get_list_canales()

    thumbnail_base = plugin_media_url + thumbnail_type + "/"
    thumbnail = thumbnail_base + '/disabled'

    if list_canales['peliculas']:
        thumbnail = thumbnail_base + "/thumb_canales_peliculas.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="peliculas",
                    title="Películas",
                    thumbnail=thumbnail)

    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['infantiles']:
        thumbnail = thumbnail_base + "/thumb_canales_infantiles.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="infantiles",
                    title="Para niños",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['series']:
        thumbnail = thumbnail_base + "/thumb_canales_series.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="series",
                    title="Episodios de series",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['anime']:
        thumbnail = thumbnail_base + "/thumb_canales_anime.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="anime",
                    title="Episodios de anime",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    if list_canales['documentales']:
        thumbnail = thumbnail_base + "/thumb_canales_documentales.png"
    new_item = Item(channel=item.channel,
                    action="novedades",
                    extra="documentales",
                    title="Documentales",
                    thumbnail=thumbnail)
    new_item.context = [{
        "title": "Canales incluidos en: %s" % new_item.title,
        "extra": new_item.extra,
        "action": "setting_channel",
        "channel": new_item.channel
    }]
    new_item.category = "Novedades en %s" % new_item.extra
    itemlist.append(new_item)

    return itemlist
Example #31
def search(item, texto):
    logger.info()
    itemlist = []

    texto = texto.replace(" ", "+")
    post = "value=%s&limit=100" % texto

    if clone:
        item.url = "{}browse?q={}".format(HOST, texto)
    else:
        item.url = urlparse.urljoin(HOST, "api/animes/search")
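    # Clone hosts are scraped from their HTML listing; the main host is queried through its JSON search API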

    try:
        if clone:
            response = httptools.downloadpage(item.url).data
            response = scrapertools.find_single_match(
                response, 'class="ListAnimes.+?</ul>')
            patron = '(?is)article class.+?a href="(.+?)".+?img src="(.+?)".+?class="type.+?>(.+?)<.+?class="Title".*?>(.+?)<.+?class="des".*?>(.+?)</p'
            matches = scrapertools.find_multiple_matches(response, patron)
            for url, thumb, _type, title, plot in matches:
                _type = _type.lower()
                url = urlparse.urljoin(HOST, url)
                it = Item(action="episodios",
                          contentType="tvshow",
                          channel=item.channel,
                          plot=plot,
                          thumbnail=thumb,
                          title=title,
                          url=url)
                if "película" in _type:
                    it.contentType = "movie"
                    it.contentTitle = title
                else:
                    it.contentSerieName = title
                    it.context = renumbertools.context(item)
                itemlist.append(it)
        else:
            dict_data = httptools.downloadpage(item.url, post=post).json
            for e in dict_data:
                if e["id"] != e["last_id"]:
                    _id = e["last_id"]
                else:
                    _id = e["id"]
                url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
                title = e["title"]
                #if "&#039;" in title:
                #    title = title.replace("&#039;","")
                #if "&deg;" in title:
                #    title = title.replace("&deg;","")
                thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
                new_item = item.clone(action="episodios",
                                      title=title,
                                      url=url,
                                      thumbnail=thumbnail)
                if e["type"] != "movie":
                    new_item.contentSerieName = title
                    new_item.context = renumbertools.context(item)
                else:
                    new_item.contentType = "movie"
                    new_item.contentTitle = title
                itemlist.append(new_item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    tmdb.set_infoLabels(itemlist, seekTmdb=True)

    return itemlist
Example #32
def list_all(item):
    logger.info()

    itemlist = []

    if item.ar_post and not item.not_post:
        data = get_source(item.url, post=item.ar_post)
    else:
        data = get_source(item.url)
    patron = '<div class="col-6.*?href="([^"]+)".*?>(.*?)<img.*?'  #url, info
    patron += 'data-src="([^"]+)".*?<p.*?>([^<]+)</p>'  #thumb,title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, info, scrapedthumbnail, scrapedtitle in matches:
        _type = scrapertools.find_single_match(info, '>([^<]+)</').lower()
        year = scrapertools.find_single_match(scrapedtitle, '\(\d{4}\)') or '-'
        url = scrapedurl
        if not url.startswith('http'):
            url = urlparse.urljoin(host, url)
        thumbnail = scrapedthumbnail
        thumb = scrapertools.find_single_match(thumbnail, 'portadas/(.*)')
        lang = 'VOSE'
        title = scrapedtitle
        scrapedtitle = re.sub('\(.*?\)$', '', scrapedtitle).strip()
        if _type:
            title += '[COLOR darkgrey] (%s)[/COLOR]' % _type.capitalize()
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        new_item = Item(channel=item.channel,
                        title=title,
                        thumbnail=thumbnail,
                        language=lang,
                        thumb=thumb,
                        infoLabels={'year': year})
        if 'pel' in _type:
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'
            new_item.url = url.replace(host, '%s1/' % host)
        else:
            new_item.plot = _type.capitalize()
            new_item.contentSerieName = scrapedtitle
            new_item.context = context
            new_item.action = 'episodios'
            new_item.url = url
        itemlist.append(new_item)

    # Pagination
    next_page = scrapertools.find_single_match(
        data, ' data-id="(\d+)" aria-label="Next">')

    if next_page != "":
        ar_post = re.sub('pinput=(\d+)&', 'pinput=%s&' % next_page,
                         item.ar_post)
        itemlist.append(
            Item(channel=item.channel,
                 action="list_all",
                 title=">> Página siguiente",
                 url=item.url,
                 ar_post=ar_post,
                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
Example #33
def mainlist(item, thumbnail_type="squares"):
    logger.info("streamondemand.channels.novedades mainlist")

    itemlist = []
    list_canales = get_list_canales()

    thumbnail_base = "http://media.tvalacarta.info/pelisalacarta/" + thumbnail_type + "/"

    categorias = [("peliculas", "Film"),
                  ("infantiles", "Cartoni Animati"),
                  ("series", "Episodi Serie Tv"),
                  ("anime", "Episodi Anime"),
                  ("documentales", "Documentari")]

    for extra, title in categorias:
        # Highlight the category thumbnail only when it has channels behind it,
        # otherwise fall back to the generic "disabled" placeholder.
        if list_canales[extra]:
            thumbnail = thumbnail_base + "thumb_canales_%s.png" % extra
        else:
            thumbnail = thumbnail_base + "disabled"

        new_item = Item(channel=item.channel, action="novedades", extra=extra, title=title, thumbnail=thumbnail)
        new_item.context = [{"title": "Canali inclusi in: %s" % new_item.title,
                             "extra": new_item.extra,
                             "action": "settingCanal",
                             "channel": new_item.channel}]
        new_item.category = "Novità in %s" % new_item.extra
        itemlist.append(new_item)

    #itemlist.append(Item(channel=item.channel, action="menu_opciones", title="Opciones", viewmode="list",
    #                     thumbnail=thumbnail_base + "/thumb_configuracion.png"))

    return itemlist
Example #34
0
def list_all(item):
    logger.info()

    itemlist = list()
    next = True
    if not item.url.startswith(host):
        item.url = host + item.url
    if item.post:
        soup = BeautifulSoup(get_source(item.url, post=item.post),
                             "html5lib",
                             from_encoding="utf-8")
    else:
        soup = create_soup(item.url, referer=host)

    matches = soup.find_all("div", class_="span-6 inner-6 tt view")

    first = item.first
    last = first + 20
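    # Local pagination: each call renders up to 20 of the scraped results starting at item.first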

    if last > len(matches):
        last = len(matches)
        next = False

    for elem in matches[first:last]:
        lang = list()
        url = elem.a["href"]
        title = elem.find("a", class_="link")["title"]
        title = re.sub(r"\..*", "", title)
        thumb = elem.img["src"]

        new_item = Item(channel=item.channel,
                        title=title,
                        url=host + url,
                        thumbnail=thumb,
                        infoLabels={})
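        # A '/show/' URL is a series; anything else is treated as a single title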

        if '/show/' in url:
            new_item.contentSerieName = title
            new_item.action = 'seasons'
            new_item.context = filtertools.context(item, list_language,
                                                   list_quality)
        else:
            lang_data = elem.find("div", class_="left").find_all("img")
            for l in lang_data:
                if l["src"]:

                    lang.append(
                        IDIOMAS.get(
                            lang_from_flag(l["src"], "/static/style/images/",
                                           "png"), ''))

            new_item.language = lang
            new_item.contentTitle = title
            new_item.infoLabels["year"] = "-"
            new_item.action = 'findvideos'

        itemlist.append(new_item)
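    # Fill in missing metadata (year, plot, artwork) from TMDB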
    tmdb.set_infoLabels_itemlist(itemlist, True)

    if next:
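        # Unseen results remain in this page's list: reuse the URL and advance the offset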
        url_next_page = item.url
        first = last
    else:
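        # The local list is exhausted: follow the site's own "next" link, if any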
        try:
            url_next_page = host + soup.find("ul", id="filter").find(
                "a", class_="current").next_sibling["href"]
        except Exception:
            # No further page link found: stop paginating
            url_next_page = False
        first = 0

    if url_next_page and len(matches) > 20:
        itemlist.append(
            Item(channel=item.channel,
                 title="Siguiente >>",
                 url=url_next_page,
                 action='list_all',
                 first=first))

    return itemlist
Example #35
0
def mostrar_perfil(item):
    logger.info()
    alfav = AlfavoritesData()

    itemlist = []

    i_perfil = item.i_perfil
    if not alfav.user_favorites[i_perfil]:
        return itemlist
    last_i = len(alfav.user_favorites[i_perfil]['items']) - 1

    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(alfav.user_favorites[i_perfil]['items']):
        context = []
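        # Context menu for this link: reorder, move to another folder, rename,
        # recolor, change thumbnail and delete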

        if i_enlace > 0:
            context.append({
                'title': 'Mover arriba del todo',
                'channel': item.channel,
                'action': 'mover_enlace',
                'i_enlace': i_enlace,
                'i_perfil': i_perfil,
                'direccion': 'top'
            })
            context.append({
                'title': 'Mover hacia arriba',
                'channel': item.channel,
                'action': 'mover_enlace',
                'i_enlace': i_enlace,
                'i_perfil': i_perfil,
                'direccion': 'arriba'
            })
        if i_enlace < last_i:
            context.append({
                'title': 'Mover hacia abajo',
                'channel': item.channel,
                'action': 'mover_enlace',
                'i_enlace': i_enlace,
                'i_perfil': i_perfil,
                'direccion': 'abajo'
            })
            context.append({
                'title': 'Mover abajo del todo',
                'channel': item.channel,
                'action': 'mover_enlace',
                'i_enlace': i_enlace,
                'i_perfil': i_perfil,
                'direccion': 'bottom'
            })

        # If there is more than one folder, allow moving the link between folders
        if len(alfav.user_favorites) > 1:
            context.append({
                'title': 'Mover a otra carpeta',
                'channel': item.channel,
                'action': 'editar_enlace_carpeta',
                'i_enlace': i_enlace,
                'i_perfil': i_perfil
            })

        context.append({
            'title': 'Cambiar título',
            'channel': item.channel,
            'action': 'editar_enlace_titulo',
            'i_enlace': i_enlace,
            'i_perfil': i_perfil
        })

        context.append({
            'title': 'Cambiar color',
            'channel': item.channel,
            'action': 'editar_enlace_color',
            'i_enlace': i_enlace,
            'i_perfil': i_perfil
        })

        context.append({
            'title': 'Cambiar thumbnail',
            'channel': item.channel,
            'action': 'editar_enlace_thumbnail',
            'i_enlace': i_enlace,
            'i_perfil': i_perfil
        })

        context.append({
            'title': 'Eliminar enlace',
            'channel': item.channel,
            'action': 'eliminar_enlace',
            'i_enlace': i_enlace,
            'i_perfil': i_perfil
        })

        it = Item().fromurl(enlace)
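        # Each favourite is stored as a serialized Item; fromurl() rebuilds it here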
        it.context = context
        it.plot = '[COLOR blue]Canal: ' + it.channel + '[/COLOR][CR]' + it.plot

        # If the thumbnail is neither a URL nor under the system path, convert the path,
        # since it was probably copied from another device. Doing the conversion from an
        # import menu would be cleaner, but for now it is handled at run time.
        if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(
                ruta_runtime):
            ruta, fichero = filetools.split(it.thumbnail)
            if ruta == '' and fichero == it.thumbnail:  # on Linux, split() does not handle a Windows-style path correctly
                ruta, fichero = filetools.split(it.thumbnail.replace('\\', '/'))
            if 'channels' in ruta and 'thumb' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources',
                                              'media', 'channels', 'thumb',
                                              fichero)
            elif 'themes' in ruta and 'default' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources',
                                              'media', 'themes', 'default',
                                              fichero)

        itemlist.append(it)

    return itemlist