Example #1
def parse_videos(item, tipo, data):
    logger.info()

    itemlist = []

    pattern = "<td.+?<img src='/assets/img/banderas/([^\.]+).+?</td><td.+?>(.*?)</td><td.+?" \
              "<img src='/assets/img/servidores/([^\.]+).+?</td><td.+?href='([^']+)'.+?>.*?</a></td>" \
              "<td.+?>(.*?)</td>"

    links = re.findall(pattern, data, re.MULTILINE | re.DOTALL)

    for language, date, server, link, quality in links:
        if quality == "":
            quality = "SD"
        title = "{tipo} en {server} [{idioma}] [{quality}] ({fecha})".\
            format(tipo=tipo, server=server, idioma=IDIOMAS.get(language, "OVOS"), quality=quality, fecha=date)

        itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, link), action="play",
                             show=item.show, language=IDIOMAS.get(language, "OVOS"), quality=quality,
                             list_idiomas=list_idiomas, list_calidad=CALIDADES, fulltitle=item.title,
                             context=filtertools.context))
        # context=CONTEXT+"|guardar_filtro"))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    return itemlist
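A minimal standalone sketch of what that pattern captures. The sample table row below is invented for illustration (the site's real markup may differ), but it shows the five groups the loop unpacks: language flag, date, server, link and quality, with an empty quality falling back to "SD".

import re

pattern = "<td.+?<img src='/assets/img/banderas/([^\.]+).+?</td><td.+?>(.*?)</td><td.+?" \
          "<img src='/assets/img/servidores/([^\.]+).+?</td><td.+?href='([^']+)'.+?>.*?</a></td>" \
          "<td.+?>(.*?)</td>"

# Hypothetical row shaped like the markup the pattern expects.
sample = ("<td class='c'><img src='/assets/img/banderas/es.png'></td>"
          "<td class='c'>2021-05-01</td>"
          "<td class='c'><img src='/assets/img/servidores/streamtape.png'></td>"
          "<td class='c'><a href='/enlace/12345/' target='_blank'>ver</a></td>"
          "<td class='c'>HD</td>")

for language, date, server, link, quality in re.findall(pattern, sample, re.MULTILINE | re.DOTALL):
    print("%s | %s | %s | %s | %s" % (language, date, server, link, quality or "SD"))
    # -> es | 2021-05-01 | streamtape | /enlace/12345/ | HD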
Example #2
def findvideos(item):
    logger.info("url: {0}".format(item.url))

    data = httptools.downloadpage(item.url).data

    expr = 'mtos'                                                 + '.+?'+ \
           '<div.+?images/(?P<lang>[^\.]+)'                       + '.+?'+ \
           '<div[^>]+>\s+(?P<date>[^\s<]+)'                       + '.+?'+ \
           '<div.+?img.+?>\s*(?P<server>.+?)</div>'               + '.+?'+ \
           '<div.+?href="(?P<url>[^"]+).+?images/(?P<type>[^\.]+)' + '.+?'+ \
           '<div[^>]+>\s*(?P<quality>.*?)</div>'                  + '.+?'+ \
           '<div.+?<a.+?>(?P<uploader>.*?)</a>'

    links = re.findall(expr, data, re.MULTILINE | re.DOTALL)

    itemlist = [item.clone(
                     action    = "play",
                     title     = "{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(linkType="Ver" if linkType != "descargar" else "Descargar",
                                                                                                           lang=IDIOMAS.get(lang, lang),
                                                                                                           date=date,
                                                                                                           server=server.rstrip(),
                                                                                                           quality=quality,
                                                                                                           uploader=uploader),
                     url       = urlparse.urljoin(HOST, url),
                     language  = IDIOMAS.get(lang, lang),
                     quality   = quality,
                     list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context
                ) for lang, date, server, url, linkType, quality, uploader in links ]

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    return itemlist
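Although the expression uses named groups, re.findall still returns plain tuples ordered by group number, so the unpacking above must list the fields in exactly that order (lang, date, server, url, type, quality, uploader). As a sketch, the same matches could instead be read by name inside findvideos, the way Example #5 does with groupdict():

for m in re.finditer(expr, data, re.MULTILINE | re.DOTALL):
    fields = m.groupdict()
    # fields -> {'lang': ..., 'date': ..., 'server': ..., 'url': ..., 'type': ..., 'quality': ..., 'uploader': ...}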
Example #3
def episodios(item):
    logger.info("{0} - {1}".format(item.title, item.url))

    itemlist = []

    # Descarga la página
    data = scrapertools.cache_page(item.url)

    fanart = scrapertools.find_single_match(data, "background-image[^'\"]+['\"]([^'\"]+)")
    plot = scrapertools.find_single_match(data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>")

    logger.debug("fanart: {0}".format(fanart))
    logger.debug("plot: {0}".format(plot))


    episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, re.MULTILINE | re.DOTALL)
    for url, title, flags in episodes:
        idiomas = " ".join(["[{0}]".format(IDIOMAS.get(language, "OVOS")) for language in re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
        displayTitle = "{show} - {title} {languages}".format(show = item.show, title = title, languages = idiomas)
        logger.debug("Episode found {0}: {1}".format(displayTitle, urlparse.urljoin(HOST, url)))
        itemlist.append(item.clone(title=displayTitle, url=urlparse.urljoin(HOST, url),
                                   action="findvideos", plot=plot, fanart=fanart, language=idiomas,
                                   list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios"))

    return itemlist
Example #4
def episodios(item):
    logger.info("url: {0}".format(item.url))

    data = httptools.downloadpage(item.url).data

    episodes = re.findall('visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>', data, re.MULTILINE | re.DOTALL)

    itemlist = []
    for url, title, langs in episodes:
        languages = " ".join(["[{0}]".format(IDIOMAS.get(lang, lang)) for lang in re.findall('images/s-([^\.]+)', langs)])
        itemlist.append(item.clone(action      = "findvideos",
                                   title       = "{0} {1} {2}".format(item.title, title, languages),
                                   url         = urlparse.urljoin(HOST, url),
                                   language    = languages,
                                   list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context
                            ))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    # Opción "Añadir esta serie a la biblioteca de XBMC"
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(item.clone(title="Añadir esta serie a la biblioteca", action="add_serie_to_library", extra="episodios"))

    return itemlist
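A small sketch of the flag-to-language join used in both episode parsers above; the IDIOMAS mapping and the markup snippet here are invented for illustration.

import re

IDIOMAS = {"esp": "Español", "lat": "Latino"}   # hypothetical mapping
langs = "<img src='images/s-esp.png'><img src='images/s-lat.png'>"
languages = " ".join(["[{0}]".format(IDIOMAS.get(lang, lang))
                      for lang in re.findall('images/s-([^\.]+)', langs)])
print(languages)  # -> [Español] [Latino]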
Example #5
def parseVideos(item, typeStr, data):
    videoPatternsStr = [
        '<tr.+?<span>(?P<date>.+?)</span>.*?banderas/(?P<language>[^\.]+).+?href="(?P<link>[^"]+).+?servidores/'
        '(?P<server>[^\.]+).*?</td>.*?<td>.*?<span>(?P<uploader>.+?)</span>.*?<span>(?P<quality>.*?)</span>',
        '<tr.+?banderas/(?P<language>[^\.]+).+?<td[^>]*>(?P<date>.+?)</td>.+?href=[\'"](?P<link>[^\'"]+)'
        '.+?servidores/(?P<server>[^\.]+).*?</td>.*?<td[^>]*>.*?<a[^>]+>(?P<uploader>.+?)</a>.*?</td>.*?<td[^>]*>'
        '(?P<quality>.*?)</td>.*?</tr>'
    ]

    for vPatStr in videoPatternsStr:
        vPattIter = re.compile(vPatStr, re.MULTILINE | re.DOTALL).finditer(data)

        itemlist = []

        for vMatch in vPattIter:
            vFields = vMatch.groupdict()
            quality = vFields.get("quality")
            if not quality:
                quality = "SD"

            title = "{0} en {1} [{2}] [{3}] ({4}: {5})"\
                .format(typeStr, vFields.get("server"), IDIOMAS.get(vFields.get("language"), "OVOS"), quality,
                        vFields.get("uploader"), vFields.get("date"))
            itemlist.append(item.clone(title=title, fulltitle=item.title, url=urlparse.urljoin(HOST, vFields.get("link")),
                                       action="play", language=IDIOMAS.get(vFields.get("language"), "OVOS"),
                                       quality=quality, list_idiomas=list_idiomas, list_calidad=CALIDADES,
                                       context=filtertools.context))

        if len(itemlist) > 0 and filtertools.context:
            itemlist = filtertools.get_links(itemlist, item.channel)

        if len(itemlist) > 0:
            return itemlist

    return []
Example #6
def episodios(item):
    logger.info()

    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)

    data = re.sub(r"a> <img src=/assets/img/banderas/", "a><idioma>", data)
    data = re.sub(r"<img src=/assets/img/banderas/", "|", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>\s+<", "</idioma><", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>", "", data)

    patron = '<div id="T1".*?'
    patron += "<img src='([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    thumbnail = ""
    if len(matches) > 0:
        thumbnail = matches[0]

    patron = "<a href='([^']+)'>(.*?)</a><idioma>(.*?)</idioma>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedidioma in matches:
        idioma = ""
        for i in scrapedidioma.split("|"):
            idioma += " [" + IDIOMAS.get(i, "OVOS") + "]"
        title = scrapedtitle + idioma

        itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl),
                             action="findvideos", show=item.show, thumbnail=thumbnail, plot="", language=idioma,
                             list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    # Opción "Añadir esta serie a la biblioteca de XBMC"
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))

    return itemlist
Example #7
def findvideos(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = "<option value=(.*?) data-content=.*?width='16'> (.*?) <span class='text-muted'>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, language in matches:
        if 'jpg' in scrapedurl:
            vip_data = httptools.downloadpage(scrapedurl,
                                              follow_redirects=False)
            scrapedurl = vip_data.headers['location']
        title = '%s [%s]'
        itemlist.append(
            item.clone(title=title,
                       url=scrapedurl.strip(),
                       action='play',
                       language=IDIOMAS[language]))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
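The title is deliberately left as the placeholder '%s [%s]': servertools.get_servers_itemlist detects the server for each link and the lambda then fills the placeholder in. A minimal sketch of just that formatting step, with invented item values:

class FakeItem(object):
    title = '%s [%s]'
    server = 'streamtape'    # hypothetical value filled in by server detection
    language = 'Castellano'

fill_title = lambda x: x.title % (x.server.capitalize(), x.language)
print(fill_title(FakeItem()))  # -> Streamtape [Castellano]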
Example #8
def findvideos(item):
    logger.info()

    itemlist = list()
    # soup = create_soup(item.url).find("ul", class_="TPlayerNv").find_all("li")
    soup, matches = AlfaChannel.get_video_options(item.url)

    infoLabels = item.infoLabels

    for btn in matches:
        opt = btn["data-tplayernv"]
        srv = btn.span.text.lower()
        if "opci" in srv.lower():
            # srv = "okru"
            continue
        itemlist.append(
            Item(channel=item.channel,
                 title=srv,
                 url=item.url,
                 action='play',
                 server=srv,
                 opt=opt,
                 language='LAT',
                 infoLabels=infoLabels))

    itemlist = sorted(itemlist, key=lambda i: i.server)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_idiomas,
                                     list_quality)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #9
def findvideos(item):
    logger.info()
    itemlist = []
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron_language = '(<ul id=level\d_.*?\s*class=.*?ul>)'
    matches = re.compile(patron_language, re.DOTALL).findall(data)

    for language in matches:
        video_list.extend(get_links_by_language(item, language))

    video_list = servertools.get_servers_itemlist(
        video_list, lambda i: i.title %
        (i.server.capitalize(), i.language, i.quality))
    # Requerido para FilterTools

    video_list = filtertools.get_links(video_list, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(video_list, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
            video_list.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return video_list
Example #10
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    if item.language == 'Espa&ntilde;ol':
        item.language = 'Español'
    for videoitem in itemlist:
        videoitem.language = IDIOMAS[item.language]
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ') (' + videoitem.language + ')'
        videoitem.channel = item.channel
        videoitem.folder = False
        videoitem.extra = item.thumbnail
        videoitem.fulltitle = item.title
        videoitem.quality = 'default'

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
Example #11
def findvideos(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url, unescape=True)

    sources = soup.find("div", class_="playex")
    langs = soup.find("ul", class_="idTabs sourceslist")

    for elem in sources.find_all("div", class_="play-box-iframe"):

        opt = elem["id"]
        url = elem.iframe["src"]
        if "hideload" in url:
            url = unhideload(url)

        lang_data = langs.find("a", href=re.compile(r"^#%s" % opt))
        lang = languages_from_flags(lang_data.span, "png")
        itemlist.append(Item(channel=item.channel, title='%s', action='play', url=url,
                             language=lang, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))

    return itemlist
Example #12
def findvideos(item):
    import base64
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron  = ';extra_urls\[\d+\]=\'([^\']+)\''
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl in matches:
        url = base64.b64decode(scrapedurl)
        if "strdef" in url: 
            url = decode_url(url)
            if "strdef" in url:
                url = httptools.downloadpage(url).url
        elif "vcdn." in url:
            server = "fembed"

###################################### ES FEMBED

        # elif "vcdn" in url:
            # url = url.replace("https://vcdn.pw/v/", "https://vcdn.pw/api/source/")
            # post = "r=&d=vcdn.pw"
            # data1 = httptools.downloadpage(url, post=post).data
            # scrapedurl = scrapertools.find_single_match(data1,'"file":"([^"]+)"')
            # url = scrapedurl.replace("\/", "/")
            # url = httptools.downloadpage(url).url
##########################################
            itemlist.append(item.clone(action="play", server= server, url=url))
        # itemlist.append(item.clone(action="play", title="%s", url=url))
    # itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) 


    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
Example #13
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "Próximamente" in data:
        itemlist.append(Item(channel = item.channel, title = "Próximamente"))
        return itemlist
    patron  = 'data-link="([^"]+).*?'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, calidad in matches:
        calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
        itemlist.append(item.clone(
                             channel = item.channel,
                             action = "play",
                             title = calidad,
                             thumbnail = item.thumbnail,
                             contentThumbnail = item.thumbnail,
                             url = url,
                             language = IDIOMAS['Latino']
                             ))
    itemlist = servertools.get_servers_itemlist(itemlist)
    tmdb.set_infoLabels(itemlist, seekTmdb = True)
    itemlist.append(Item(channel=item.channel))
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                             contentTitle = item.contentTitle
                             ))
    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)
    return itemlist
Example #14
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<br/>', "", data)
    data = scrapertools.find_single_match(data, 'var videos =(.*?)\}')
    patron = 'src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        url = url.replace("cloud/index.php", "cloud/query.php")
        if "/player.php" in url:
            data = httptools.downloadpage(url).data
            phantom = scrapertools.find_single_match(
                data, 'Phantom.Start\("(.*?)"\)')
            phantom = phantom.replace('"+"', '')
            import base64
            packed = base64.b64decode(phantom)
            unpacked = jsunpack.unpack(packed)
            url = scrapertools.find_single_match(unpacked, '"src","([^"]+)"')
            if not url.startswith("https"):
                url = "https:%s" % url
        itemlist.append(
            Item(channel=item.channel,
                 title="%s",
                 url=url,
                 action='play',
                 language='VO',
                 contentTitle=item.contentTitle))
    itemlist = servertools.get_servers_itemlist(itemlist,
                                                lambda x: x.title % x.server)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Example #15
def episodesxseason(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find_all("tbody")
    infoLabels = item.infoLabels
    for elem in soup:
        for el in elem.find_all("tr"):
            language = list()
            try:
                season_episode = scrapertools.find_single_match(
                    el.a.text, "(\d+x\d+)").split("x")
                title = "Episodio %s" % season_episode[1]
                if int(season_episode[0]) != item.infoLabels["season"]:
                    continue
                url = el.a["href"]
                for lang in el.find_all("img"):
                    language.append(IDIOMAS[scrapertools.find_single_match(
                        lang["src"], "language/([^\.]+)\.png")])
                infoLabels["episode"] = season_episode[1]

                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=url,
                         action="findvideos",
                         language=language,
                         infoLabels=infoLabels))
            except:
                pass

    itemlist = filtertools.get_links(itemlist, item, list_idiomas,
                                     list_quality)
    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
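A standalone sketch of the season/episode split performed above ("3x07" is turned into season 3 and episode "07"); the label text here is invented.

import re

label = "3x07 - Pilot"
season_episode = re.search(r"(\d+x\d+)", label).group(1).split("x")   # ['3', '07']
if int(season_episode[0]) == 3:                # keep only episodes of the season being listed
    print("Episodio %s" % season_episode[1])   # -> Episodio 07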
Example #16
def findvideos(item):
    logger.info()

    itemlist = list()
    soup = create_soup(item.url).find("ul", class_="TPlayerNv").find_all("li")
    infoLabels = item.infoLabels
    for btn in soup:
        opt = btn["data-tplayernv"]
        info = btn.find_all("span")
        srv = info[0].text
        lang = info[1].text[2]

        if "opci" in srv.lower():
            # srv = "okru"
            continue
        itemlist.append(
            Item(channel=item.channel,
                 title=srv,
                 url=item.url,
                 action='play',
                 server=srv,
                 opt=opt,
                 language=IDIOMAS.get(lang, "VOSE"),
                 infoLabels=infoLabels))

    itemlist = sorted(itemlist, key=lambda i: i.server)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #17
def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url, encoding=encoding, canonical=canonical).data
    matches = scrapertools.find_multiple_matches(data, 'jeg_video_container.*?src="([^"]+)' )
    for url in matches:
        itemlist.append(Item(
                        channel=item.channel,
                        contentTitle=item.contentTitle,
                        contentThumbnail=item.thumbnail,
                        infoLabels=item.infoLabels,
                        language="Latino",
                        title='%s', action="play",
                        url=url
                       ))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))

        # Opción "Añadir esta película a la videoteca de KODI"
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle = item.contentTitle
                                 ))
    return itemlist
Example #18
def findvideos(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)

    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.language = IDIOMA['latino']
        videoitem.title = '[%s] [%s]' % (videoitem.server, videoitem.language)
        videoitem.infoLabels = item.infoLabels

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Example #19
def findvideos(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    #return
    patron = r'video\[\d+\] = \'<iframe.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:

        if host in scrapedurl:
            new_data = get_source(scrapedurl)
            scrapedurl = scrapertools.find_single_match(new_data, "'file':'([^']+)")
            if not scrapedurl:
                scrapedurl = scrapertools.find_single_match(new_data, r'var shareId\s*=\s*"([^"]+)')
                scrapedurl = 'https://www.amazon.com/drive/v1/shares/%s' % scrapedurl

        if scrapedurl != '':
            itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play', language = item.language,
                                       infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #20
def findvideos(item):
    logger.info()
    srv_list = {
        'OpRu': 'okru',
        'NeoPen-O-GD': 'netutv',
        'mp4': 'mp4upload',
        'CloudVid': 'gounlimited',
    }
    itemlist = list()
    soup = create_soup(item.url).find("ul",
                                      class_="new-servers").find_all("li")
    infoLabels = item.infoLabels
    for btn in soup:
        opt = btn["data-sv"]
        srv = srv_list.get(opt, 'directo')
        id_opt = btn["id"]
        itemlist.append(
            Item(channel=item.channel,
                 url=item.url,
                 action='play',
                 server=srv,
                 id_opt=id_opt,
                 language='LAT',
                 title=srv,
                 infoLabels=infoLabels))

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_idiomas,
                                     list_quality)
    itemlist = sorted(itemlist, key=lambda i: i.server)
    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #21
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = 'onClick="toplay\((.*?)\).*?>Mirror'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for url in matches:
        url = url.replace("'", "").split(",")
        url = "http://www.veporns.com/ajax.php?page=video_play&thumb=%s&theme=%s&video=%s&id=%s&catid=%s&tip=%s&server=%s" %(url[0],url[1],url[2],url[3],url[4],url[5],str(url[6]))
        headers = {"X-Requested-With":"XMLHttpRequest"}
        data = httptools.downloadpage(url, headers=headers).data
        logger.debug(data)
        url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
        if not url:
            url = scrapertools.find_single_match(data, "<iframe src='([^']+)'")
      
        itemlist.append(item.clone(action="play", title= "%s", contentTitle= item.title, url=url))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
Example #22
def findvideos(item):
    logger.info("[animeworld.py] findvideos")

    itemlist = []

    anime_id = scrapertoolsV2.find_single_match(item.url, r'.*\..*?\/(.*)')
    data = httptools.downloadpage(host + "/ajax/episode/serverPlayer?id=" +
                                  anime_id).data
    patron = '<source src="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for video in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                 url=video,
                 server='directo',
                 contentType=item.contentType,
                 folder=False))

    # Requerido para Filtrar enlaces

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #23
def findvideos(item):
    logger.info()

    itemlist = list()
    content_id = scrapertools.find_single_match(item.url, "/(\d+)")

    soup = create_soup(item.url)
    url = soup.find("a", href=re.compile("magnet:"))["href"]
    itemlist.append(Item(channel=item.channel, title="Torrent", url=url, server="torrent", action="play",
                         language="LAT", infoLabels=item.infoLabels))

    strm_url = "%swp-json/elifilms/movies/?id=%s" % (host, content_id)

    json_data = httptools.downloadpage(strm_url).json
    for v_data in json_data["data"]["server_list"]:
        url = v_data["link"]
        server = v_data["name"].lower()

        itemlist.append(Item(channel=item.channel, title=server.capitalize(), url=url, server=server, action="play",
                             language="LAT", infoLabels=item.infoLabels))

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))

    return itemlist
Example #24
def findvideos(item):
    logger.info("%s = %s" % (item.show, item.url))

    # Descarga la página
    data = httptools.downloadpage(item.url).data
    # logger.info(data)

    online = extract_videos_section(data)
    try:
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_enlaces = 2

    list_links = []

    if filtro_enlaces != 0:
        list_links.extend(parse_videos(item, "Ver", online[-2]))

    if filtro_enlaces != 1:
        list_links.extend(parse_videos(item, "Descargar", online[-1]))
    list_links = filtertools.get_links(list_links, item, list_idiomas,
                                       CALIDADES)

    for i in range(len(list_links)):
        # The server name is the text after "en" and before the first "[".
        a = list_links[i].title
        b = a[a.find("en") + 2:]
        d = b.split('[')[0].strip()
        # Chain the replaces so the first normalization is not overwritten by the second.
        list_links[i].server = d.replace("streamix", "streamixcloud").replace("uploaded", "uploadedto")

    list_links = servertools.get_servers_itemlist(list_links)
    autoplay.start(list_links, item)

    return list_links
Example #25
def findvideos(item):

    itemlist = []
    data=get_source(item.url)
    patron = '<a href=(/reproductor.*?)target'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for link in matches:
        video_data = get_source(host+link)
        language = ''
        if 'latino' in link.lower():
            language='Latino'
        elif 'español' in link.lower():
            language = 'Español'
        elif 'subtitulado' in link.lower():
            language = 'VOSE'
        elif 'vo' in link.lower():
            language = 'VO'

        url = scrapertools.find_single_match(video_data, '<iframe src=(.*?) scrolling')
        title = '%s [%s]'

        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
                             infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #26
def findvideos(item):
    logger.info("url: %s" % item.url)

    data = httptools.downloadpage(item.url).data

    expr = 'mtos' + '.+?' + \
           '<div.+?images/(?P<lang>[^\.]+)' + '.+?' + \
           '<div[^>]+>\s+(?P<date>[^\s<]+)' + '.+?' + \
           '<div.+?img.+?>\s*(?P<server>.+?)</div>' + '.+?' + \
           '<div.+?href="(?P<url>[^"]+).+?images/(?P<type>[^\.]+)' + '.+?' + \
           '<div[^>]+>\s*(?P<quality>.*?)</div>' + '.+?' + \
           '<div.+?<a.+?>(?P<uploader>.*?)</a>'

    links = re.findall(expr, data, re.MULTILINE | re.DOTALL)

    itemlist = [
        item.clone(
            action="play",
            title=
            "{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".
            format(linkType="Ver" if linkType != "descargar" else "Descargar",
                   lang=IDIOMAS.get(lang, lang),
                   date=date,
                   server=server.rstrip(),
                   quality=quality,
                   uploader=uploader),
            url=urlparse.urljoin(HOST, url),
            language=IDIOMAS.get(lang, lang),
            quality=quality,
        ) for lang, date, server, url, linkType, quality, uploader in links
    ]

    itemlist = filtertools.get_links(itemlist, item.channel, list_idiomas,
                                     CALIDADES)

    return itemlist
Example #27
def findvideos(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)
    matches = soup.find_all("ul", class_="menuPlayer")

    for elem in matches:
        lang = re.sub("servidores-", '', elem["id"])
        for opt in elem.find_all("li", class_="option"):
            server = re.sub(r'(ver o [\w]+) en ', '', opt["title"].lower())
            if server == "google drive":
                server = "gvideo"
            if "publicidad" in server:
                continue
            url = opt.a["href"]

            itemlist.append(Item(channel=item.channel, title=server.capitalize(), url=url, server=server, action="play",
                                 language=IDIOMAS.get(lang, 'LAT')))

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))

    return itemlist
Example #28
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    data = scrapertools.find_single_match(
        data, '<div class="entry-content post_content">(.*?)</div>')
    patron = '<(?:iframe src|IFRAME SRC|a href)="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        if not "0load" in url:  #NETU
            itemlist.append(
                item.clone(action="play",
                           title="%s",
                           contentTitle=item.title,
                           url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
Example #29
def findvideos(item):
    logger.info()
    itemlist = list()
    data = create_soup(item.url)
    video_urls = data.find("aside", class_="video-player").find_all('iframe')
    info = data.find("aside", class_="video-options").find_all('li')
    for url, info in zip(video_urls, info):
        url = url['data-src']
        info = info.find('span', class_='server').text.split('-')
        srv = info[0].strip()
        lang = info[1].strip()
        infoLabels = item.infoLabels
        lang = IDIOMAS.get(lang, lang)
        # quality = info[1]
        quality = ""
        itemlist.append(Item(channel=item.channel, title=srv, url=url, action='play', server=srv, opt="1",
                            infoLabels=infoLabels, language=lang, quality=quality))
    
    # downlist = get_downlist(item, data)  #DESCARGAS ACORTADOR
    # itemlist.extend(downlist)
    
    itemlist = sorted(itemlist, key=lambda i: (i.language, i.server))

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and not item.contentSerieName:
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
Example #30
def findvideos(item):
    logger.info()

    itemlist = list()

    soup, matches = AlfaChannel.get_video_options(item.url)

    for elem in matches:
        data = AlfaChannel.get_data_by_post(elem).json
        itemlist.append(
            Item(channel=item.channel,
                 title='%s',
                 url=data['embed_url'],
                 action='play'))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
Example #31
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    patron = ' - on ([^"]+)" href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, url in matches:
        # if "streamz" in url:
        # url = url.replace("streamz.cc", "stream2.vg").replace("streamz.vg", "stream2.vg")
        if not "vidup" in url and not "vev.io/" in url:
            itemlist.append(
                item.clone(action="play",
                           title="%s",
                           contentTitle=item.title,
                           url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
Example #32
def findvideos(item):
    logger.info()

    itemlist = list()
    soup = create_soup(item.url)

    term_id = soup.find("div", class_="VideoPlayer")

    bloq = soup.find("ul", class_="ListOptions")
    for elem in bloq.find_all("li"):
        url = "https://seriesflix.co/?trembed=%s&trid=%s&trtype=2" % (
            elem["data-key"], elem["data-id"])
        server = elem.find("p", class_="AAIco-dns").text
        if server.lower() == "embed":
            server = "Mystream"
        lang = elem.find("p", class_="AAIco-language").text
        qlty = elem.find("p", class_="AAIco-equalizer").text
        title = "%s [%s]" % (server, lang)
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=url,
                 action='play',
                 language=IDIOMAS.get(lang, lang),
                 infoLabels=item.infoLabels,
                 server=server))

    #Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    #Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #33
def findvideos(item):
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    matches = soup.find_all('li', class_='OptionBx')
    serv = []
    for elem in matches:
        num = elem['data-key']
        id = elem['data-id']
        lang = elem.find('p', class_='AAIco-language').text.split()
        server = elem.find('p', class_='AAIco-dns').text.strip()
        # quality = elem.find('p', class_='AAIco-equalizer').text.split()
        # quality = quality[-1]
        lang = lang[-1]
        lang = IDIOMAS.get(lang, lang)
        server = SERVER.get(server, server)
        url = "%s//?trembed=%s&trid=%s&trtype=1" % (host, num, id)
        if not config.get_setting('unify') and not channeltools.get_channel_parameters(__channel__)['force_unify']:
            title = "[%s] [COLOR darkgrey][%s][/COLOR]" %(server,lang)
        else:
            title = server
        if not "gounlimited" in server:
            itemlist.append(item.clone(action="play", title=title, url=url, server=server, language=lang ))

    itemlist.sort(key=lambda it: (it.language))

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' and not "/episodios/" in item.url :
        itemlist.append(item.clone(action="add_pelicula_to_library", 
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                             extra="findvideos", contentTitle=item.contentTitle)) 
    return itemlist
Example #34
def findvideos(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url, forced_proxy_opt=forced_proxy_opt)
    matches = soup.find("div", class_="players").find_all("iframe")

    for elem in matches:
        url = elem["data-src"]
        itemlist.append(Item(channel=item.channel, title="%s", url=url, action="play",
                        language="LAT", infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #35
def findvideos(item):
    logger.info()
    srv_list = {'opru': 'okru', 'neopen-o-gd': 'netutv', 'mp4': 'mp4upload','streamnormal': 'netutv',
                'cloudvid': 'gounlimited', 'streamonly': 'uqload', 'uploadyour': 'yourupload',
                'embedfe': 'fembed', 'loadjet': 'uqload', 'loaduq': 'uqload', 'unlimitedgo': 'gounlimited'}
    itemlist = list()
    soup = create_soup(item.url).find("ul", class_="new-servers").find_all("li")    
    infoLabels = item.infoLabels
    for btn in soup:
        opt = btn["data-sv"]
        srv = srv_list.get(opt.lower(), 'directo').capitalize()
        id_opt = btn["id"]
        itemlist.append(Item(channel=item.channel, url=item.url, action='play', server=srv, id_opt=id_opt, language='LAT',
                        title=srv, infoLabels=infoLabels))
    
    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
    itemlist = sorted(itemlist, key=lambda i: i.server)
    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
Example #36
def episodesxseason(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<li class=series-cat><a target=_blank href=(.*?) class.*?'
    patron += 'title=.*?;(\d+).*?<span>(.*?)</span><span class=flags(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scrapedepi, scrapedtitle, lang_data in matches:
        language = []
        if 'lang-lat>' in lang_data:
            language.append('Lat')
        if 'lang-spa>' in lang_data:
            language.append('Cast')
        if 'lang-engsub>' in lang_data:
            language.append('VOSE')
        if 'lang-eng>' in lang_data:
            language.append('VO')

        title = '%sx%s - %s %s' % (infoLabels['season'], scrapedepi,
                                   scrapedtitle, language)
        infoLabels['episode'] = scrapedepi
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=scrapedurl,
                 action='findvideos',
                 language=language,
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    itemlist = filtertools.get_links(itemlist, item, list_language)
    return itemlist
Example #37
def episodios(item):
    logger.info()

    itemlist = []

    # Descarga la página
    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_HEADERS, host=CHANNEL_HOST)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    data = re.sub(r"a></td><td> <img src=/banderas/", "a><idioma/", data)
    data = re.sub(r"<img src=/banderas/", "|", data)
    data = re.sub(r"\s\|", "|", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+><", "/idioma><", data)
    data = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>", "", data)

    patron = "<img id='port_serie' src='([^']+)'.*?<li data-content=\"settings\"><p>(.*?)</p>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    thumbnail = ""
    plot = ""

    for scrapedthumbnail, scrapedplot in matches:
        thumbnail = scrapedthumbnail

        # en algunas series aparece una tabla e información de bones, se controla el posible error
        plot = scrapertools.entitiesfix(scrapertools.htmlclean(scrapedplot))
        if "Informaci&oacute;n de Bones" in plot:
            plot = plot.replace("Informaci&oacute;n de Bones", "")

    '''
    <td>
        <a href='/serie/534/temporada-1/capitulo-00/the-big-bang-theory.html'>1x00 - Capitulo 00 </a>
    </td>
    <td>
        <img src=/banderas/vo.png border='0' height='15' width='25' />
        <img src=/banderas/vos.png border='0' height='15' width='25' />
    </td>
    '''

    patron = "<a href='([^']+)'>([^<]+)</a><idioma/([^/]+)/idioma>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedidioma in matches:
        idioma = ""
        for i in scrapedidioma.split("|"):
            idioma += " [" + IDIOMAS.get(i, "OVOS") + "]"
        title = item.show + " - " + scrapedtitle + idioma
        itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl),
                             action="findvideos", show=item.show, thumbnail=thumbnail, plot=plot, language=idioma,
                             list_idiomas=list_idiomas, list_calidad=CALIDADES, context=filtertools.context))

    if len(itemlist) == 0 and "<title>404 Not Found</title>" in data:
        itemlist.append(Item(channel=item.channel, title="la url '" + item.url +
                                                         "' parece no estar disponible en la web. Iténtalo más tarde.",
                             url=item.url, action="series"))

    if len(itemlist) > 0 and filtertools.context:
        itemlist = filtertools.get_links(itemlist, item.channel)

    # Opción "Añadir esta serie a la biblioteca de XBMC"
    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))

    return itemlist