コード例 #1
0
ファイル: channelselector.py プロジェクト: myarchives/tes1
def getmainlist(view="thumb_"):
    """Build the add-on's main menu as a list of Item objects.

    view -- artwork-style prefix forwarded to get_thumb() so every entry
            uses the thumbnail matching the active view mode.

    Each entry is appended only when its corresponding add-on setting
    enables it; returns the menu entries in display order.
    """
    logger.info()
    itemlist = list()

    # Developer-only entry used to test channel hosts.
    if config.dev_mode():
        itemlist.append(
            Item(title="Redirect",
                 channel="checkhost",
                 action="check_channels",
                 thumbnail='',
                 category=config.get_localized_string(30119),
                 viewmode="thumbnails"))

    # Channels that make up the main menu.
    if addon.getSetting('enable_news_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(30130),
                 channel="news",
                 action="mainlist",
                 thumbnail=get_thumb("news.png", view),
                 category=config.get_localized_string(30119),
                 viewmode="thumbnails",
                 context=[{
                     "title": config.get_localized_string(70285),
                     "channel": "shortcuts",
                     "action": "SettingOnPosition",
                     "category": 5
                 }]))

    if addon.getSetting('enable_channels_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(30118),
                 channel="channelselector",
                 action="getchanneltypes",
                 thumbnail=get_thumb("channels.png", view),
                 view=view,
                 category=config.get_localized_string(30119),
                 viewmode="thumbnails"))

    if addon.getSetting('enable_search_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(30103),
                 channel="search",
                 path='special',
                 action="mainlist",
                 thumbnail=get_thumb("search.png", view),
                 category=config.get_localized_string(30119),
                 viewmode="list",
                 context=[{
                     "title": config.get_localized_string(60412),
                     "action": "setting_channel_new",
                     "channel": "search"
                 }, {
                     "title": config.get_localized_string(70286),
                     "channel": "shortcuts",
                     "action": "SettingOnPosition",
                     "category": 3
                 }]))

    if addon.getSetting('enable_onair_menu') == "true":
        itemlist.append(
            Item(channel="filmontv",
                 action="mainlist",
                 title=config.get_localized_string(50001),
                 thumbnail=get_thumb("on_the_air.png"),
                 viewmode="thumbnails"))

    if addon.getSetting('enable_link_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(70527),
                 channel="kodfavorites",
                 action="mainlist",
                 thumbnail=get_thumb("mylink.png", view),
                 view=view,
                 category=config.get_localized_string(70527),
                 viewmode="thumbnails"))

    if addon.getSetting('enable_fav_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(30102),
                 channel="favorites",
                 action="mainlist",
                 thumbnail=get_thumb("favorites.png", view),
                 category=config.get_localized_string(30102),
                 viewmode="thumbnails"))

    if config.get_videolibrary_support() and addon.getSetting(
            'enable_library_menu') == "true":
        itemlist.append(
            Item(title=config.get_localized_string(30131),
                 channel="videolibrary",
                 action="mainlist",
                 thumbnail=get_thumb("videolibrary.png", view),
                 category=config.get_localized_string(30119),
                 viewmode="thumbnails",
                 context=[{
                     "title": config.get_localized_string(70287),
                     "channel": "shortcuts",
                     "action": "SettingOnPosition",
                     "category": 2
                 }, {
                     "title": config.get_localized_string(60568),
                     "channel": "videolibrary",
                     "action": "update_videolibrary"
                 }]))

    # NOTE(review): downloadenabled is presumably a module-level setting
    # defined elsewhere in this file -- confirm.
    if downloadenabled != "false":
        itemlist.append(
            Item(title=config.get_localized_string(30101),
                 channel="downloads",
                 action="mainlist",
                 thumbnail=get_thumb("downloads.png", view),
                 viewmode="list",
                 context=[{
                     "title": config.get_localized_string(70288),
                     "channel": "shortcuts",
                     "action": "SettingOnPosition",
                     "category": 4
                 }]))

    # Settings icon; the update counter is currently hard-wired to 0
    # (previously config.get_setting("plugin_updates_available")).
    thumb_setting = "setting_%s.png" % 0

    itemlist.append(
        Item(title=config.get_localized_string(30100),
             channel="setting",
             action="settings",
             thumbnail=get_thumb(thumb_setting, view),
             category=config.get_localized_string(30100),
             viewmode="list"))
    itemlist.append(
        Item(title=config.get_localized_string(30104) + " (v" +
             config.get_addon_version(with_fix=True) + ")",
             channel="help",
             action="mainlist",
             thumbnail=get_thumb("help.png", view),
             category=config.get_localized_string(30104),
             viewmode="list"))
    return itemlist
コード例 #2
0
def episodios(item):
    """Build the episode list for a series page.

    Scrapes chapter links, renumbers them through renumbertools (handling
    links that bundle several episodes, joined by a show-specific
    separator) and returns Items pointing at findvideos, plus an optional
    add-to-library entry.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Running count of episodes seen so far; feeds the renumbering below.
    total_episode = 0

    patron_caps = '<li><span>Capitulo (\d+).*?</span><a href="(.*?)">(.*?)</a></li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(
        data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail

    for cap, link, name in matches:

        title = ""
        # Separator used when one link bundles several episode names;
        # a couple of shows use a different one.
        pat = "/"
        if "Mike, Lu & Og" == item.title:
            pat = "&/"
        if "KND" in item.title:
            pat = "-"
        # Several episodes in a single link: number each part, joining the
        # "SxEE" prefixes with "_" and ending with a trailing space.
        if len(name.split(pat)) > 1:
            i = 0
            for pos in name.split(pat):
                i = i + 1
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.contentSerieName, 1, total_episode)
                if len(name.split(pat)) == i:
                    title += "%sx%s " % (season, str(episode).zfill(2))
                else:
                    title += "%sx%s_" % (season, str(episode).zfill(2))
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, 1, total_episode)

            title += "%sx%s " % (season, str(episode).zfill(2))

        url = host + "/" + link
        if "disponible" in link:
            # Placeholder chapter (not yet available): no Item is appended.
            title += "No Disponible aún"
        else:
            title += name
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     show=show,
                     plot=scrapedplot,
                     thumbnail=scrapedthumbnail))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                show=show))

    return itemlist
コード例 #3
0
def findvideos(item):
    """Collect playable video links for a title page.

    First scrapes the quality/language option list, then fetches each
    option page, parses its JSON source array and resolves each entry:
    non-http sources go through the onevideo.tv player API, direct ones
    become playable clones. Returns the list of playable Items plus an
    optional add-to-library entry.
    """
    logger.info()
    itemlist = []
    templist = []
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
    # FIX: original had a doubled "matches = matches =" assignment.
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality, lang, scrapedurl in matches:
        url = host + scrapedurl
        title = item.title + ' (' + lang + ') (' + quality + ')'
        templist.append(item.clone(title=title, language=lang, url=url))
    for videoitem in templist:
        data = httptools.downloadpage(videoitem.url).data
        urls_list = scrapertools.find_single_match(
            data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
        # The array is split on "}," so each piece needs its brace restored.
        urls_list = urls_list.split("},")
        for element in urls_list:
            if not element.endswith('}'):
                element = element + '}'
            json_data = jsontools.load(element)
            # NOTE: the original also read json_data['id'] into an unused
            # local (shadowing the builtin); removed as dead code.
            sub = ''
            if 'srt' in json_data:
                sub = json_data['srt']

            url = json_data['source'].replace('\\', '')
            server = json_data['server']
            quality = json_data['quality']
            if 'http' not in url:
                # Indirect source: resolve through the onevideo.tv player API.
                new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
                          '=%s&srt=%s' % (url, sub)

                data = httptools.downloadpage(new_url).data
                data = re.sub(r'\\', "", data)
                video_list.extend(servertools.find_video_items(data=data))
                for urls in video_list:
                    if urls.language == '':
                        urls.language = videoitem.language
                    # The '%s' placeholder is presumably filled with the
                    # server name later by the caller -- TODO confirm.
                    urls.title = item.title + urls.language + '(%s)'

                for video_url in video_list:
                    video_url.channel = item.channel
                    video_url.action = 'play'
                    video_url.quality = quality
                    video_url.server = ""
                    video_url.infoLabels = item.infoLabels
            else:
                title = '%s [%s]' % (server, quality)
                video_list.append(
                    item.clone(title=title,
                               url=url,
                               action='play',
                               quality=quality,
                               server=server,
                               subtitle=sub))
    tmdb.set_infoLabels(video_list)
    if config.get_videolibrary_support(
    ) and len(video_list) > 0 and item.extra != 'findvideos':
        video_list.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return video_list
コード例 #4
0
def findvideos(item):
    """Return the playable links found in the page's option tabs."""
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '(?is)<div id="tab(\d+)".*?<iframe.*?src="([^"]+)"'
    for option, url in re.compile(patron, re.DOTALL).findall(data):
        # Tab label, usually "quality - language".
        extra_info = scrapertools.find_single_match(data, '<a href="#tab%s">(.*?)<' % option)
        if '-' in extra_info:
            quality, language = scrapertools.find_single_match(extra_info, '(.*?) - (.*)')
            if " / " in language:
                language = language.split(" / ")[1]
        else:
            quality, language = extra_info, ''

        if 'https:' not in url:
            url = 'https:' + url

        # Build the title suffix unless the "unify" setting is on.
        title = ''
        if not config.get_setting('unify'):
            if language != '':
                try:
                    title += ' [%s]' % IDIOMAS.get(language.capitalize(), 'Latino')
                except:
                    pass
            if quality != '':
                title += ' [%s]' % quality

        link_item = Item(channel=item.channel,
                         url=url,
                         title='%s' + title,
                         contentTitle=item.title,
                         action='play',
                         infoLabels=item.infoLabels)
        if language != '':
            link_item.language = IDIOMAS.get(language.capitalize(), 'Latino')
        if quality != '':
            link_item.quality = quality

        itemlist.append(link_item)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay.
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and "/episode/" not in item.url:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle,
                 ))

    return itemlist
コード例 #5
0
def episodios(item):  # This def must always be named episodios
    """Scrape every season/episode of the show into findvideos Items."""
    logger.info('[filmsenzalimiticc.py] episodios')
    itemlist = []

    # Load the show page and pull out the embedded player iframe URL.
    data = httptools.downloadpage(item.url, headers=headers).data
    player_url = scrapertools.find_single_match(data, r'<iframe src="([^"]+)".*?>')

    # Load the player page, stripping whitespace to ease the matching.
    data = httptools.downloadpage(player_url).data.replace('\t', '').replace('\n', '')

    # Season list (same anchor pattern is reused for episodes below).
    season_block = scrapertools.find_single_match(data, r'Stagioni<\/a>(.*?)<\/ul>')
    entry_patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
    for season_href, season_number in re.compile(entry_patron, re.DOTALL).findall(season_block):

        # Fetch this season's page and extract its episode list.
        season_page = httptools.downloadpage(
            urlparse.urljoin(player_url, season_href)).data.replace('\t', '').replace('\n', '')
        episode_block = scrapertools.find_single_match(season_page, r'Episodio<\/a>(.*?)<\/ul>')

        for episode_href, episode_number in re.compile(entry_patron, re.DOTALL).findall(episode_block):
            title = season_number + 'x' + episode_number.zfill(2)
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType='episode',
                     title=title,
                     url=urlparse.urljoin(player_url, episode_href),
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    # Add-to-library entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR lightblue][B]Aggiungi Serie alla videoteca[/B][/COLOR]',
                 url=item.url,
                 action='add_serie_to_library',
                 extra='episodios' + '###' + item.extra,
                 show=item.show))

    return itemlist
コード例 #6
0
ファイル: zoowoman.py プロジェクト: Jaloga/xiaomi
def findvideos(item):
    """List playable mirrors for the current title.

    Parses the download table (url, server, quality, language, type); if
    the table is empty (links not uploaded yet), falls back to the
    embedded iframes, guessing the language from nearby <strong> tags.
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if not item.contentPlot:
        item.contentPlot = scrapertools.find_single_match(
            data, 'wp-content"><p>(.*?)\/')

    patron = 'download" href="([^"]+)".*?</td><td><img.*?> (.*?)</td>'  # url, server
    patron += '<td>(.*?)</td><td>(.*?)</td><td>(.*?)</td>'  # quality, language, type
    #patron+= '<td>(.*?)</td><td><a href.*?>(.*?)' # date, uploader

    matches = scrapertools.find_multiple_matches(data, patron)
    for surl, sserver, scal, slang, stype in matches:
        sname = sserver.split(".")[0]
        lang = IDIOMAS.get(slang, slang)
        scal = scal.replace("HD", "720")
        stitle = " [COLOR=green][%sp][/COLOR] [COLOR=yellow](%s)[/COLOR]" % (
            scal, lang)

        if 'torrent' in stype.lower():
            sname = 'torrent'
            server = 'torrent'

        elif sname == "mega":
            server = sname
        # if more exceptions appear, use a dict
        elif sname == "ok":
            server = 'okru'
            # FIX: original used "==" (a no-op comparison) where an
            # assignment was intended, so the title still said "Ok".
            sname = "okru"
        else:
            server = "directo"

        titulo = "Ver en: %s" % sname.capitalize()

        if host in surl:
            new_data = httptools.downloadpage(surl).data
            surl = scrapertools.find_single_match(new_data,
                                                  '<a href="([^"]+)"')
        if "/formats=" in surl:
            surl = surl.split("/formats=")[0].replace("/compress/",
                                                      "/details/")
            server = "archiveorg"
        itemlist.append(
            item.clone(channel=item.channel,
                       action="play",
                       title=titulo + stitle,
                       url=surl,
                       server=server,
                       language=lang,
                       quality=scal + 'p',
                       infoLabels=item.infoLabels))
    # Happens when no links have been uploaded yet and only embeds exist
    # (provisional).
    if not matches:
        lang = ""
        stitle = ""
        # Embeds don't always state the language, and one language tag may
        # apply to several videos.
        mlang = scrapertools.find_multiple_matches(data,
                                                   '<strong>(.*?)</strong>')
        patron = '<iframe.*?src="([^"]+)"'  # server
        matches = scrapertools.find_multiple_matches(data, patron)
        for i, surl in enumerate(matches):
            if mlang:
                try:
                    slang = mlang[i]
                except:
                    slang = mlang[0]
                if "original" in slang.lower():
                    if "castellano" in slang.lower():
                        lang = "VOSE"
                    elif "ingl" in slang.lower():
                        lang = "VOS"
                    else:
                        lang = "VOSE"
                elif "castellano" in slang.lower():
                    lang = "Cast"
                else:
                    lang = "Lat"
            try:
                int(mlang[0])
                lang = 'VOSE'
            except:
                pass
            if lang:
                stitle = " [COLOR=yellow](%s)[/COLOR]" % lang
            itemlist.append(
                item.clone(channel=item.channel,
                           action="play",
                           title="%s" + stitle,
                           url=surl,
                           language=lang,
                           infoLabels=item.infoLabels))

        itemlist = servertools.get_servers_itemlist(
            itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay.
    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        # "Add this movie to the KODI library" option.
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=item.url,
                     thumbnail=item.thumbnail,
                     contentTitle=item.contentTitle))
    return itemlist
コード例 #7
0
ファイル: cuevana2espanol.py プロジェクト: shlibidon/addon
def findvideos(item):
    """Collect playable server links for a title page.

    Each "option" tab holds an iframe whose URL is resolved through one of
    several site-specific redirectors (irgoto, irgotogd, irgotoolp,
    irgotogp, GKPlugin, Openload, YouTube) before being turned into a
    playable Item. Ends with autoplay and an optional add-to-library entry.
    """
    logger.info()
    itemlist = []

    data = load_data(item.url)
    getContent(item, data)
    """
    if item.extra:
        getContentShow(data, item)
    else:
        getContentMovie(data, item)
    """
    pattern = '<div id="option-(\d)".*?<iframe class="metaframe rptss" src="([^"]+)"'

    #itemlist.append(Item(channel = item.channel, title=item.url))
    for option, link in scrapertools.find_multiple_matches(data, pattern):
        #php.*?=(\w+)&
        #url=(.*?)&
        server = ""
        # Server label shown on the matching tab header.
        sname = scrapertools.find_single_match(
            data,
            'href="#option-%s"><b class="icon-play_arrow"></b> Servidor (\w+)'
            % option)
        sname = sname.replace("Siempre", "SO")
        title = "[COLOR blue]Servidor " + sname + " [%s][/COLOR]"
        if 'player' in link:
            #~logger.info("CUEVANA LINK %s" % link)
            # fembed and rapidvideo
            if r'irgoto.php' in link:
                link = scrapertools.find_single_match(
                    link, 'php\?url=(.*)').replace('%3A',
                                                   ':').replace('%2F', '/')
                link = RedirectLink(link)
                if not link:
                    continue
                server = servertools.get_server_from_url(link)

            # vanlong
            elif r'irgotogd' in link:
                link = redirect_url('https:' + link, scr=True)
                server = "directo"

            # openloadpremium m3u8
            elif r'irgotoolp' in link:
                link = redirect_url('https:' + link)
                server = "oprem"

            # openloadpremium is broken on the site itself; worked around here
            elif r'irgotogp' in link:
                link = scrapertools.find_single_match(
                    data, r'irgotogd.php\?url=(\w+)')
                #link = redirect_url('https:'+link, "", True)
                link = GKPluginLink(link)
                server = "directo"
            elif r'gdv.php' in link:
                # Google Drive slows down link discovery, is a poor option,
                # and is the first one to get taken down.
                continue
            # amazon and vidcache, almost never work
            else:
                link = scrapertools.find_single_match(link, 'php.*?file=(\w+)')
                link = GKPluginLink(link)
                server = "directo"

        elif r'openload' in link:
            link = scrapertools.find_single_match(link, '\?h=(\w+)')
            link = OpenloadLink(link)
            server = "openload"
        elif 'youtube' in link:
            title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
            server = "youtube"
        else:  # Anything unimplemented lands here; report it if a movie is missing
            continue

        if not link:
            continue
        # GKplugin may return multiple links with different qualities; a
        # custom option list for "Directo" would be welcome -- for now only
        # the first match is used.
        if type(link) is list:
            link = link[0]['link']
        #if r'chomikuj.pl' in link:
        # For some users the CH option returns error 401.
        #link += "|Referer=https://player4.cuevana2.com/plugins/gkpluginsphp.php"
        itemlist.append(
            item.clone(title=title % server.capitalize(),
                       server=server,
                       url=link,
                       action='play'))

    #itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist):
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la videoteca",
                 text_color="yellow",
                 action="add_pelicula_to_library",
                 url=item.url,
                 thumbnail=item.thumbnail,
                 contentTitle=item.contentTitle))
    return itemlist
コード例 #8
0
def findvideos(item):
    """Resolve playable video links from the title page.

    "stream" hosts (except streamango) are fetched and their direct file
    URL extracted from the player page; every other iframe URL is handed
    to servertools for server detection.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    #logger.debug(data)
    patron = '<iframe.*?rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for video_url in matches:
        logger.debug('video_url: %s' % video_url)
        if 'stream' in video_url and 'streamango' not in video_url:
            data = httptools.downloadpage('https:' + video_url).data
            logger.debug(data)
            # FIX: default to the page just downloaded; the original left
            # new_data undefined (NameError, or stale data from a prior
            # iteration) whenever the URL contained 'iframe'.
            new_data = data
            if not 'iframe' in video_url:
                new_url = scrapertools.find_single_match(
                    data, 'iframe src="(.*?)"')
                new_data = httptools.downloadpage(new_url).data
            logger.debug('new_data %s' % new_data)
            url = ''
            try:
                url, quality = scrapertools.find_single_match(
                    new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
                    'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
            except:
                pass
            if url != '':
                headers_string = '|Referer=%s' % url
                url = url.replace('download', 'preview') + headers_string

                sub = scrapertools.find_single_match(new_data,
                                                     'file:.*?"(.*?srt)"')
                new_item = (Item(title=item.title,
                                 url=url,
                                 quality=quality,
                                 subtitle=sub,
                                 server='directo'))
                itemlist.append(new_item)
        else:
            itemlist.extend(servertools.find_video_items(data=video_url))

    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.thumbnail = item.thumbnail
        videoitem.infoLabels = item.infoLabels
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'

    itemlist = servertools.get_servers_itemlist(itemlist)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
コード例 #9
0
ファイル: pelis24.py プロジェクト: w1s0/addon
def episodios(item):
    """List the episodes of a series.

    Scrapes episode links and titles, filters to the current season when
    the item carries one, enriches titles/thumbnails from TMDb, sorts by
    episode number and appends an add-to-library entry.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # episode titles

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+)x(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        # When the item is pinned to a single season, skip other seasons.
        if 'season' in item.infoLabels and int(
                item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title,
                              url=scrapedurl,
                              action="findvideos",
                              text_color=color3,
                              fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the videolibrary
    if not item.extra:
        # Fetch metadata for the whole season via threaded TMDb lookups.
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the title.
                i.title = "%sx%s %s" % (i.infoLabels['season'],
                                        i.infoLabels['episode'],
                                        i.infoLabels['title'])
            # FIX: dict.has_key() was removed in Python 3; use "in".
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, replace the poster.
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this series to the videolibrary" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=__channel__,
                 title="Añadir esta serie a la videoteca",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show,
                 category="Series",
                 text_color=color1,
                 thumbnail=thumbnail_host,
                 fanart=fanart_host))

    return itemlist
コード例 #10
0
def findvideos(item):
    """Build the list of playable links for a movie page.

    Scrapes every play-box iframe, tags each link with its language label
    and lets servertools fill the '%s' title placeholder with the resolved
    server name.
    """
    logger.info()
    itemlist = []
    full_data = get_source(item.url)

    patron = '<div id="([^"]+)" class="play-box-iframe.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(full_data)
    for option, video_url in matches:
        # The language label sits next to the tab anchor for this option id
        language = scrapertools.find_single_match(
            full_data, '"#%s">.*?-->(.*?)(?:\s|<)' % option)
        if 'sub' in language.lower():
            language = 'SUB'
        # .get() keeps the raw label instead of raising KeyError when the
        # page uses a language string that is not mapped in IDIOMAS
        language = IDIOMAS.get(language, language)
        quality = ''
        if not config.get_setting("unify"):
            title = ' [%s] [%s]' % (quality, language)
        else:
            title = ''

        # '%s' is replaced with the server name by get_servers_itemlist below
        new_item = Item(channel=item.channel,
                        title='%s' + title,
                        url=video_url,
                        action='play',
                        quality=quality,
                        language=language,
                        infoLabels=item.infoLabels)
        itemlist.append(new_item)

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #11
0
ファイル: allcalidad.py プロジェクト: Hasimov/addon
def findvideos(item):
    """List playable links for an allcalidad movie page.

    Fills in missing year/original-title infoLabels from the page itself,
    resolves goo.gl shorteners, skips ad scripts and tags trailer links.
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Complete metadata the listing stage may not have supplied
    if not item.infoLabels["year"]:
        item.infoLabels["year"] = scrapertools.find_single_match(
            data, 'dateCreated.*?(\d{4})')
        if "orig_title" in data:
            contentTitle = scrapertools.find_single_match(
                data, 'orig_title.*?>([^<]+)<').strip()
            if contentTitle != "":
                item.contentTitle = contentTitle
    patron = '(?s)fmi(.*?)thead'
    bloque = scrapertools.find_single_match(data, patron)
    match = scrapertools.find_multiple_matches(
        bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
    for url in match:
        titulo = "Ver en: %s"
        if "goo.gl" in url:
            # Resolve the shortener from the Location header only
            url = httptools.downloadpage(url,
                                         follow_redirects=False,
                                         only_headers=True).headers.get(
                                             "location", "")
        if "youtube" in url:
            # Fixed: Kodi color tags are '[COLOR name]', not '[COLOR = name]'
            titulo = "[COLOR yellow]Ver trailer: %s[/COLOR]"
        if "ad.js" in url or "script" in url or "jstags.js" in url:
            continue
        elif "vimeo" in url:
            # vimeo needs the Referer appended after '|'
            url += "|" + "http://www.allcalidad.com"
        itemlist.append(
            item.clone(channel=item.channel,
                       action="play",
                       title=titulo,
                       url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if itemlist:
        itemlist.append(Item(channel=item.channel))
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
        # Opción "Añadir esta película a la biblioteca de KODI"
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir a la videoteca",
                         text_color="green",
                         action="add_pelicula_to_library",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         contentTitle=item.contentTitle))
    return itemlist
コード例 #12
0
def findvideos(item):
    """Resolve maxipelis24 'hideload' redirector links into playable items.

    Each link carries the video id (reversed) under one of several
    parameter names; requesting the matching redirect endpoint with the
    proper Referer yields the real video URL in the Location header.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Known (id parameter -> redirect parameter) pairs; checked in order,
    # first hit wins (list of tuples keeps order on Python 2 as well).
    param_pairs = [('id', 'ir'), ('ud', 'ur'), ('od', 'or'), ('ad', 'ar'),
                   ('ed', 'er')]

    for link in matches:
        if 'maxipelis24.tv/hideload/?' not in link:
            continue

        # Find which id parameter this link uses. Skip unknown variants:
        # the original code left id_type unset (or stale from a previous
        # iteration), causing UnboundLocalError / wrong resolutions.
        id_type = ir_type = None
        for id_key, ir_key in param_pairs:
            if '%s=' % id_key in link:
                id_type, ir_type = id_key, ir_key
                break
        if id_type is None:
            continue

        vid_id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
        base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)

        # The service expects the id reversed
        reversed_id = vid_id[::-1]
        referer = base_link + '%s=%s&/' % (id_type, reversed_id)
        video_data = httptools.downloadpage('%s%s=%s' %
                                            (base_link, ir_type, reversed_id),
                                            headers={'Referer': referer},
                                            follow_redirects=False)
        url = video_data.headers['location']
        # '%s' is replaced with the server name by get_servers_itemlist below
        title = '%s'

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=url,
                 action='play',
                 language='',
                 infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())
    if itemlist:
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, action=""))
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=item.url,
                     thumbnail=item.thumbnail,
                     contentTitle=item.contentTitle))

    return itemlist
コード例 #13
0
def findvideos(item):
    """Return playable items scraped from a movie page.

    Two passes: embedded players advertised via data-src attributes, then
    external/table links. Duplicate URLs and subtitle files are skipped.
    """
    itemlist = []
    seen = []
    data = httptools.downloadpage(item.url).data

    # Pass 1: embedded players (data-src attributes)
    embed_patron = '#embed." data-src="([^"]+).*?class="([^"]+)'
    for scrapedurl, language in scrapertools.find_multiple_matches(
            data, embed_patron):
        link = get_url(host + scrapedurl)
        link = link.replace("feurl.com/v", "feurl.com/f")
        seen.append(link)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 url=link,
                 title="Ver en %s " + "(" + language + ")",
                 language=language,
                 contentTitle=item.contentTitle,
                 contentThumbnail=item.thumbnail,
                 infoLabels=item.infoLabels))

    # Pass 2: external server links from the download/links table
    table_patron = ('(?is)<a href=".*?go.([^"]+)" class="btn btn-xs btn-info'
                    '.*?<span>([^<]+)</span>.*?<td>([^<]+)')
    for scrapedurl, srv, language in scrapertools.find_multiple_matches(
            data, table_patron):
        if scrapedurl in seen or ".srt" in scrapedurl:
            continue
        seen.append(scrapedurl)
        # NOTE(review): this pass hardcodes language="Latino" even though a
        # language string was scraped — preserved as-is; confirm intent.
        entry = Item(channel=item.channel,
                     action="play",
                     url=scrapedurl,
                     title="Ver en %s " + "(" + language + ")",
                     language="Latino",
                     contentTitle=item.contentTitle,
                     contentThumbnail=item.thumbnail,
                     infoLabels=item.infoLabels)
        if "torrent" in srv.lower():
            entry.server = "Torrent"
        itemlist.append(entry)

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel=item.channel))
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))

        # Opción "Añadir esta película a la biblioteca de KODI"
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=item.url,
                     thumbnail=item.thumbnail,
                     contentTitle=item.contentTitle))
    return itemlist
コード例 #14
0
ファイル: peliculonhd.py プロジェクト: userColaborator/addon
def findvideos(item):
    """Resolve the player options of a peliculonhd page into playable items.

    Three site generations are handled: legacy ``play.php`` packed-JS
    embeds, the intermediate ``xyz`` iframe host (addiframe calls), and
    the current token-protected edge-data API.
    """
    logger.info()

    itemlist = []
    infoLabels = item.infoLabels
    servers = {'premium': 'oprem'}

    soup = create_soup(item.url)
    matches = soup.find_all('li', id=re.compile(r'player-option-\d+'))
    stitle = ''

    for elem in matches:

        lang = elem.find('span', class_='title').text.lower()
        language = IDIOMAS.get(lang, lang)

        if not unify:
            stitle = '[COLOR darkgrey][%s][/COLOR] ' % language

        _type = elem['data-type']
        nume = elem['data-nume']
        _id = elem['data-post']
        post = {'action': 'doo_player_ajax', 'post': _id, 'nume': nume, 'type': _type}

        post_url = '%swp-admin/admin-ajax.php' % host
        new_soup = create_soup(post_url, post=post, headers={'Referer': item.url})

        scrapedurl = new_soup.iframe['src']
        # modo viejo: packed JS inside play.php
        if 'play.php' in scrapedurl:
            try:
                data = httptools.downloadpage(scrapedurl, headers={'Referer': item.url}).data
                enc_data = scrapertools.find_single_match(data, '(eval.*?)</script')

                dec_data = jsunpack.unpack(enc_data)
                url = scrapertools.find_single_match(dec_data, 'src="([^"]+)"')

                server = servertools.get_server_from_url(url)
                title = stitle + server.capitalize()

                itemlist.append(Item(channel=item.channel, url=url, title=title, action='play',
                                    language=language, infoLabels=infoLabels, server=server))

            except:
                continue
        # modo menos viejo: iframes listed through addiframe() calls
        elif 'xyz' in scrapedurl:
            new_data = httptools.downloadpage(scrapedurl, headers={'Referer': item.url}).data
            patron = r"addiframe\('([^']+)'"
            matches = scrapertools.find_multiple_matches(new_data, patron)

            for new_url in matches:
                # Reset per iteration: previously a URL resolved in an
                # earlier pass leaked into this one (duplicate items), and
                # 'url' was unbound entirely when the first new_url matched
                # neither branch below (UnboundLocalError).
                url = ''
                if 'play.php' in new_url:

                    data = httptools.downloadpage(new_url).data
                    enc_data = scrapertools.find_single_match(data, '(eval.*?)</script')

                    try:
                        dec_data = jsunpack.unpack(enc_data)
                    except:
                        continue

                    url = scrapertools.find_single_match(dec_data, r'src\s*=\s*"([^"]+)"')
                    if 'vev.' in url:
                        continue

                elif 'embedvip' in new_url:
                    continue

                if url != '':
                    server = servertools.get_server_from_url(url)
                    title = stitle + server.capitalize()

                    itemlist.append(Item(channel=item.channel, url=url, title=title, action='play',
                                    language=language, infoLabels=infoLabels, server=server))

        # modo nuevo: token-protected edge-data API
        else:
            try:
                soup = create_soup(scrapedurl, headers={'Referer': item.url})
            except:
                continue
            matches = soup.find_all('li', class_='option servers')
            from urlparse import urlparse
            i = urlparse(scrapedurl)
            url = 'https://%s/edge-data/' % (i[1])

            for elem in matches:
                srv = elem['title'].lower()
                if 'vip' in srv: continue
                st = elem['data-embed']
                vt = elem['data-issuer']
                tk = elem['data-signature']

                post = {'streaming': st, 'validtime': vt, 'token': tk}
                server = servers.get(srv, srv)
                title = stitle + server.capitalize()

                itemlist.append(Item(channel=item.channel, url=url, title=title, action='play',
                                    language=language, infoLabels=infoLabels, server=server,
                                     _post=post, _ref={'Referer': scrapedurl}))

    itemlist = filtertools.get_links(itemlist, item, list_language)
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='Añadir esta pelicula a la videoteca',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle, text_color='yellow'))

    return itemlist
コード例 #15
0
def findvideos(item):
    """Build playable items by pairing page tabs with the site's videod.js
    player functions.

    The page lists tabs (language/quality per tab); videod.js maps each
    player function name to an embed URL template. Items are produced for
    every (tab, template) pair whose names match.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Tab header block: holds (tab id, language class, quality) triples
    bloque_tab = scrapertools.find_single_match(
        data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)
    servers_data_list = []
    # Tab contents: (tab id, player function name, video id); the site
    # serves two markup variants (rocketscript vs plain script)
    patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for check_tab, server, id in matches:
        # Match tab content against the header triples (string-searched in
        # the repr of the findall result) to recover language and quality
        if check_tab in str(check):
            idioma, calidad = scrapertools.find_single_match(
                str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
            servers_data_list.append([server, id, idioma, calidad])
    # videod.js maps each player function name to its iframe URL template
    url = host + "/Js/videod.js"
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data = data.replace(
        '<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
    patron = 'function (\w+)\(id\).*?'
    patron += 'data-src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for server, url in matches:
        for enlace, id, idioma, calidad in servers_data_list:
            if server == enlace:
                # Strip JS template artifacts from the URL, then append the id
                video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "",
                                   str(url))
                video_url = re.sub(r"'\+codigo\+'", "", video_url)
                video_url = video_url.replace('embed//', 'embed/')
                video_url = video_url + id
                if "goo.gl" in video_url:
                    # Best effort: resolve the shortener; skip on failure
                    try:
                        from unshortenit import unshorten
                        url = unshorten(video_url)
                        video_url = scrapertools.get_match(
                            str(url), "u'([^']+)'")
                    except:
                        continue
                # '%s' is filled with the server name by servertools below
                title = "Ver en: %s [" + idioma + "][" + calidad + "]"
                itemlist.append(
                    item.clone(title=title,
                               url=video_url,
                               action="play",
                               thumbnail=item.category,
                               language=idioma,
                               quality=calidad))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)
    if item.library and config.get_videolibrary_support(
    ) and len(itemlist) > 0:
        infoLabels = {
            'tmdb_id': item.infoLabels['tmdb_id'],
            'title': item.fulltitle
        }
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta película a la videoteca",
                 action="add_pelicula_to_library",
                 url=item.url,
                 infoLabels=infoLabels,
                 text_color="0xFFff6666",
                 thumbnail='http://imgur.com/0gyYvuC.png'))
    return itemlist
コード例 #16
0
ファイル: pelis24.py プロジェクト: w1s0/addon
def findvideos(item):
    """List playable options for a pelis24 movie page.

    Each page tab ('#option-N') links a language label to an iframe URL;
    the server name is resolved from the URL for the title.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)

    # Language label -> colored tag; loop-invariant, so built once here
    # instead of being re-created on every iteration
    languages = {
        'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
        'castellano': '[COLOR green](CAST)[/COLOR]',
        'español': '[COLOR green](CAST)[/COLOR]',
        'subespañol': '[COLOR red](VOS)[/COLOR]',
        'sub': '[COLOR red](VOS)[/COLOR]',
        'ingles': '[COLOR red](VOS)[/COLOR]'
    }

    patron = 'href="#option-(.*?)"><span class="dt_flag"><img src="[^"]+"></span>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, lang in matches:
        # The iframe for this tab is found by its option number
        url = scrapertools.find_single_match(
            data,
            '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"'
            % option)
        lang = lang.lower().strip()
        if lang in languages:
            lang = languages[lang]

        server = servertools.get_server_from_url(url)
        title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (
            server.title(), item.quality, lang)

        itemlist.append(
            item.clone(action='play',
                       url=url,
                       title=title,
                       language=lang,
                       text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=__channel__,
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 thumbnail=thumbnail_host,
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #17
0
ファイル: pelisplay.py プロジェクト: shlibidon/addon
def findvideos(item):
    """List playable options for a pelisplay page.

    Each player entry is resolved through the site's 'procesar_player'
    endpoint (token-protected POST); direct embeds and gkplugins links
    are further unwrapped to the real file URL.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?'
    patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Page token and language map are loop-invariant: compute them once
    # instead of on every iteration
    token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
    idioma = {
        'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
        'castellano': '[COLOR green](CAST)[/COLOR]',
        'subtitulado': '[COLOR red](VOSE)[/COLOR]'
    }

    for data_player, servername, quality, lang in matches:
        # post_link stays inside the loop: the 'fondo_requerido' branch
        # below reassigns it and it must be reset for the next entry
        post_link = '%sentradas/procesar_player' % host
        post = {'data': data_player, 'tipo': 'videohost', '_token': token}
        json_data = httptools.downloadpage(post_link, post=post).json
        url = json_data['data']

        if 'pelisplay.co/embed/' in url:
            # Direct embed: extract the real file URL from the player page
            new_data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(new_data,
                                                 '"file":"([^"]+)",').replace(
                                                     '\\', '')

        elif 'fondo_requerido' in url:
            # Wrapped link: unwrap through the gkplugins endpoint
            link = scrapertools.find_single_match(
                url, '=(.*?)&fondo_requerido').partition('&')[0]
            post_link = '%sprivate/plugins/gkpluginsphp.php' % host
            post = {'link': link}
            new_data2 = httptools.downloadpage(post_link, post=post).data
            url = scrapertools.find_single_match(new_data2,
                                                 '"link":"([^"]+)"').replace(
                                                     '\\', '')

        lang = lang.lower().strip()
        if lang in idioma:
            lang = idioma[lang]
        if servername.lower() == "tazmania":
            servername = "fembed"
        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)

        itemlist.append(
            item.clone(channel=__channel__,
                       title=title,
                       action='play',
                       language=lang,
                       quality=quality,
                       url=url))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'serie':
        itemlist.append(
            Item(channel=__channel__,
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 thumbnail=get_thumb("videolibrary_movie.png"),
                 contentTitle=item.contentTitle))
    return itemlist
コード例 #18
0
def getmainlist(view="thumb_"):
    """Build the add-on's main menu as a list of Item objects.

    view selects the thumbnail flavour passed to get_thumb. Optional
    entries (videolibrary, downloads) are appended only when enabled.
    """
    logger.info()
    loc = config.get_localized_string
    itemlist = []

    # Channels that make up the main menu
    itemlist.append(
        Item(title=loc(30130),
             channel="news",
             action="mainlist",
             thumbnail=get_thumb("news.png", view),
             category=loc(30119),
             viewmode="thumbnails",
             context=[{
                 "title": loc(70285),
                 "channel": "news",
                 "action": "menu_opciones",
                 "goto": True
             }]))

    itemlist.append(
        Item(title=loc(30118),
             channel="channelselector",
             action="getchanneltypes",
             thumbnail=get_thumb("channels.png", view),
             view=view,
             category=loc(30119),
             viewmode="thumbnails"))

    itemlist.append(
        Item(title=loc(70527),
             channel="kodfavorites",
             action="mainlist",
             thumbnail=get_thumb("mylink.png", view),
             view=view,
             category=loc(70527),
             viewmode="thumbnails"))

    itemlist.append(
        Item(title=loc(30103),
             channel="search",
             path='special',
             action="mainlist",
             thumbnail=get_thumb("search.png", view),
             category=loc(30119),
             viewmode="list",
             context=[{
                 "title": loc(70286),
                 "channel": "search",
                 "action": "opciones",
                 "goto": True
             }]))

    itemlist.append(
        Item(title=loc(30102),
             channel="favorites",
             action="mainlist",
             thumbnail=get_thumb("favorites.png", view),
             category=loc(30102),
             viewmode="thumbnails"))

    # Only offered when the videolibrary is available
    if config.get_videolibrary_support():
        itemlist.append(
            Item(title=loc(30131),
                 channel="videolibrary",
                 action="mainlist",
                 thumbnail=get_thumb("videolibrary.png", view),
                 category=loc(30119),
                 viewmode="thumbnails",
                 context=[{
                     "title": loc(70287),
                     "channel": "videolibrary",
                     "action": "channel_config"
                 }]))

    # Only offered when downloads are enabled
    if downloadenabled != "false":
        itemlist.append(
            Item(title=loc(30101),
                 channel="downloads",
                 action="mainlist",
                 thumbnail=get_thumb("downloads.png", view),
                 viewmode="list",
                 context=[{
                     "title": loc(70288),
                     "channel": "setting",
                     "config": "downloads",
                     "action": "channel_config"
                 }]))

    # Placeholder index; was config.get_setting("plugin_updates_available")
    thumb_setting = "setting_%s.png" % 0

    itemlist.append(
        Item(title=loc(30100),
             channel="setting",
             action="mainlist",
             thumbnail=get_thumb(thumb_setting, view),
             category=loc(30100),
             viewmode="list"))

    itemlist.append(
        Item(title="%s (%s %s)" % (loc(30104), loc(20000),
                                   config.get_addon_version(with_fix=False)),
             channel="help",
             action="mainlist",
             thumbnail=get_thumb("help.png", view),
             category=loc(30104),
             viewmode="list"))
    return itemlist
コード例 #19
0
ファイル: cliver.py プロジェクト: Jaloga/xiaomi
def findvideos(item):
    """List playable sources: movies via the ajax endpoint, episodes via
    data-url attributes embedded in item.url, or direct play passthrough."""
    logger.info()
    itemlist = []

    if not item.contentSerieName:
        # Movie: sources come from an ajax endpoint, grouped by language
        response = httptools.downloadpage(
            xhr_film, post={"pelicula": item.content_id}).json

        for lang_key in response:
            language = IDIOMAS.get(lang_key, lang_key)
            for source in response[lang_key]:
                server_name = source['reproductor_nombre'].lower()
                # Title is built from the site's server name, before the
                # supervideo -> directo remapping below
                label = '%s[COLOR springgreen] (%s)[/COLOR]' % (
                    server_name.capitalize(), language)

                if 'supervideo' in server_name:
                    server_name = 'directo'

                itemlist.append(
                    Item(channel=item.channel,
                         url='',
                         title=label,
                         contentTitle=item.title,
                         action='play',
                         infoLabels=item.infoLabels,
                         server=server_name,
                         token=source['token'],
                         language=language))

    elif item.direct_play == True:
        # Already resolved: hand straight over to play()
        item.action = 'play'
        return play(item)
    else:
        # Episode: URLs are embedded as data-url-<lang> attributes
        for raw_lang, source_url in scrapertools.find_multiple_matches(
                item.url, 'data-url-(.*?)="(.*?)"'):
            if not source_url:
                continue
            lang_key = raw_lang.replace('-', '_')
            language = IDIOMAS.get(lang_key, lang_key)
            server_name = servertools.get_server_from_url(source_url)
            label = '%s[COLOR springgreen] (%s)[/COLOR]' % (
                server_name.capitalize(), language)
            itemlist.append(
                Item(channel=item.channel,
                     url=source_url,
                     title=label,
                     contentSerieName=item.title,
                     action='play',
                     infoLabels=item.infoLabels,
                     server=server_name,
                     language=language))

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if (config.get_videolibrary_support() and len(itemlist) > 0
            and item.extra != 'findvideos' and not item.contentSerieName):
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
コード例 #20
0
ファイル: hdfulls.py プロジェクト: roliverosc/addon
def findvideos(item):
    """Collect the playable video links for an hdfull title.

    Decodes the provider table from the site's obfuscated JS, decrypts
    the per-title link list, builds one Item per provider link and runs
    the usual FilterTools / AutoPlay / videolibrary post-processing.
    """
    logger.info()

    itemlist = list()

    data = get_source(item.url)
    js_data = get_source("%s/static/style/js/jquery.hdfull.view.min.js" % host)
    data_js = get_source("%s/static/js/providers.js" % host)
    decoded = alfaresolver.jhexdecode(data_js).replace("'", '"')

    providers_pattern = 'p\[(\d+)\]= {"t":"([^"]+)","d":".*?","e":.function.*?,"l":.function.*?return "([^"]+)".*?};'

    # provider id -> [embed flag, link base] table.
    provs = {}
    for prov_id, embed_flag, link_base in scrapertools.find_multiple_matches(
            decoded, providers_pattern):
        provs[prov_id] = [embed_flag, link_base]

    try:
        data_decrypt = jsontools.load(alfaresolver.obfs(data, js_data))
    except:
        # Decryption failed: nothing playable can be extracted.
        return itemlist

    infolabels = item.infoLabels
    infolabels["year"] = scrapertools.find_single_match(
        data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')

    matches = []
    for entry in data_decrypt:
        prov = entry['provider']
        if prov not in provs:
            continue
        try:
            matches.append([entry['lang'], entry['quality'],
                            provs[prov][1] + entry['code'], provs[prov][0]])
        except:
            pass

    for idioma, calidad, url, embed in matches:
        idioma = IDIOMAS.get(idioma.lower(), idioma)
        calidad = unicode(calidad, "utf8").upper().encode("utf8")
        # '%s' placeholder is filled with the server name later on.
        title = "%s (" + calidad + ")(" + idioma + ")"

        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 infoLabels=item.infoLabels,
                 language=idioma,
                 contentType=item.contentType,
                 quality=calidad))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda i: i.language)

    if config.get_videolibrary_support(
    ) and itemlist and not item.contentSerieName and not item.extra:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]{}[/COLOR]'.format(
                     config.get_localized_string(60353)),
                 action="add_pelicula_to_library",
                 url=item.url,
                 thumbnail=item.thumbnail,
                 contentTitle=item.contentTitle,
                 infoLabels=item.infoLabels,
                 extra="findvideos"))

    return itemlist
コード例 #21
0
def findvideos(item):
    """Build torrent and direct-download links for a movie page.

    Torrent qualities are read from the modal list; the optional DD
    block is either kept as a single 'Online' entry (library mode) or
    expanded into one entry per detected server.
    """
    logger.info()
    itemlist = []
    # BUGFIX: the original used Thread(target=get_art(item)), which
    # called get_art synchronously and passed its *result* as the thread
    # target. Pass the callable and its argument so the artwork lookup
    # actually runs in the background.
    th = Thread(target=get_art, args=(item,))
    th.setDaemon(True)
    th.start()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    enlaces = scrapertools.find_multiple_matches(
        data,
        'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"'
    )
    for calidad, size, url in enlaces:
        title = "[COLOR palegreen][B]Torrent[/B][/COLOR]" + " " + "[COLOR chartreuse]" + calidad + "[/COLOR]" + "[COLOR teal] ( [/COLOR]" + "[COLOR forestgreen]" + size + "[/COLOR]" + "[COLOR teal] )[/COLOR]"
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=url,
                 action="play",
                 server="torrent",
                 fanart=item.fanart,
                 thumbnail=item.thumbnail,
                 extra=item.extra,
                 # BUGFIX: was 'InfoLabels' (wrong case), which stored a
                 # plain attribute and left the item without metadata.
                 infoLabels=item.infoLabels,
                 folder=False))
    # Optional direct-download block ("Online").
    dd = scrapertools.find_single_match(
        data,
        'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
    if dd:
        if item.library:
            itemlist.append(
                Item(channel=item.channel,
                     title="[COLOR floralwhite][B]Online[/B][/COLOR]",
                     url=dd,
                     action="dd_y_o",
                     thumbnail="http://imgur.com/mRmBIV4.png",
                     fanart=item.extra.split("|")[0],
                     contentType=item.contentType,
                     extra=item.extra,
                     folder=True))
        else:
            videolist = servertools.find_video_items(data=str(dd))
            for video in videolist:
                icon_server = os.path.join(config.get_runtime_path(),
                                           "resources", "images", "servers",
                                           "server_" + video.server + ".png")
                if not os.path.exists(icon_server):
                    icon_server = ""
                itemlist.append(
                    Item(channel=item.channel,
                         url=video.url,
                         server=video.server,
                         title="[COLOR floralwhite][B]" + video.server +
                         "[/B][/COLOR]",
                         thumbnail=icon_server,
                         fanart=item.extra.split("|")[1],
                         action="play",
                         folder=False))
    if item.library and config.get_videolibrary_support() and itemlist:
        infoLabels = {
            'tmdb_id': item.infoLabels['tmdb_id'],
            'title': item.infoLabels['title']
        }
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta película a la videoteca",
                 action="add_pelicula_to_library",
                 url=item.url,
                 infoLabels=infoLabels,
                 text_color="0xFFe5ffcc",
                 thumbnail='http://imgur.com/DNCBjUB.png',
                 extra="library"))

    return itemlist
コード例 #22
0
ファイル: estadepelis.py プロジェクト: x7r6xx/repo
def findvideos(item):
    """Resolve the obfuscated playN() handlers of a title into playable Items.

    Maps each JS play handler to its display language, decodes the
    embedded URL with dec(), guesses the hosting server from the URL and
    appends one clone per playable link.
    """
    logger.info()

    itemlist = []
    langs = dict()

    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)
    patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Map each playN handler name to its display language label.
    for key, value in matches:
        langs[key] = value.strip()

    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
    matches = re.compile(patron, re.DOTALL).findall(data)
    title = item.title
    enlace = scrapertools.find_single_match(
        data,
        'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"'
    )

    # URL fragment -> server name (invariant; hoisted out of the loop).
    servers = {
        '/opl': 'openload',
        '/your': 'yourupload',
        '/sen': 'senvid',
        '/face': 'netutv',
        '/vk': 'vk'
    }

    for scrapedlang, encurl in matches:
        if 'e20fb34' in encurl:
            url = dec(encurl) + enlace
        else:
            url = dec(encurl)

        server = ''
        server_id = re.sub(r'.*?embed|\.php.*', '', url)
        if server_id and server_id in servers:
            server = servers[server_id]
        logger.debug('server_id: %s' % server_id)

        # BUGFIX: langs may lack scrapedlang (KeyError) and 'idioma' was
        # left unset (NameError / stale value from a previous iteration)
        # for labels other than 'Latino' / 'Sub Español'. Both now fall
        # back to Latino, matching the 'language' default below.
        lang_name = langs.get(scrapedlang, '')
        if lang_name in list_language:
            language = IDIOMAS[lang_name]
        else:
            language = 'Latino'
        if lang_name == 'Sub Español':
            idioma = '[COLOR red]SUB[/COLOR]'
        else:
            idioma = '[COLOR limegreen]LATINO[/COLOR]'

        if item.extra == 'peliculas':
            title = item.contentTitle + ' (' + server + ') ' + idioma
            plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
        else:
            title = item.contentSerieName + ' (' + server + ') ' + idioma
            plot = item.plot

        thumbnail = servertools.guess_server_thumbnail(title)

        # Only real embed endpoints ('.php' pages that are not the
        # generic player) are playable.
        if 'player' not in url and 'php' in url:
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           plot=plot,
                           thumbnail=thumbnail,
                           server=server,
                           quality='',
                           language=language))
        logger.debug('url: %s' % url)
    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #23
0
def findvideos(item):
    """Return the playable links of a title; a YouTube hit becomes a trailer.

    Each OptN tab is resolved to its embedded video URL; language and
    quality are read from the tab label ("language - quality").
    """
    logger.info()

    itemlist = []
    trailer = None

    data = scrapertools.decodeHtmlentities(
        scrapertools.unescape(get_source(item.url)))

    patron = 'id="(Opt\d+)">.*?src="(?!about:blank)([^"]+)" frameborder.*?</iframe>'
    for option, embed_url in re.findall(patron, data, re.DOTALL):
        embed_url = embed_url.replace('"', '').replace('&#038;', '&')
        video_page = get_source(embed_url)
        url = scrapertools.find_single_match(
            video_page, '<div class="Video">.*?src="([^"]+)"')

        # Tab label carries "language - quality".
        opt_parts = scrapertools.find_single_match(
            data,
            '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' % option).split('-')
        quality = opt_parts[1].strip()
        language = 'VOSE' if 'sub' in opt_parts[0].strip().lower() else 'VO'

        if 'youtube' in url:
            trailer = Item(channel=item.channel,
                           title='Trailer',
                           url=url,
                           action='play',
                           server='youtube')
        elif url != '':
            itemlist.append(
                Item(channel=item.channel,
                     title='%s',
                     url=url,
                     language=IDIOMAS[language],
                     quality=quality,
                     action='play'))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % '%s [%s] [%s]' %
        (i.server.capitalize(), i.language, i.quality))

    if trailer is not None:
        itemlist.append(trailer)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #24
0
ファイル: pelisplanet.py プロジェクト: Jaloga/xiaomi
def findvideos(item):
    """Build the list of hoster links; streamvips links are resolved inline.

    Known-dead hosters (ultrastream, mega) are skipped. For streamvips
    the embed page is fetched and its file variants are stashed in the
    item's 'password' field.
    """
    logger.info()
    itemlist = []

    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", page)

    patron = ('<a id="[^"]+" style="cursor:pointer; cursor: hand" rel="([^"]+)".*?'
              '<span class="optxt"><span>(.*?)</span>.*?'
              '<span class="q">([^<]+)</span>')

    for link, lang, host_name in re.findall(patron, page, re.DOTALL):
        host_name = host_name.lower().strip()
        lang = lang.replace('Español ', '')
        file_variants = []

        # Hosters known not to work: skip them outright.
        if 'ultrastream' in host_name or '/meganz' in link:
            continue

        if 'streamvips' in host_name:
            vips_data = httptools.downloadpage(link,
                                               headers={
                                                   'Referer': host
                                               }).data
            vips_data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", vips_data)
            sources = scrapertools.find_single_match(
                vips_data, 'sources: \[(.+?)</body>')
            if sources:
                patronr = "'file': '([^']+)','type': '([^']+)','label': '([^']+)'"
                for file_url, file_type, label in re.compile(
                        patronr, re.DOTALL).findall(sources):
                    file_variants.append([".%s (%s)" % (file_type, label),
                                          file_url])
            elif 'vips/' in link:
                link = scrapertools.find_single_match(
                    vips_data, '"file": "([^"]+)')

        title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(
            host_name.capitalize(), lang)

        itemlist.append(
            item.clone(action='play',
                       title=title,
                       url=link,
                       quality=item.quality,
                       language=lang.replace('Español ', ''),
                       password=file_variants,
                       text_color=color3,
                       thumbnail=item.thumbnail))

    itemlist = servertools.get_servers_itemlist(itemlist)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                thumbnail=
                'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                extra="findvideos",
                contentTitle=item.contentTitle))

    return itemlist
コード例 #25
0
ファイル: cinecalidad.py プロジェクト: roliverosc/addon
def findvideos(item):
    """Build playable links for a cinecalidad title.

    Combines protected links (base64 direct-download and torrent) with
    the regular character-shifted hoster links, de-duplicating URLs,
    then runs FilterTools / AutoPlay and the videolibrary entry.
    """
    logger.info()
    itemlist = []
    dl_itemlist = list()
    duplicados = []

    # Language is inferred from the domain.
    if 'cinemaqualidade' in item.url:
        lang = 'portugues'
    elif 'espana' in item.url:
        lang = 'castellano'
    else:
        # BUGFIX: 'lang' was only set for three known domains; any other
        # URL raised NameError at IDIOMAS[lang]. 'cine-calidad' and
        # unknown domains now default to latino.
        lang = 'latino'

    data = httptools.downloadpage(item.url).data.replace("'", '"')
    patron = '(?:onclick="Abrir.*?"|class="link(?: onlinelink)?").*?data(?:-url)?="([^"]+)".*?<li>([^<]+)</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Embed URL template per server id ('%s' takes the decoded video id).
    server_url = {
        'yourupload': 'https://www.yourupload.com/embed/%s',
        'trailer': 'https://www.youtube.com/embed/%s',
        'bittorrent': '',
        'mega': 'https://mega.nz/file/%s',
        'fembed': '%s',
        'gounlimited': 'https://gounlimited.to/embed-%s.html',
        'clipwatching': 'https://clipwatching.com/embed-%s.html',
        'vidcloud': 'https://vidcloud.co/embed/%s',
        'jetload': 'https://jetload.net/e/%s',
        'evoload': 'https://evoload.io/e/%s',
        'doodstream': '%s',
        'cineplay': '%s'
    }

    # Character-shift offset used by dec() to decode video ids.
    dec_value = scrapertools.find_single_match(
        data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')
    protected_links = scrapertools.find_multiple_matches(
        data,
        '<a href="(%sprotect/v.php[^"]+)" target="_blank"><li>([^<]+)</li>\s+?</a>'
        % host)
    subs = scrapertools.find_single_match(data,
                                          '<a id=subsforlink href=(.*?) ')

    if protected_links:
        headers = {'Referer': item.url}
        language = IDIOMAS[lang]
        quality = '1080p'
        for protected, server_id in protected_links:
            is_dl = False
            protected_link = scrapertools.decodeHtmlentities(protected)
            if "torrent" not in server_id.lower():
                # Direct-download link: the real URL is base64 in 'i='.
                enc_url = scrapertools.find_single_match(
                    protected_link, "i=([^&]+)")
                url = base64.b64decode(enc_url).decode("utf-8")
                if url.startswith("https://mega.nz/file"):
                    continue
                is_dl = True
            else:
                # Torrent link: the magnet is on the protect page itself.
                p_data = httptools.downloadpage(protected_link,
                                                headers=headers,
                                                ignore_response_code=True).data
                url = scrapertools.find_single_match(p_data,
                                                     'value="(magnet.*?)"')
                quality = '1080p'
                if "4K" in server_id:
                    quality = '4K'
                language = IDIOMAS[lang]
            if url and url in duplicados:
                continue
            else:
                duplicados.append(url)
            new_item = Item(channel=item.channel,
                            action='play',
                            title="%s",
                            contentTitle=item.contentTitle,
                            url=url,
                            language=language,
                            quality=quality,
                            subtitle=subs,
                            infoLabels=item.infoLabels)
            if is_dl:
                dl_itemlist.append(new_item)
            else:
                new_item.server = "Torrent"
                itemlist.append(new_item)

    for video_cod, server_id in matches:
        thumbnail = item.thumbnail

        server = server_id.lower()
        if server == "trailer":
            continue
        video_id = dec(video_cod, dec_value)
        url = server_url.get(server, '')

        quality = '1080p'
        language = IDIOMAS[lang]
        if url:
            duplicados.append(url)
            url = url % video_id
            new_item = Item(channel=item.channel,
                            action='play',
                            title="%s",
                            contentTitle=item.contentTitle,
                            url=url,
                            language=language,
                            thumbnail=thumbnail,
                            quality=quality,
                            subtitle=subs,
                            infoLabels=item.infoLabels)
            itemlist.append(new_item)

    # Direct-download entries go after the streaming/torrent ones.
    if dl_itemlist:
        itemlist += dl_itemlist

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
コード例 #26
0
def findvideos(item):
    """List streaming or download links for a title.

    When item.extra is not 'dd'/'descarga' the streaming block is
    parsed; otherwise the download (DD) block is. A videolibrary entry
    and a 'see download links' entry are appended when applicable.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # BUGFIX: 'extra' and 'thumb' were only assigned inside the link
    # loops, so an empty link list made the trailing videolibrary /
    # "Ver enlaces Descarga" blocks raise NameError. Give both safe
    # defaults up front.
    extra = "online"
    thumb = item.thumbnail
    if item.extra != "dd" and item.extra != "descarga":
        if item.contentType != "movie":
            bloque_links = scrapertools.find_single_match(
                data, '<div class="links">(.*?)<\/i>Selecciona un')
            if bloque_links == "":
                bloque_links = scrapertools.find_single_match(
                    data, '<div class="links">(.*?)<div class="enlaces">')
        else:
            bloque_links = scrapertools.find_single_match(
                data, '<div class="links">(.*?)<\/i>Descargar')
            if bloque_links == "":
                bloque_links = scrapertools.find_single_match(
                    data, '<div class="links">(.*?)<div class="enlaces">')
        patron = '<a class="goto" rel="nofollow".*?data-id="([^<]+)".*?'
        patron += 'src="([^"]+)">'
        patron += '([^<]+)<.*?'
        patron += 'src="([^"]+)'
        patron += '">([^<]+).*?'
        patron += '<span>([^<]+)'
        links = scrapertools.find_multiple_matches(bloque_links, patron)
        for id, thumb, server, idiomapng, idioma, calidad in links:
            idioma = idioma.strip()
            calidad = calidad.lower()
            calidad = re.sub(r' ', '-', calidad)
            if calidad == "ts":
                calidad = re.sub(r'ts', 'ts-hq', calidad)
            # The actual link is resolved later via a POST to /goto/.
            url = host + "/goto/"
            url_post = urllib.urlencode({'id': id})
            server_name = scrapertools.get_match(server, '(\w+)\.').replace(
                "waaw", "netutv")
            server_parameters = servertools.get_server_parameters(server_name)
            icon_server = server_parameters.get("thumbnail", "")
            extra = "online"
            title = server_name + " (" + calidad + ") (" + idioma + ")"
            itemlist.append(
                item.clone(title=title,
                           url=url,
                           action="play",
                           thumbnail=icon_server,
                           folder=True,
                           id=url_post,
                           language=idioma,
                           quality=calidad,
                           server=server_name))
    else:
        bloque_dd = scrapertools.find_single_match(
            data, '<\/i>Descargar(.*?)<div class="enlaces">')
        links_dd = scrapertools.find_multiple_matches(
            bloque_dd,
            '<a class="low".*?data-id="(.*?)".*?src="([^"]+)">([^<]+)<.*?src[^<]+>([^<]+).*?<span>([^<]+)'
        )
        for id, thumb, server, idioma, calidad in links_dd:
            idioma = idioma.strip()
            calidad = calidad.lower()
            calidad = re.sub(r' ', '-', calidad)
            if calidad == "ts":
                calidad = re.sub(r'ts', 'ts-hq', calidad)
            # Unknown qualities/languages fall back to a plain brown label.
            if CALIDADES.get(calidad):
                calidad = CALIDADES.get(calidad)
            else:
                calidad = "[COLOR brown]" + calidad + "[/COLOR]"
            if IDIOMAS.get(idioma):
                idioma = IDIOMAS.get(idioma)
            else:
                idioma = "[COLOR brown]" + idioma + "[/COLOR]"
            url = host + "/goto/"
            data_post = urllib.urlencode({'id': id})
            server_name = scrapertools.get_match(server, '(.*?)\.').strip()
            icon_server = os.path.join(config.get_runtime_path(), "resources",
                                       "images", "servers",
                                       "server_" + server_name + ".png")
            icon_server = icon_server.replace('streamin', 'streaminto')
            icon_server = icon_server.replace('ul', 'uploadedto')
            if not os.path.exists(icon_server):
                icon_server = thumb
            extra = "descarga"
            itemlist.append(
                item.clone(title="[COLOR floralwhite][B]" + server +
                           "[/B][/COLOR] " + calidad + " " + idioma,
                           url=url,
                           action="play",
                           thumbnail=icon_server,
                           id=data_post))
    if item.infoLabels["year"]:
        tmdb.set_infoLabels(itemlist)
    if item.contentType == "movie" and item.extra != "descarga" and item.extra != "online":
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir película a la videoteca",
                     action="add_pelicula_to_library",
                     url=item.url,
                     text_color="green",
                     infoLabels={'title': item.fulltitle},
                     thumbnail="http://imgur.com/xjrGmVM.png",
                     fulltitle=item.fulltitle,
                     extra=extra))
    if item.extra != "dd" and item.extra != "descarga" and item.extra != "online":
        bloque_dd = scrapertools.find_single_match(
            data, '<\/i>Descargar(.*?)<div class="enlaces">')
        if bloque_dd:
            itemlist.append(
                item.clone(
                    title="[COLOR aqua][B]Ver enlaces Descarga[/B][/COLOR] ",
                    action="findvideos",
                    thumbnail=thumb,
                    fanart="",
                    contentType=item.contentType,
                    bloque_dd=bloque_dd,
                    extra="dd"))
    return itemlist
コード例 #27
0
ファイル: cartoonlatino.py プロジェクト: x7r6xx/repo
def episodios(item):
    """Build the episode list of a show from its page.

    Scrapes the "lista-capitulos" block, renumbers episodes per season
    (with a dedicated season-rollover scheme for Ranma via
    renumerar_ranma) and appends an "add series to videolibrary" entry
    when supported.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(
        data,
        '<div class="su-list su-list-style-"><ulclass="lista-capitulos">.+?<\/div><\/p>'
    )
    # &#215; is the HTML entity for the multiplication sign the site
    # sometimes uses as the 'x' in SxE numbering.
    if '&#215;' in data_lista:
        data_lista = data_lista.replace('&#215;', 'x')

    show = item.title
    if "[Latino]" in show:
        show = show.replace("[Latino]", "")
    # Ranma pages use a different chapter markup (no season embedded in
    # the chapter number), so they get their own capture pattern.
    if "Ranma" in show:
        patron_caps = '<\/i> <strong>.+?Capitulo ([^"]+)\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    else:
        patron_caps = '<\/i> <strong>Capitulo ([^"]+)x.+?\: <a .+? href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    scrapedplot = scrapertools.find_single_match(
        data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    # number: episode counter within the current season.
    # tempo: season counter (only advanced for Ranma).
    # A: season marker of the previous iteration, used to detect a
    # season change and reset the episode counter.
    number = 0
    ncap = 0
    A = 1
    tempo = 1
    for temp, link, name in matches:
        # NOTE(review): 'temp' is a regex-captured *string* while A
        # starts as the int 1, so this comparison is always True on the
        # first iteration (harmless here since number is already 0) —
        # confirm intended.
        if A != temp and "Ranma" not in show:
            number = 0
        number = number + 1
        if "Ranma" in show:
            # Roll the episode counter into the next season at each
            # boundary; season lengths are 18, 22, 24, 24, 24, 24.
            number, tempo = renumerar_ranma(number, tempo, 18 + 1, 1)
            number, tempo = renumerar_ranma(number, tempo, 22 + 1, 2)
            number, tempo = renumerar_ranma(number, tempo, 24 + 1, 3)
            number, tempo = renumerar_ranma(number, tempo, 24 + 1, 4)
            number, tempo = renumerar_ranma(number, tempo, 24 + 1, 5)
            number, tempo = renumerar_ranma(number, tempo, 24 + 1, 6)
        capi = str(number).zfill(2)
        if "Ranma" in show:
            title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
        else:
            title = "{0}x{1} - ({2})".format(str(temp), capi, name)
        url = link
        A = temp
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 show=show))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR yellow]Añadir " + show +
                 " a la videoteca[/COLOR]",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=show))

    return itemlist
コード例 #28
0
def findvideos(item):
    """Collect playable links for a title from the option tabs.

    Handles three hoster families: 'ultrapeliculashd' embeds (packed JS
    or Google Drive), generic 'stream' hosts (resolved to a direct file
    URL served as 'directo') and everything else (left to servertools
    to identify by URL).
    """
    from lib import jsunpack
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div id=(option.*?) class=play.*?<iframe.*?'
    patron += 'rptss src=(.*?) (?:width.*?|frameborder.*?) allowfullscreen><\/iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, video_url in matches:
        # The language label sits in an HTML comment next to the tab.
        language = scrapertools.find_single_match(
            data, '#%s>.*?-->(.*?)(?:\s|<)' % option)
        if 'sub' in language.lower():
            language = 'SUB'
        language = IDIOMAS[language]
        if 'ultrapeliculashd' in video_url:
            new_data = httptools.downloadpage(video_url).data
            new_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", new_data)
            if 'drive' not in video_url:
                quality = '1080p'
                # The file URL is hidden inside packed eval() JavaScript.
                packed = scrapertools.find_single_match(
                    new_data, '<script>(eval\(.*?)eval')
                unpacked = jsunpack.unpack(packed)
                url = scrapertools.find_single_match(unpacked,
                                                     'file:(http.?:.*?)\}')
            else:
                quality = '1080p'
                url = scrapertools.find_single_match(
                    new_data,
                    '</div><iframe src=([^\s]+) webkitallowfullscreen')

        elif 'stream' in video_url and 'streamango' not in video_url:
            data = httptools.downloadpage('https:' + video_url).data
            # NOTE(review): when 'iframe' IS in video_url, new_data is
            # never assigned here and the try below silently swallows
            # the resulting NameError — confirm this is intended.
            if not 'iframe' in video_url:
                new_url = scrapertools.find_single_match(
                    data, 'iframe src="(.*?)"')
                new_data = httptools.downloadpage(new_url).data
            url = ''
            try:
                url, quality = scrapertools.find_single_match(
                    new_data, 'file:.*?(?:\"|\')(https.*?)(?:\"|\'),'
                    'label:.*?(?:\"|\')(.*?)(?:\"|\'),')
            except:
                pass
            if url != '':
                headers_string = '|Referer=%s' % url
                url = url.replace('download', 'preview') + headers_string

                sub = scrapertools.find_single_match(new_data,
                                                     'file:.*?"(.*?srt)"')
                # NOTE(review): this 'directo' item is appended here AND
                # the generic item below is appended for the same option
                # — confirm the duplicate entry is intended.
                new_item = (Item(title=item.title,
                                 url=url,
                                 quality=quality,
                                 subtitle=sub,
                                 server='directo',
                                 language=language))
                itemlist.append(new_item)

        else:
            url = video_url
            quality = 'default'

        # NOTE(review): in the 'stream' branch 'quality' may be unset if
        # the extraction above failed (stale value from a previous
        # iteration, or NameError on the first one) — TODO confirm.
        if not config.get_setting("unify"):
            title = ' [%s] [%s]' % (quality, language)
        else:
            title = ''

        new_item = (Item(channel=item.channel,
                         title='%s' + title,
                         url=url,
                         action='play',
                         quality=quality,
                         language=language,
                         infoLabels=item.infoLabels))
        itemlist.append(new_item)

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Optionally verify each link is alive before listing it.
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
コード例 #29
0
def findvideos(item):
    """Build the list of playable links for a movie or a single episode.

    Downloads the JSON metadata referenced by ``item.url``, extracts the
    language from each entry's flag-image name, filters episodes by
    season/episode when ``item.ep_info`` is set, and returns Items ready
    for the player, post-processed by servertools / filtertools / autoplay.

    :param item: Item pointing at the title's JSON endpoint.
    :return: list of playable Items (plus an optional videolibrary entry).
    """
    logger.info()

    itemlist = []
    is_tvshow = False
    # NOTE(review): assumes downloadpage(...).json yields a mapping with a
    # 'title' -> 'videos' structure — confirm against the httptools helper.
    json_data = httptools.downloadpage(item.url).json

    if len(json_data) > 0:
        videos_info = json_data['title']['videos']

        # Episode Items carry ep_info; movies leave it empty.
        if str(item.ep_info) != '':
            is_tvshow = True
            epi = item.ep_info
            season = item.contentSeason

        for elem in videos_info:
            # Language code is embedded in the flag image name, e.g. '/es.png'.
            lang = scrapertools.find_single_match(elem['name'], '/(.*?).png')

            if len(lang) > 2 and 'sub' not in lang:
                # Long names like 'flag_es' end with the 2-letter code.
                lang = lang[-2:]
            elif 'sub' in lang:
                lang = 'sub'
            # else:
            #    lang = 'en'

            url = elem['url']

            lang = IDIOMAS.get(lang, 'VO')

            if not config.get_setting('unify'):
                title = ' [%s]' % lang
            else:
                title = ''

            # Movies take every entry; episodes only the matching one.
            if not is_tvshow or (elem['season'] == season
                                 and elem['episode'] == epi):

                itemlist.append(
                    Item(channel=item.channel,
                         # '%s' placeholder is filled with the server name
                         # by get_servers_itemlist below.
                         title='%s' + title,
                         action='play',
                         url=url,
                         language=lang,
                         infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    # Offer the "add to videolibrary" entry only for movies (not episodes),
    # and only when not already coming from the videolibrary itself.
    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
コード例 #30
0
def episodios(item):
    """Scrape the episode list of a series page.

    Parses the HTML at ``item.url`` for chapter entries, renumbers them
    through renumbertools (handling multi-episode links joined by 'as/sd'),
    and returns one Item per available episode plus an optional
    "add series to videolibrary" entry.

    :param item: Item pointing at the series page.
    :return: list of episode Items.
    """
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Running episode counter used for renumbering across the whole list.
    total_episode = 0

    # Raw strings: '\:' and '\/' are invalid escape sequences in plain
    # literals (SyntaxWarning since Python 3.12); the regexes are unchanged.
    patron_caps = r'<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = r'<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(
        data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail

    for cap, link, name in matches:

        title = ""
        # Separator the site uses when one link bundles several episodes.
        pat = "as/sd"
        parts = name.split(pat)
        if len(parts) > 1:
            # Multi-episode link: build a combined "1x01_1x02 " style title.
            for idx, pos in enumerate(parts, start=1):
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.show, 1, total_episode)
                if idx == len(parts):
                    title += "{0}x{1:02d} ".format(season, episode)
                else:
                    title += "{0}x{1:02d}_".format(season, episode)
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, total_episode)

            title += "{0}x{1:02d} ".format(season, episode)

        url = host + "/" + link
        if "disponible" in link:
            # Placeholder entry on the site: skip it (no Item appended).
            title += "No Disponible aún"
        else:
            title += name
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     show=show,
                     plot=scrapedplot,
                     thumbnail=scrapedthumbnail))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=show))

    return itemlist