Example 1
def get_premium(item, url, lang):
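    # Fetch the premium embed page, decode it with dejuice() and return one playable Item per quality found.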
    logger.info()

    itemlist = list()

    from lib.generictools import dejuice
    try:
        data = httptools.downloadpage(url, timeout=5).data
        dejuiced = dejuice(data)
    except:
        return itemlist

    patron = r'"file":"([^"]+)","label":"(\d+P)"'
    matches = re.compile(patron, re.DOTALL).findall(dejuiced)
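    # Each match is a direct stream URL together with its quality label (e.g. "720P").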
    for url, qlty in matches:
        itemlist.append(
            Item(channel=item.channel,
                 title="%s",
                 action="play",
                 url=url,
                 language=IDIOMAS.get(lang, "VOSE"),
                 quality=qlty,
                 infoLabels=item.infoLabels))

    return itemlist
Example 2
def findvideos(item):
    logger.info()
    import urllib
    itemlist = []

    data = get_source(item.url)
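    # Every "video[n] = ..." javascript assignment on the page holds one embed URL.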
    patron = 'video\[\d+\] = "([^"]+)";'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for video_url in matches:

        language = 'latino'
        if not config.get_setting('unify'):
            title = ' [%s]' % language.capitalize()
        else:
            title = ''

        if 'pelisplus.net' in video_url:
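            # pelisplus.net embeds expose their sources through a POST to their /api/sources/ endpoint.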
            referer = video_url
            post = {'r': item.url}
            post = urllib.urlencode(post)
            video_url = video_url.replace('/v/', '/api/sources/')
            url_data = httptools.downloadpage(video_url,
                                              post=post,
                                              headers={
                                                  'Referer': referer
                                              }).data
            patron = '"file":"([^"]+)","label":"([^"]+)"'
            matches = re.compile(patron, re.DOTALL).findall(url_data)
            for url, quality in matches:
                url = url.replace('\/', '/')
                itemlist.append(
                    Item(channel=item.channel,
                         title='%s' + title,
                         url=url,
                         action='play',
                         language=IDIOMAS[language],
                         quality=quality,
                         infoLabels=item.infoLabels))

        else:
            url_data = get_source(video_url)
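        # Follow a possible intermediate iframe to reach the real hoster page.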
        url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
        if 'server' in url:
            hidden_data = get_source(url)
            url = scrapertools.find_single_match(hidden_data,
                                                 '<iframe src="([^"]+)"')

        else:
            if 'pelishd.net' in url:
                vip_data = httptools.downloadpage(url,
                                                  headers={
                                                      'Referer': item.url
                                                  },
                                                  follow_redirects=False).data
                dejuiced = generictools.dejuice(vip_data)
                url = scrapertools.find_single_match(dejuiced,
                                                     '"file":"([^"]+)"')

        if url != '':
            itemlist.append(
                Item(channel=item.channel,
                     title='%s' + title,
                     url=url,
                     action='play',
                     language=IDIOMAS[language],
                     infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))
    return itemlist
Example 3
def findvideos(item):
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for id, option, lang in matches:
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        url = ''

        post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
        post = urllib.urlencode(post)
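        # Ask the Dooplay-style admin-ajax endpoint (action doo_player_ajax) for the player of this option.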
        test_url = 'https://pelisr.com/wp-admin/admin-ajax.php'
        new_data = httptools.downloadpage(test_url, post=post).data
        scrapedurl = scrapertools.find_single_match(new_data, "src='([^']+)'")

        if lang not in IDIOMAS:
            lang = 'en'
        title = '%s'

        if 'drive' in scrapedurl:
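            # "drive" embeds come back obfuscated; dejuice() recovers the file URL and its quality label.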
            try:
                enc_data = httptools.downloadpage(scrapedurl, headers = {'Referer':item.url}).data
                dec_data = generictools.dejuice(enc_data)
                url, quality = scrapertools.find_single_match(dec_data, '"file":"(.*?)","label":"(.*?)"')
            except:
                pass
        else:
            url = scrapedurl
        try:
            if url:
                url = url + "|referer=%s" % item.url
                itemlist.append(
                    Item(channel=item.channel, url=url, title=title, action='play', quality=quality,
                         language=IDIOMAS[lang], infoLabels=item.infoLabels))
        except:
            pass

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    # Required for link filtering

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
Example 4
def findvideos(item):
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    data = data.replace("'", '"')
    patron = 'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+).*?img src=\"([^"]+)\"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for type, id, option, lang in matches:
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        if lang not in IDIOMAS:
            lang = 'en'
        if not config.get_setting('unify'):
            title = ' [%s]' % IDIOMAS[lang]
        else:
            title = ''

        post = {
            'action': 'doo_player_ajax',
            'post': id,
            'nume': option,
            'type': type
        }
        post = urllib.urlencode(post)

        test_url = '%swp-admin/admin-ajax.php' % host
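        # Resolve this player option through the Dooplay-style admin-ajax endpoint.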
        new_data = httptools.downloadpage(test_url,
                                          post=post,
                                          headers={
                                              'Referer': item.url
                                          }).data
        test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
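        # Intermediate "xyz" pages wrap the real players in addiframe() calls that have to be unpacked one by one.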
        if 'xyz' in test_url:
            new_data = get_source(test_url, item.url)
            patron = "addiframe\('([^']+)'"
            matches = scrapertools.find_multiple_matches(new_data, patron)

            for test_url in matches:
                url = ''
                if 'play.php' in test_url:
                    new_data = get_source(test_url)
                    enc_data = scrapertools.find_single_match(
                        new_data, '(eval.*?)</script')

                    dec_data = jsunpack.unpack(enc_data)
                    url = scrapertools.find_single_match(
                        dec_data, 'src="([^"]+)"')
                elif 'embedvip' in test_url:
                    new_data = get_source(test_url)
                    dejuiced = generictools.dejuice(new_data)
                    url = scrapertools.find_single_match(
                        dejuiced, '"file":"([^"]+)"')
                if url != '':
                    itemlist.append(
                        Item(channel=item.channel,
                             url=url,
                             title='%s' + title,
                             action='play',
                             quality=quality,
                             language=IDIOMAS[lang],
                             infoLabels=item.infoLabels))
        else:
            new_data = get_source(test_url, item.url)
            patron = 'data-embed="([^"]+)" data-issuer="([^"]+)" data-signature="([^"]+)"'
            matches = scrapertools.find_multiple_matches(new_data, patron)

            for st, vt, tk in matches:
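                # Each embed carries signed parameters that are posted back to the edge-data endpoint to obtain the final URL.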
                post = {'streaming': st, 'validtime': vt, 'token': tk}
                post = urllib.urlencode(post)
                new_url = '%sedge-data/' % 'https://peliculonhd.net/'
                new_data = httptools.downloadpage(new_url,
                                                  post,
                                                  headers={
                                                      'Referer': test_url
                                                  }).data
                json_data = jsontools.load(new_data)
                if 'peliculonhd' not in json_data['url']:
                    url = json_data['url']
                else:
                    new_data = get_source(json_data['url'], test_url)
                    url = scrapertools.find_single_match(
                        new_data, 'src: "([^"]+)"')
                    url = url.replace('download', 'preview')
                if url != '':
                    itemlist.append(
                        Item(channel=item.channel,
                             url=url,
                             title='%s' + title,
                             action='play',
                             quality=quality,
                             language=IDIOMAS[lang],
                             infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required for link filtering

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Example 5
def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>[^"<]+</span>(.*?)</li>'  # option, servername, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, quote in matches:
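        # Each player option pairs a server with its "language - quality" label.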
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        lang = lang.lower().strip()
        languages = {
            'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
            'castellano': '[COLOR green](CAST)[/COLOR]',
            'subtitulado': '[COLOR red](VOS)[/COLOR]'
        }

        if lang in languages:
            lang = languages[lang]
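        # Resolve the iframe behind this option; obfuscated players are decoded with generictools.dejuice().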

        url_1 = scrapertools.find_single_match(
            data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' %
            option)
        new_data = httptools.downloadpage(url_1).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        patron1 = '<iframe width="560" height="315" src="([^"]+)"'
        match1 = re.compile(patron1, re.DOTALL).findall(new_data)

        urls = scrapertools.find_single_match(
            new_data, '<iframe width="560" height="315" src="([^"]+)"')
        servername = servertools.get_server_from_url(urls)
        if 'stream.pelishd24.net' in urls:
            vip_data = httptools.downloadpage(urls).data
            dejuiced = generictools.dejuice(vip_data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(dejuiced)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'
        if 'pelishd24.com/?trhide' in urls:
            data = httptools.downloadpage(urls).data
            # logger.error(texto='****hex'+data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(data)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)
        if 'embed.pelishd24.com' not in urls and 'embed.pelishd24.net' not in urls:
            itemlist.append(
                item.clone(action='play',
                           title=title,
                           url=urls,
                           language=lang,
                           quality=quality,
                           text_color=color3))

        for url in match1:
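            # Some options chain a second level of iframes; resolve each one down to its final hoster URL.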
            new_data = httptools.downloadpage(url).data
            new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "",
                              new_data)
            new_data = scrapertools.decodeHtmlentities(new_data)
            patron1 = '\["\d+","([^"]+)",\d+]'
            match1 = re.compile(patron1, re.DOTALL).findall(new_data)
            for url in match1:
                url = url.replace('\\', '')
                servername = servertools.get_server_from_url(url)
                if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
                    vip_data = httptools.downloadpage(url).data
                    dejuiced = generictools.dejuice(vip_data)
                    patron = '"file":"([^"]+)"'
                    match = re.compile(patron, re.DOTALL).findall(dejuiced)
                    for scrapedurl in match:
                        url = scrapedurl
                        servername = 'gvideo'

                if 'ww3.pelishd24.com' in url:
                    data1 = httptools.downloadpage(url).data
                    url = scrapertools.find_single_match(
                        data1, '"file": "([^"]+)"')
                    servername = 'gvideo'

                title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
                    servername.title(), quality, lang)

                itemlist.append(
                    item.clone(action='play',
                               title=title,
                               url=url,
                               language=lang,
                               quality=quality,
                               text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=__channel__,
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 thumbnail=thumbnail_host,
                 contentTitle=item.contentTitle))

    return itemlist
Example 6
def findvideos(item):
    logger.info()
    import urllib
    itemlist = []

    data = get_source(item.url)
    patron = 'video\[\d+\] = "([^"]+)";'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for video_url in matches:
        language = 'latino'
        url = ''
        if not config.get_setting('unify'):
            title = ' [%s]' % IDIOMAS[language]
        else:
            title = ''

        if 'pelisplus.net' in video_url:
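            # pelisplus.net embeds are delegated to the add_vip() helper, which returns ready-to-play Items.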
            itemlist += add_vip(item, video_url, IDIOMAS[language])


        # elif not 'vidoza' in video_url and not 'pelishd' in video_url:
        #     url_data = get_source(video_url)
        #     url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
        #
        else:
            url = video_url

        if 'server' not in url:

            if 'pelishd' in url:
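                # pelishd embeds are obfuscated; decode them with generictools.dejuice() and collect every file/label pair.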
                vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False)
                try:
                    dejuiced = generictools.dejuice(vip_data.data)
                    urls = scrapertools.find_multiple_matches(dejuiced, '"file":"([^"]+)","label":"([^"]+)"')
                    for new_url, quality in urls:
                        new_url = new_url.replace('unicorn', 'dragon')
                        new_url = new_url + '|referer=%s' % url
                        itemlist.append(
                            Item(channel=item.channel, title='%s' + title + " " + quality, url=new_url, action='play',
                                 language=IDIOMAS[language], quality=quality, infoLabels=item.infoLabels))
                except:
                    pass

        if url != '' and 'rekovers' not in url and 'pelishd' not in url:
            itemlist.append(
                Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
                     infoLabels=item.infoLabels))


    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel,
                                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                                 url=item.url,
                                 action="add_pelicula_to_library",
                                 extra="findvideos",
                                 contentTitle=item.contentTitle))
    return itemlist
Example 7
def findvideos(item):
    logger.info()
    from lib import generictools
    itemlist = []
    data = get_source(item.url)
    patron = 'id="option-(\d+).*?rptss" src="([^"]+)" frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, scrapedurl in matches:
        lang = scrapertools.find_single_match(
            data, 'href="#option-%s">.*?/flags/(.*?).png' % option)
        quality = ''
        url = ''
        if lang not in IDIOMAS:
            lang = 'en'
        title = '%s'

        if 'embed' in scrapedurl:
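            # "embed" options hide the file URL behind an obfuscated player that generictools.dejuice() can decode.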
            enc_data = get_source(scrapedurl)
            dec_data = generictools.dejuice(enc_data)
            url, quality = scrapertools.find_single_match(
                dec_data, '"file":"(.*?)","label":"(.*?)"')

        elif 'wd=' in scrapedurl:
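            # "wd=" options carry a reversed id; the redirect's Location header points at the real hoster.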
            new_id = scrapertools.find_single_match(scrapedurl, 'wd=(.*?)&')
            new_id = new_id[::-1]
            new_url = 'https://pelisr.com/encri/?wr=%s' % new_id
            headers = {'Referer': scrapedurl}
            data = httptools.downloadpage(new_url,
                                          headers=headers,
                                          follow_redirects=False)
            url = data.headers['location']

        if url:
            itemlist.append(
                Item(channel=item.channel,
                     url=url,
                     title=title,
                     action='play',
                     quality=quality,
                     language=IDIOMAS[lang],
                     infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    # Required for link filtering

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist