# Example 1
def play(item):
    """Resolve *item* into a list of playable video Items.

    For links behind the 'captcha' pseudo-server, the page is downloaded;
    if it embeds a CaptchaSecurityImages.php challenge, the captcha image
    is saved to disk and shown to the user via the platform keyboard, and
    the solved code is POSTed back before extracting video links.
    Non-captcha items are returned unchanged.

    :param item: channel Item to resolve.
    :return: list of playable Items.
    """
    logger.info("streamondemand.mondolunatico play")

    itemlist = []

    if item.server == 'captcha':
        headers = [['Referer', item.url]]

        # Download the page
        data = httptools.downloadpage(item.url, headers=headers).data

        if 'CaptchaSecurityImages.php' in data:
            # Download the captcha image (captcha_url is presumably a
            # module-level constant -- not visible in this block)
            img_content = httptools.downloadpage(captcha_url,
                                                 headers=headers).data

            captcha_fname = os.path.join(config.get_data_path(),
                                         __channel__ + "captcha.img")
            with open(captcha_fname, 'wb') as ff:
                ff.write(img_content)

            from platformcode import captcha

            keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
            keyb.doModal()
            if keyb.isConfirmed():
                captcha_text = keyb.getText()
                post_data = urllib.urlencode({
                    'submit1': 'Invia',
                    'security_code': captcha_text
                })
                # Re-request the page with the solved captcha code
                data = httptools.downloadpage(item.url,
                                              post=post_data,
                                              headers=headers).data

            # Best-effort cleanup of the temporary captcha image; only
            # filesystem errors are swallowed, not arbitrary exceptions.
            try:
                os.remove(captcha_fname)
            except OSError:
                pass

        itemlist.extend(servertools.find_video_items(data=data))

        # Propagate the original item's metadata onto every found video.
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = __channel__
    else:
        itemlist.append(item)

    return itemlist
# Example 2
def episodios(item):
    """Scrape the episode list for *item* (legacy scrapertools variant).

    Performs two passes: each pass harvests keeplinks.eu redirect pages
    (setting the anti-popup cookies) and captcha-protected /pass/ pages,
    appending their HTML so that links discovered on fetched pages are
    followed on the next pass. Episode links are then extracted from the
    accumulated HTML, de-duplicated by title.

    :param item: channel Item whose url points at the show page.
    :return: list of "findvideos" Items, one per unique episode title.
    """
    logger.info("streamondemand.mondolunatico episodios")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)

    html = []

    for i in range(2):
        patron = 'href="(https?://www\.keeplinks\.eu/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, link_id in matches:
            # Cookies disable the keeplinks interstitial/popup for this id.
            _headers = list(headers)
            _headers.append([
                'Cookie', 'flag[' + link_id + ']=1; defaults=1; nopopatall=' +
                str(int(time.time()))
            ])
            _headers.append(['Referer', keeplinks])

            html.append(scrapertools.cache_page(keeplinks, headers=_headers))

        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = scrapertools.cache_page(scrapedurl, headers=headers)

            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image (captcha_url is presumably a
                # module-level constant -- not visible in this block)
                img_content = scrapertools.cache_page(captcha_url,
                                                      headers=headers)

                captcha_fname = os.path.join(config.get_data_path(),
                                             __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)

                from platformcode import captcha

                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    post_data = urllib.urlencode({
                        'submit1': 'Invia',
                        'security_code': captcha_text
                    })
                    # Re-request the page with the solved captcha code
                    tmp = scrapertools.cache_page(scrapedurl,
                                                  post=post_data,
                                                  headers=headers)

                # Best-effort cleanup of the temporary captcha image; only
                # filesystem errors are swallowed.
                try:
                    os.remove(captcha_fname)
                except OSError:
                    pass

            html.append(tmp)

        # Next pass re-scans everything fetched so far.
        data = '\n'.join(html)

    encontrados = set()

    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
def episodios(item):
    """Scrape the episode list for *item* (httptools variant).

    NOTE(review): this re-defines ``episodios`` and therefore shadows the
    earlier definition in this module at import time.

    Performs two passes: each pass harvests keeplinks.co redirect pages
    (setting the anti-popup cookies) and captcha-protected /pass/ pages,
    appending their HTML so that links discovered on fetched pages are
    followed on the next pass. Episode links are then extracted from the
    accumulated HTML, de-duplicated by title, and cleaned of release-name
    noise (codec/container tags, separators).

    :param item: channel Item whose url points at the show page.
    :return: list of "findvideos" Items, one per unique episode title.
    """
    logger.info("streamondemand.mondolunatico_new episodios")

    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    html = []

    for i in range(2):
        patron = 'href="(https?://www\.keeplinks\.co/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, link_id in matches:
            # Cookies disable the keeplinks interstitial/popup for this id.
            _headers = [[
                'Cookie', 'flag[' + link_id + ']=1; defaults=1; nopopatall=' +
                str(int(time.time()))
            ], ['Referer', keeplinks]]

            html.append(
                httptools.downloadpage(keeplinks, headers=_headers).data)

        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = httptools.downloadpage(scrapedurl).data

            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image (captcha_url is presumably a
                # module-level constant -- not visible in this block)
                img_content = httptools.downloadpage(captcha_url).data

                captcha_fname = os.path.join(config.get_data_path(),
                                             __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)

                from platformcode import captcha

                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    post_data = urllib.urlencode({
                        'submit1': 'Invia',
                        'security_code': captcha_text
                    })
                    # Re-request the page with the solved captcha code
                    tmp = httptools.downloadpage(scrapedurl,
                                                 post=post_data).data

                # Best-effort cleanup of the temporary captcha image; only
                # filesystem errors are swallowed.
                try:
                    os.remove(captcha_fname)
                except OSError:
                    pass

            html.append(tmp)

        # Next pass re-scans everything fetched so far.
        data = '\n'.join(html)

    encontrados = set()

    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if "mondolunatico.org/goto" in scrapedtitle:
            scrapedtitle = "Lista episodi in Fase di Ripristino >>"
        else:
            scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).title()
        # Strip release-name noise; order matters (".-." before "." etc.).
        for old, new in ((".-.", " "), ("Mkv", ""), (".", " "), ("Mp4", ""),
                         ("Ac3", ""), ("Soft", ""), ("By Bloody", ""),
                         ("Avi", ""), ("Xvid", ""), ("Dvdrip", ""),
                         ("_", " "), ("Spft", ""), ("Internal", ""),
                         ("%2520", " "), ("Html", ""), ("Dvdrip", "")):
            scrapedtitle = scrapedtitle.replace(old, new)
        scrapedtitle = scrapedtitle.strip()
        if "=" in scrapedtitle:
            continue

        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 plot=item.plot,
                 show=item.show))

    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if "mondolunatico.org/goto" in scrapedtitle:
            scrapedtitle = "Lista Episodi in Fase di Ripristino >>"
        else:
            scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).title()

        # Strip release-name noise; order matters (".-." before "." etc.).
        for old, new in ((".-.", " "), ("Mkv", ""), (".", " "), ("Mp4", ""),
                         ("Ac3", ""), ("_", " "), ("By Bloody", ""),
                         ("Avi", ""), ("Xvid", ""), ("Dvdrip-", ""),
                         ("Soft-", ""), ("Spft-", ""), ("Internal", ""),
                         ("%2520", " "), ("Html", ""), ("Dvdrip", "")):
            scrapedtitle = scrapedtitle.replace(old, new)
        scrapedtitle = scrapedtitle.strip()
        if "=" in scrapedtitle:
            continue

        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 plot=item.plot,
                 show=item.show))

    return itemlist