Code example #1
def play(item):
    data = item.url

    # Follow shortener redirects while the URL still points at vcrypt/linkup,
    # but stop as soon as it resolves to a cryptmango page. The parentheses
    # matter here: 'and' binds tighter than 'or' in Python.
    while ('vcrypt' in data or 'linkup' in data) and 'cryptmango' not in data:
        data = httptools.downloadpage(data,
                                      only_headers=True,
                                      follow_redirects=False).headers.get(
                                          "location", "")

    data = expurl.expand_url(data)

    while ('vcrypt' in data or 'linkup' in data) and 'cryptmango' not in data:
        data = httptools.downloadpage(data,
                                      only_headers=True,
                                      follow_redirects=False).headers.get(
                                          "location", "")

    if 'cryptmango' in data:
        data = httptools.downloadpage(data).data

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
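
A note on the loop condition above: Python's `and` binds tighter than `or`, so the unparenthesized original (`'vcrypt' in data or 'linkup' in data and 'cryptmango' not in data`) kept looping on any vcrypt URL even once it pointed at a cryptmango page. These snippets are excerpts from Kodi addon channels and assume the addon core's module-level helpers (httptools, servertools, scrapertools, expurl, logger, Item). A quick standalone check of the precedence fix, with a made-up URL:

# Made-up URL, for illustration only: a vcrypt hop whose target is cryptmango.
data = "https://vcrypt.net/open/cryptmango_target"

buggy = 'vcrypt' in data or 'linkup' in data and 'cryptmango' not in data
fixed = ('vcrypt' in data or 'linkup' in data) and 'cryptmango' not in data

print(buggy)  # True  -> the loop would fetch again and lose the URL
print(fixed)  # False -> the loop exits and the cryptmango branch runs
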
Code example #2
def findvideos(item):
    logger.info("streamondemand.animeforce findvideos")

    itemlist = []

    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data

        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        scrapedurl = scrapertools.find_single_match(
            blocco, r'<a href="([^"]+)"[^>]+>')
        url = scrapedurl
    else:
        url = item.url

    # Resolve ad-shortener layers first: expand adf.ly links via expurl, and
    # read the redirect target straight from the headers for bit.ly links.
    if 'adf.ly' in url:
        url = expurl.expand_url(url)
    elif 'bit.ly' in url:
        url = httptools.downloadpage(
            url, only_headers=True,
            follow_redirects=False).headers.get("location")

    if 'animeforce' in url:
        headers.append(['Referer', item.url])
        data = httptools.downloadpage(url, headers=headers).data
        itemlist.extend(servertools.find_video_items(data=data))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

        url = url.split('&')[0]
        data = httptools.downloadpage(url, headers=headers).data
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        matches = re.compile(patron, re.DOTALL).findall(data)
        headers.append(['Referer', url])
        for video in matches:
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=item.title,
                     url=video + '|' + urllib.urlencode(dict(headers)),
                     folder=False))
    else:
        itemlist.extend(servertools.find_video_items(data=url))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

    return itemlist
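
The mp4 extraction above hinges on the `<source ...>` regex. Run standalone against a fabricated HTML fragment (real animeforce markup may differ), it captures the direct video URL:

import re

# Fabricated fragment, for illustration only.
data = '<video><source src="https://example.com/ep01.mp4" type="video/mp4"></video>'

patron = r"""<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
print(re.compile(patron, re.DOTALL).findall(data))
# ['https://example.com/ep01.mp4']
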
Code example #3
def play(item):
    logger.info()

    video = expurl.expand_url(item.url)

    itemlist = servertools.find_video_items(data=video)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
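
`expurl.expand_url` is the addon's own shortener resolver. As a rough stand-in for readers without that module, its core behavior can be approximated by walking Location headers; this sketch uses requests (which code example #7 also imports) and only covers plain 30x hops, not the ad-gate pages the real helper also handles:

import requests

def expand_url(short_url, max_hops=5):
    # Best-effort approximation: follow redirects one hop at a time
    # and return the last URL seen.
    url = short_url
    for _ in range(max_hops):
        resp = requests.get(url, allow_redirects=False)
        location = resp.headers.get('location')
        if not location:
            break
        url = location
    return url
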
Code example #4
def findvideos(item):
    logger.info("[RedAnimeDatabase.py]==> findvideos")

    # Expand every shortened href packed into item.url and append the long
    # URLs so find_video_items can recognise the servers behind them.
    for url in scrapertools.find_multiple_matches(item.url, r'href="([^"]+)'):
        item.url += '\n' + expurl.expand_url(url)

    itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(
            ["[%s] " % color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__
    return itemlist
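
The `re.sub` above collapses the bracketed server label that `find_video_items` leaves in each title down to a bare server name before recoloring it. With some illustrative labels:

import re

# Illustrative titles in the shape find_video_items tends to produce.
for title in [' [openload] ', ' - streamango -', '[vidoza]']:
    print(re.sub(r'[-\[\]\s]+', '', title))
# openload
# streamango
# vidoza
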
Code example #5
def play(item):
    logger.info("[descargacineclasico.py] play")

    video = expurl.expand_url(item.url)

    itemlist = servertools.find_video_items(data=video)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
Code example #6
File: zcrypt.py  Project: Jpocas3212/salva59sg
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):

    # Pre-seed the "found" set with static asset URLs that the patterns below
    # would otherwise pick up as video links.
    encontrados = {
        'https://vcrypt.net/images/logo', 'https://vcrypt.net/css/out',
        'https://vcrypt.net/images/favicon', 'https://vcrypt.net/css/open',
        'http://linkup.pro/js/jquery', 'https://linkup.pro/js/jquery',
        'http://www.rapidcrypt.net/open'
    }
    devuelve = []

    patronvideos = [
        r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
        r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_]+)'
    ]

    for patron in patronvideos:
        logger.info(" find_videos #" + patron + "#")
        matches = re.compile(patron).findall(page_url)

        for url, host in matches:
            if url not in encontrados:
                logger.info("  url=" + url)
                encontrados.add(url)

                if host == 'gestyy':
                    # Fetch only the headers (no redirect following) and read
                    # the target from Location, using a curl User-Agent.
                    resp = httptools.downloadpage(
                        url,
                        follow_redirects=False,
                        cookies=False,
                        only_headers=True,
                        replace_headers=True,
                        headers={'User-Agent': 'curl/7.59.0'})
                    data = resp.headers.get("location", "")
                elif 'vcrypt.net' in url:
                    from lib import unshortenit
                    data, status = unshortenit.unshorten(url)

                elif 'linkup' in url:
                    idata = httptools.downloadpage(url).data
                    data = scrapertoolsV2.find_single_match(
                        idata, r"<iframe[^<>]*src=\'([^'>]*)\'[^<>]*>")
                else:
                    # Generic shortener: hop through Location headers until
                    # the URL leaves the host's domain or stops redirecting.
                    data = ""
                    while host in url:
                        resp = httptools.downloadpage(url,
                                                      follow_redirects=False)
                        url = resp.headers.get("location", "")
                        if not url:
                            data = resp.data
                        elif host not in url:
                            data = url
                if data:
                    devuelve.append(data)
            else:
                logger.info("  url duplicada=" + url)

    patron = r"""(https?://(?:www\.)?(?:threadsphere\.bid|adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co|adfoc\.us|lnx\.lu|sh\.st|href\.li|anonymz\.com|shrink-service\.it|rapidcrypt\.net)/[^"']+)"""

    logger.info(" find_videos #" + patron + "#")
    matches = re.compile(patron).findall(page_url)

    for url in matches:
        if url not in encontrados:
            logger.info("  url=" + url)
            encontrados.add(url)

            long_url = expurl.expand_url(url)
            if long_url:
                devuelve.append(long_url)
        else:
            logger.info("  url duplicada=" + url)

    ret = page_url + " " + str(devuelve) if devuelve else page_url
    logger.info(" RET=" + str(ret))
    return ret
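
Each pattern in `patronvideos` has two capture groups, which is why the loop unpacks `url, host` pairs. A standalone run on fabricated input shows the shape of the matches:

import re

patronvideos = [
    r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
    r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_]+)'
]

# Fabricated page text, for illustration only.
page_url = 'see https://gestyy.com/abc123 and https://vcrypt.net/fast/xYz_9'

for patron in patronvideos:
    print(re.compile(patron).findall(page_url))
# [('https://gestyy.com/abc123', 'gestyy')]
# [('https://vcrypt.net/fast/xYz_9', 'vcrypt')]
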
Code example #7
def find_videos(text):
    encontrados = {
        'https://vcrypt.net/images/logo', 'https://vcrypt.net/css/out',
        'https://vcrypt.net/images/favicon', 'https://vcrypt.net/css/open',
        'http://linkup.pro/js/jquery', 'https://linkup.pro/js/jquery',
        'http://www.rapidcrypt.net/open'
    }
    devuelve = []

    patronvideos = [
        r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
        r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_]+)'
    ]

    import requests

    # Browser-like User-Agent, reused for every request below.
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'
    }

    for patron in patronvideos:
        logger.info(" find_videos #" + patron + "#")
        matches = re.compile(patron).findall(text)

        for url, host in matches:
            if url not in encontrados:
                logger.info("  url=" + url)
                encontrados.add(url)

                if host == 'gestyy':
                    resp = httptools.downloadpage(
                        url,
                        follow_redirects=False,
                        cookies=False,
                        only_headers=True,
                        replace_headers=True,
                        headers={'User-Agent': 'curl/7.59.0'})
                    data = resp.headers.get("location", "")
                elif 'vcrypt.net' in url:
                    # req = httptools.downloadpage(url)
                    req = requests.get(url, headers=headers)
                    idata = req.content
                    patron = r"document.cookie\s=\s.*?'(.*)'"
                    # matches = re.compile(patron, re.IGNORECASE).findall(idata)
                    matches = re.finditer(patron, idata, re.MULTILINE)
                    mcookie = {}
                    for match in matches:
                        for c in match.group(1).split("; "):
                            # Split on the first '=' only; cookie values may
                            # themselves contain '='.
                            c, v = c.split('=', 1)
                            mcookie[c] = v

                    try:
                        # The destination sits in a meta-refresh URL; POST to
                        # it with the scraped cookies.
                        patron = r';URL=([^\"]+)\">'
                        dest = scrapertools.get_match(idata, patron)
                        r = requests.post(dest, cookies=mcookie)
                        url = r.url
                    except Exception:
                        r = requests.get(req.url, headers=headers)
                        if r.url == url:
                            url = ""

                    if "4snip" in url:
                        desturl = url.replace("/out/", "/outlink/")
                        import os
                        par = os.path.basename(desturl)
                        rdata = requests.post(desturl, data={'url': par})
                        url = rdata.url

                    if "wstream" in url:
                        url = url.replace("/video/", "/")

                    data = url

                else:
                    data = ""
                    while host in url:
                        resp = httptools.downloadpage(url,
                                                      follow_redirects=False)
                        url = resp.headers.get("location", "")
                        if not url:
                            data = resp.data
                        elif host not in url:
                            data = url
                if data:
                    devuelve.append(data)
            else:
                logger.info("  url duplicada=" + url)

    patron = r"""(https?://(?:www\.)?(?:threadsphere\.bid|adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co|adfoc\.us|lnx\.lu|sh\.st|href\.li|anonymz\.com|shrink-service\.it|rapidcrypt\.net)/[^"']+)"""

    logger.info(" find_videos #" + patron + "#")
    matches = re.compile(patron).findall(text)

    for url in matches:
        if url not in encontrados:
            logger.info("  url=" + url)
            encontrados.add(url)

            long_url = expurl.expand_url(url)
            if long_url:
                devuelve.append(long_url)
        else:
            logger.info("  url duplicada=" + url)

    ret = servertools.findvideos(str(devuelve)) if devuelve else []
    return ret
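
The vcrypt branch above scrapes a `document.cookie` assignment out of the page and replays it as a requests cookie dict. Against a fabricated script line (real vcrypt pages will differ), the parsing goes like this:

import re

# Fabricated vcrypt-style fragment, for illustration only.
idata = "document.cookie = 'sedPass=abc123; expires=0; path=/'"

mcookie = {}
for match in re.finditer(r"document.cookie\s=\s.*?'(.*)'", idata, re.MULTILINE):
    for c in match.group(1).split('; '):
        c, v = c.split('=', 1)
        mcookie[c] = v

print(mcookie)  # {'sedPass': 'abc123', 'expires': '0', 'path': '/'}
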