コード例 #1
0
def play(item):
    logger.info("[cineblog01.py] play")

    data = scrapertools.cache_page(item.url)

    print "##############################################################"
    if "go.php" in item.url:
        data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        print "##### play go.php data ##\n%s\n##" % data
    elif "/link/" in item.url:
        from lib.jsbeautifier.unpackers import packer

        try:
            data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = packer.unpack(data)
            print "##### play /link/ unpack ##\n%s\n##" % data
        except IndexError:
            print "##### The content is yet unpacked"

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        print "##### play /link/ data ##\n%s\n##" % data
    else:
        data = item.url
        print "##### play else data ##\n%s\n##" % data
    print "##############################################################"

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
コード例 #2
0
def play(item):
    """Follow the detail page behind *item* and build playable video items."""
    logger.info("streamondemand.streamingfilmit play")

    html = scrapertools.cache_page(item.url, headers=headers)
    html = scrapertools.decodeHtmlentities(html).replace('http://cineblog01.pw', 'http://k4pp4.pw')

    url = scrapertools.find_single_match(html, r'<a\s*href="([^"]+)"><h1')

    page = scrapertools.cache_page(url, headers=headers)

    if "go.php" in url:
        # Redirect helper page: pull the target out of the inline script.
        data = scrapertools.get_match(page, 'window.location.href = "([^"]+)";')
    elif "/link/" in url:
        from lib.jsbeautifier.unpackers import packer
        data = page
        try:
            data = scrapertools.get_match(data, "(eval.function.p,a,c,k,e,.*?)</script>")
            data = packer.unpack(data)
        except IndexError:
            # Page was not packed; keep it as-is.
            pass

        data = scrapertools.get_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
    else:
        # Direct link: nothing to extract.
        data = url

    itemlist = servertools.find_video_items(data=data)

    for video in itemlist:
        video.title = item.show
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = __channel__

    return itemlist
コード例 #3
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the media URL from a megahd page.

    Tries the unpackerjs3 unpacker first and falls back to the generic
    jsbeautifier packer when it is unavailable or fails. Returns a list of
    [label, url] pairs (url may be empty when nothing matches).

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``except Exception``.
    """
    logger.info("[megahd.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)

    # Look for a p,a,c,k,e,d script hiding the player config.
    data_pack = scrapertools.find_single_match(data, "(eval.function.p,a,c,k,e,.*?)\s*</script>")

    if data_pack != "":
        try:
            from core import unpackerjs3
            data_unpack = unpackerjs3.unpackjs(data_pack)
        except Exception:
            # unpackerjs3 missing or failed; fall back below.
            data_unpack = ""
        if data_unpack == "":
            from lib.jsbeautifier.unpackers import packer
            data_unpack = packer.unpack(data_pack)
        data = data_unpack

    video_url = scrapertools.find_single_match(data, 'file"?\s*:\s*"([^"]+)",')
    video_urls.append(["[megahd]", video_url])

    for video_url in video_urls:
        logger.info("[megahd.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
コード例 #4
0
ファイル: rapidvideo.py プロジェクト: umbvitt/pelisalacartait
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return [label, url] pairs for a rapidvideo.org video page.

    Fix: the original built and logged a throwaway list labelled
    "[fastvideo.me]" (with "[fastvideo.py]" log lines — a copy-paste
    leftover from fastvideo.py) before rebuilding the real list; the
    dead duplicate is removed. The returned value is unchanged.
    """
    logger.info("[rapidvideo.py] url=" + page_url)

    video_id = scrapertools.get_match(page_url, 'org/([A-Za-z0-9]+)')
    url = 'http://www.rapidvideo.org/embed-%s-607x360.html' % video_id

    #data = scrapertools.cache_page( url ).replace( 'TMPL_VAR|', '' )
    data = scrapertools.cache_page(url)

    # The media URL lives inside a p,a,c,k,e,d script block.
    packed = scrapertools.get_match(data, "<script type='text/javascript'>eval.function.p,a,c,k,e,.*?</script>")
    unpacked = packer.unpack(packed)
    media_url = scrapertools.get_match(unpacked, 'file:"([^"]+)"')

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [rapidvideo.org]", media_url])

    for video_url in video_urls:
        logger.info("[rapidvideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
コード例 #5
0
def __decode_O(html):
    """Decode the obfuscated redirect URL embedded in *html*.

    The target link is hidden behind two layers: an eval(function(...))
    p,a,c,k,e,d script, and a JSFuck-style scheme where an object ``l``
    maps punctuation-only keys ('$_', '__$', ...) to digits, letters and
    keyword fragments. Returns the href found in the decoded script, or
    None (implicitly) when any stage fails to match.
    NOTE(review): Python 2 only — relies on ``str.decode('unicode_escape')``
    and a ``print`` statement.
    """
    # Stage 1: unpack the packed script, if present.
    match = re.search('>\s*(eval\(function.*?)</script>', html, re.DOTALL)
    if match:
        from lib.jsbeautifier.unpackers import packer
        html = packer.unpack(match.group(1))
        html = html.replace('\\\\', '\\')

    # Stage 2: isolate the obfuscated "l=..." statement.
    match = re.search('(l=.*?)(?:$|</script>)', html, re.DOTALL)
    if match:
        s = match.group(1)

        # Translation table: each key of the "l" object stands for a digit,
        # a hex letter, or a keyword fragment used by the obfuscator.
        O = {
            '___': 0,
            '$$$$': "f",
            '__$': 1,
            '$_$_': "a",
            '_$_': 2,
            '$_$$': "b",
            '$$_$': "d",
            '_$$': 3,
            '$$$_': "e",
            '$__': 4,
            '$_$': 5,
            '$$__': "c",
            '$$_': 6,
            '$$$': 7,
            '$___': 8,
            '$__$': 9,
            '$_': "constructor",
            '$$': "return",
            '_$': "o",
            '_': "u",
            '__': "t",
        }
        # The payload is wrapped as l.$(l.$( <expr> )())(); grab <expr>.
        match = re.search('l\.\$\(l\.\$\((.*?)\)\(\)\)\(\);', s)
        if match:
            s1 = match.group(1)
            s1 = s1.replace(' ', '')
            # JS coerces (![]+"") to the string "false"; substitute it
            # unquoted so later character indexing lines up.
            s1 = s1.replace('(![]+"")', 'false')
            s3 = ''
            # Rebuild the escaped string one '+'-joined token at a time.
            for s2 in s1.split('+'):
                if s2.startswith('l.'):
                    # l.<key>: direct lookup in the translation table.
                    s3 += str(O[s2[2:]])
                elif '[' in s2 and ']' in s2:
                    # <literal>[l.<key>]: pick one character out of the
                    # token at the table-mapped index.
                    key = s2[s2.find('[') + 3:-1]
                    s3 += s2[O[key]]
                else:
                    # Plain quoted literal: strip the surrounding quotes.
                    s3 += s2[1:-1]

            # The tokens form an escaped JS string; undo the escaping.
            s3 = s3.replace('\\\\', '\\')
            s3 = s3.decode('unicode_escape')
            s3 = s3.replace('\\/', '/')
            s3 = s3.replace('\\\\"', '"')
            s3 = s3.replace('\\"', '"')

            print s3

            # The decoded script sets the link via attr("href", "...").
            match = re.search(r'attr\("href",\s*"([^"]+)"', s3)
            if match:
                return match.group(1)
コード例 #6
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve vidto.me download links via the two-step "imhuman" form flow.

    Fetches the page, re-submits the hidden form after the mandatory wait,
    unpacks the packed player script and collects every {label, file} pair
    plus the direct-download anchor. Returns a list of [label, url] pairs.

    Fix: the POST body contained ``"&usr_login="******"&id="`` — a
    secret-redaction artifact that is not valid Python; the ``usr_login``
    value is concatenated back in.
    """
    logger.info("pelisalacarta.servers.vidtome url=" + page_url)

    # First request, just to read the hidden form fields.
    headers = [['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
    data = scrapertools.cache_page(page_url, headers=headers)
    #logger.info("data="+data)

    logger.info("pelisalacarta.servers.vidtome opcion 2")
    op = scrapertools.get_match(data, '<input type="hidden" name="op" value="([^"]+)"')
    logger.info("pelisalacarta.servers.vidtome op=" + op)
    usr_login = ""
    id = scrapertools.get_match(data, '<input type="hidden" name="id" value="([^"]+)"')
    logger.info("pelisalacarta.servers.vidtome id=" + id)
    fname = scrapertools.get_match(data, '<input type="hidden" name="fname" value="([^"]+)"')
    logger.info("pelisalacarta.servers.vidtome fname=" + fname)
    referer = scrapertools.get_match(data, '<input type="hidden" name="referer" value="([^"]*)"')
    logger.info("pelisalacarta.servers.vidtome referer=" + referer)
    hashstring = scrapertools.get_match(data, '<input type="hidden" name="hash" value="([^"]*)"')
    logger.info("pelisalacarta.servers.vidtome hashstring=" + hashstring)
    imhuman = scrapertools.get_match(data, '<input type="submit".*?name="imhuman" value="([^"]+)"').replace(" ", "+")
    logger.info("pelisalacarta.servers.vidtome imhuman=" + imhuman)

    import time
    time.sleep(10)  # the site rejects the second POST if sent too soon

    # Second request, as if the user had clicked the banner, e.g.:
    # op=download1&usr_login=&id=z3nnqbspjyne&fname=Coriolanus_DVDrip_Castellano_by_ARKONADA.avi&referer=&hash=nmnt74bh4dihf4zzkxfmw3ztykyfxb24&imhuman=Continue+to+Video
    # op=download1&usr_login=&id=h6gjvhiuqfsq&fname=GENES1S.avi&referer=&hash=taee4nbdgbuwuxfguju3t6nq2gkdzs6k&imhuman=Proceed+to+video
    post = ("op=" + op + "&usr_login=" + usr_login + "&id=" + id + "&fname=" + fname +
            "&referer=" + referer + "&hash=" + hashstring + "&imhuman=" + imhuman)
    headers.append(["Referer", page_url])
    body = scrapertools.cache_page(page_url, post=post, headers=headers)
    logger.info("body=" + body)

    data = scrapertools.find_single_match(body, "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
    logger.info("data=" + data)
    data = packer.unpack(data)
    logger.info("data=" + data)

    # Extract the URLs, e.g.:
    # {label:"240p",file:"http://188.240.220.186/.../video.mp4"}
    video_urls = []
    media_urls = re.findall(r'\{label:"([^"]+)",file:"([^"]+)"\}', data)
    for label, media_url in media_urls:
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [vidto.me]", media_url])

    # Direct download anchor, exposed as ORIGINAL quality, e.g.:
    # <a id="lnk_download" href="http://188.240.220.186/.../INT3NS4HDTS-L4T.mkv">
    media_url = scrapertools.find_single_match(body, '<a id="lnk_download" href="([^"]+)"')
    if media_url != "":
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (ORIGINAL) [vidto.me]", media_url])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.vidtome %s - %s" % (video_url[0], video_url[1]))

    return video_urls
コード例 #7
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return the playable media link found on a rapidvideo page."""
    logger.info("[rapidvideo.py] url=" + page_url)

    html = scrapertools.cache_page(page_url)

    # The real file URL is hidden inside a p,a,c,k,e,d script block.
    packed_js = scrapertools.get_match(html, "<script type='text/javascript'>eval.function.p,a,c,k,e,.*?</script>")
    plain_js = packer.unpack(packed_js)
    media_url = scrapertools.get_match(plain_js, 'file:"([^"]+)"')

    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    video_urls = []
    video_urls.append([extension + " [rapidvideo.org]", media_url])

    for entry in video_urls:
        logger.info("[rapidvideo.py] %s - %s" % (entry[0], entry[1]))

    return video_urls
コード例 #8
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a fastvideo.me page into a list of [label, url] pairs."""
    logger.info("[fastvideo.py] url=" + page_url)

    video_id = scrapertools.get_match(page_url, 'me/([A-Za-z0-9]+)')
    embed_url = 'http://www.fastvideo.me/embed-%s-607x360.html' % video_id

    html = scrapertools.cache_page(embed_url)

    # The media URL is hidden inside a p,a,c,k,e,d script.
    packed_js = scrapertools.get_match(html, "<script type='text/javascript'>eval.function.p,a,c,k,e,.*?</script>")
    media_url = scrapertools.get_match(packer.unpack(packed_js), 'file:"([^"]+)"')

    label = scrapertools.get_filename_from_url(media_url)[-4:] + " [fastvideo.me]"
    video_urls = [[label, media_url]]

    for entry in video_urls:
        logger.info("[fastvideo.py] %s - %s" % (entry[0], entry[1]))

    return video_urls
コード例 #9
0
def findvid(item):
    """List the playable video items found on the page at item.url."""
    logger.info("[altadefinizione01.py] findvideos")

    ## Download the page and look for a packed JS block.
    html = scrapertools.cache_page(item.url)
    packed = scrapertools.find_single_match(html, "(eval.function.p,a,c,k,e,.*?)\s*</script>")
    if packed == "":
        # No packed script: let servertools inspect the item directly.
        return servertools.find_video_items(item=item)

    from lib.jsbeautifier.unpackers import packer
    unpacked = packer.unpack(packed).replace(r'\\/', '/')
    itemlist = servertools.find_video_items(data=unpacked)

    for videoitem in itemlist:
        videoitem.title = "".join([item.title, videoitem.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def findvid(item):
    """Collect video items for *item*, unpacking obfuscated JS when present."""
    logger.info("[altadefinizione01.py] findvideos")

    ## Download the page
    page = scrapertools.cache_page(item.url)
    packed_js = scrapertools.find_single_match(page, "(eval.function.p,a,c,k,e,.*?)\s*</script>")
    if packed_js != "":
        from lib.jsbeautifier.unpackers import packer
        plain_js = packer.unpack(packed_js).replace(r'\\/', '/')
        results = servertools.find_video_items(data=plain_js)

        for entry in results:
            entry.title = "".join([item.title, entry.title])
            entry.fulltitle = item.fulltitle
            entry.thumbnail = item.thumbnail
            entry.channel = __channel__
    else:
        # Nothing packed on the page; fall back to direct inspection.
        results = servertools.find_video_items(item=item)

    return results
コード例 #11
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a rapidvideo (.tv/.org) page into [label, url] pairs."""
    logger.info("[rapidvideo.py] url=" + page_url)

    # .tv links are served from the .org domain.
    page_url = page_url.replace(".tv/", ".org/")

    video_id = scrapertools.get_match(page_url, "org/([A-Za-z0-9]+)")
    embed_url = "http://www.rapidvideo.org/embed-%s-607x360.html" % video_id

    html = scrapertools.cache_page(embed_url)

    # The file URL is hidden inside a p,a,c,k,e,d script block.
    packed_js = scrapertools.get_match(html, "<script type='text/javascript'>eval.function.p,a,c,k,e,.*?</script>")
    media_url = scrapertools.get_match(packer.unpack(packed_js), 'file:"([^"]+)"')

    label = scrapertools.get_filename_from_url(media_url)[-4:] + " [rapidvideo.org]"
    video_urls = [[label, media_url]]

    for entry in video_urls:
        logger.info("[rapidvideo.py] %s - %s" % (entry[0], entry[1]))

    return video_urls
コード例 #12
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve vidto.me links via the two-step "imhuman" form flow.

    Fetches the page, re-submits the hidden form after the mandatory wait,
    unpacks the packed player script and collects every {label, file} pair
    plus the direct-download anchor. Returns a list of [label, url] pairs.

    Fix: the POST body contained ``"&usr_login="******"&id="`` — a
    secret-redaction artifact that is not valid Python; the ``usr_login``
    value is concatenated back in. Also drops a doubled
    ``video_urls = []`` initialisation.
    """
    logger.info("streamondemand.servers.vidtome url=" + page_url)

    data = scrapertools.cache_page(page_url, headers=headers)
    # logger.info("data="+data)

    op = scrapertools.get_match(data, '<input type="hidden" name="op" value="([^"]+)"')
    usr_login = ""
    id = scrapertools.get_match(data, '<input type="hidden" name="id" value="([^"]+)"')
    fname = scrapertools.get_match(data, '<input type="hidden" name="fname" value="([^"]+)"')
    referer = scrapertools.get_match(data, '<input type="hidden" name="referer" value="([^"]*)"')
    hashstring = scrapertools.get_match(data, '<input type="hidden" name="hash" value="([^"]*)"')
    imhuman = scrapertools.get_match(data, '<input type="submit".*?name="imhuman" value="([^"]+)"').replace(" ", "+")

    import time
    time.sleep(10)  # the site rejects the second POST if sent too soon

    post = ("op=" + op + "&usr_login=" + usr_login + "&id=" + id + "&fname=" + fname +
            "&referer=" + referer + "&hash=" + hashstring + "&imhuman=" + imhuman)
    headers.append(["Referer", page_url])
    body = scrapertools.cache_page(page_url, post=post, headers=headers)

    patron = "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>"
    data = scrapertools.find_single_match(body, patron)
    data = packer.unpack(data)

    media_urls = re.findall(r'\{label:"([^"]+)",file:"([^"]+)"\}', data)
    video_urls = []
    for label, media_url in media_urls:
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [vidto.me]", media_url])

    # Direct download anchor, exposed as ORIGINAL quality.
    patron = '<a id="lnk_download" href="([^"]+)"'
    media_url = scrapertools.find_single_match(body, patron)
    if media_url != "":
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (ORIGINAL) [vidto.me]", media_url])

    for video_url in video_urls:
        logger.info("streamondemand.servers.vidtome %s - %s" % (video_url[0], video_url[1]))

    return video_urls
コード例 #13
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Fetch a megahd page and return [label, url] pairs for its media file."""
    logger.info("[megahd.py] get_video_url(page_url='%s')" % page_url)

    page = scrapertools.cache_page(page_url)

    # Look for a p,a,c,k,e,d script hiding the player config.
    packed = scrapertools.find_single_match(
        page, "(eval.function.p,a,c,k,e,.*?)\s*</script>")

    video_urls = []
    if packed != "":
        from lib.jsbeautifier.unpackers import packer
        plain = packer.unpack(packed)
        found = scrapertools.find_single_match(plain,
                                               'file"?\s*:\s*"([^"]+)",')
        video_urls.append(["[megahd]", found])

    for entry in video_urls:
        logger.info("[megahd.py] %s - %s" % (entry[0], entry[1]))

    return video_urls
コード例 #14
0
ファイル: flashx.py プロジェクト: CmosGit/pelisalacarta-Cmos
def get_link(data):
    """Return the .mp4 URLs from the first packed script in *data*, else None."""
    packed = re.search('(eval\(function\(p,a,c,k,e,d\).*?)</script>', data, re.DOTALL)
    if packed:
        unpacked = packer.unpack(packed.group(1))
        return re.findall('file\s*:\s*"([^"]+\.mp4)"', unpacked)
コード例 #15
0
def get_link(data):
    """Extract .mp4 file URLs from the first packed JS block, if any."""
    pattern = '(eval\(function\(p,a,c,k,e,d\).*?)</script>'
    for packed in re.finditer(pattern, data, re.DOTALL):
        # Only the first packed block is ever consulted.
        return re.findall('file\s*:\s*"([^"]+\.mp4)"',
                          packer.unpack(packed.group(1)))