Esempio n. 1
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the final playable URL(s) for an Openload page.

    Returns a list of [label, url] pairs; each url carries a
    "|User-Agent=...|" suffix so the player sends the right header.
    premium/user/password/video_password belong to the common server
    interface and are unused here.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    video = True
    data = scrapertools.downloadpageWithoutCookies(page_url)

    if "videocontainer" not in data:
        # No embedded player: fall back to the "/f/" download page.
        video = False
        url = page_url.replace("/embed/","/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.get_match(data,"Click to start Download.*?<script[^>]+>(.*?)</script")
    else:
        text_encode = scrapertools.get_match(data,"<video[^<]+<script[^>]+>(.*?)</script>")

    # BUGFIX: decode the extracted <script> payload, not the whole page
    # (previously called decode(data), matching no sibling implementation).
    text_decode = decode(text_encode)

    # Header for the download (player must send our User-Agent).
    header_down = "|User-Agent="+headers['User-Agent']+"|"
    if video:
        videourl = scrapertools.get_match(text_decode, "(http.*?true)")
        # Follow the redirect to the real media host.
        videourl = scrapertools.get_header_from_response(videourl,header_to_get="location")
        videourl = videourl.replace("https://","http://").replace("?mime=true","")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])
    else:
        videourl = scrapertools.find_single_match(text_decode, '"href",(?:\s|)\'([^\']+)\'')
        videourl = videourl.replace("https://","http://")
        extension = videourl[-4:]
        video_urls.append([ extension + " [Openload]", videourl+header_down+extension])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 2
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve playable URL(s) for an Openload embed or download page.

    Returns a list of player entries: [label, url] for download pages,
    [label, url, 0, subtitle] for embed pages (Plex gets the url
    without the header suffix).  premium/user/password/video_password
    belong to the common server interface and are unused here.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)

    # Spanish caption track, if the page advertises one.
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download (makes the player send our User-Agent).
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"

    from lib.aadecode import decode as aadecode
    if "videocontainer" not in data:
        # No embedded player: fall back to the "/f/" download page.
        url = page_url.replace("/embed/", "/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.find_single_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = aadecode(text_encode)

        videourl = "http://" + scrapertools.find_single_match(
            text_decode, "(openload.co/.*?)\}")
        # Extension comes from the description meta tag
        # (e.g. "movie.mp4" -> ".mp4").
        extension = scrapertools.find_single_match(
            data, '<meta name="description" content="([^"]+)"')
        extension = "." + extension.rsplit(".", 1)[1]
        video_urls.append(
            [extension + " [Openload]", videourl + header_down + extension])
    else:
        # Several AAEncoded ("゚ω゚") scripts; the first one computes the
        # index of the script that really holds the video URL.
        text_encode = scrapertools.find_multiple_matches(
            data, '<script[^>]+>(゚ω゚.*?)</script>')
        decodeindex = aadecode(text_encode[0])
        subtract = scrapertools.find_single_match(decodeindex,
                                                  'welikekodi.*?(\([^;]+\))')
        # SECURITY NOTE(review): eval() runs an expression scraped from
        # the remote page (untrusted input); kept as-is, flagged.
        index = int(eval(subtract))

        # Decode the script selected by the computed index.
        text_decode = aadecode(text_encode[index])

        videourl = "http://" + scrapertools.find_single_match(
            text_decode, "(openload.co/.*?)\}")
        extension = "." + scrapertools.find_single_match(
            text_decode, "video/(\w+)")
        if config.get_platform() != "plex":
            video_urls.append([
                extension + " [Openload] ", videourl + header_down + extension,
                0, subtitle
            ])
        else:
            # Plex cannot handle the "|User-Agent=...|" suffix.
            video_urls.append(
                [extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Esempio n. 3
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve playable Openload URL(s) for an embed or download page.

    Returns a list of player entries; the embed branch also carries
    the subtitle slot.  premium/user/password/video_password belong to
    the common server interface and are unused here.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    video = True
    data = scrapertools.downloadpageWithoutCookies(page_url)

    if "videocontainer" not in data:
        # No embedded player: fall back to the "/f/" download page.
        video = False
        url = page_url.replace("/embed/", "/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.get_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")

    else:
        text_encode = scrapertools.get_match(
            data, "<video[^<]+<script[^>]+>(.*?)</script>")

    from aadecode import decode as aadecode
    text_decode = aadecode(text_encode)

    # Spanish caption track, if the page advertises one.
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download (makes the player send our User-Agent).
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"
    if video:  # idiomatic truth test instead of "== True"
        videourl = scrapertools.get_match(text_decode, "(http.*?true)")
        # Follow the redirect to the real media host.
        videourl = scrapertools.get_header_from_response(
            videourl, header_to_get="location")
        videourl = videourl.replace("https://",
                                    "http://").replace("?mime=true", "")
        extension = videourl[-4:]
        video_urls.append([
            extension + " [Openload]", videourl + header_down + extension, 0,
            subtitle
        ])
    else:
        videourl = scrapertools.find_single_match(
            text_decode, '"href",(?:\s|)\'([^\']+)\'')
        videourl = videourl.replace("https://", "http://")
        extension = videourl[-4:]
        # NOTE(review): this branch omits the subtitle slot — presumably
        # intentional for download pages; confirm against the player.
        video_urls.append(
            [extension + " [Openload]", videourl + header_down + extension])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Esempio n. 4
0
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve playable media URL(s) for a Putlocker page.

    Submits the site's confirmation form, then extracts the stream
    either from the playlist XML feed or from an inline "url: '...'"
    value.  Returns a list of [label, url] pairs (empty when the
    confirm form is missing).  premium/user/password/video_password
    belong to the common server interface and are unused here.
    """
    logger.info("[putlocker.py] url="+page_url)

    data = scrapertools.cache_page(page_url)
    logger.info("data="+data)

    # The page holds a hidden token plus a named submit button; both
    # must be posted back to reach the player page.
    patron  = '<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)">[^<]+'
    patron += '<input name="confirm" type="submit" value="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)==0: return []

    post = matches[0][1]+"="+matches[0][0]+"&confirm="+(matches[0][2].replace(" ","+"))
    # NOTE(review): this local list shadows any module-level "headers".
    headers = []
    headers.append( ['User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'] )
    headers.append( [ "Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ])
    headers.append( ['Referer',page_url] )

    data = scrapertools.cache_page( page_url , post=post, headers=headers )
    logger.info("data="+data)

    # Extract the playlist (XML feed) URL, if the player uses one.
    patron = "playlist: '(.+?)'"
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    video_urls = []
    if len(matches)>0:
        xmlurl = urlparse.urljoin(page_url,matches[0])
        logger.info("[putlocker.py] Playlist="+xmlurl)
    
        logger.info("xmlurl="+xmlurl)
        data = scrapertools.downloadpageWithoutCookies(xmlurl)
        logger.info("data="+data)
        # Extract the media URL; the tag may or may not be preceded by
        # </link>.
        # NOTE(review): bare except — ANY failure of the first pattern
        # (not just "no match") falls through to the second.
        try:
            mediaurl = scrapertools.get_match(data,'</link><media\:content url="(.+?)"')
        except:
            mediaurl = scrapertools.get_match(data,'<media\:content url="(.+?)"')
        logger.info("mediaurl="+mediaurl)
        # web  http://media-a7.putlocker.com/download/17/ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv?h=T6eVK5WKEn3fDwKLcFkAog&e=1341894542&f=%27ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv%27
        # xbmc http://media-a7.putlocker.com/download/17/ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv?h=yFVjhTW95m3LqyqUH1yUDA&amp;e=1341894600&amp;f='ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv'
        # xbmc http://media-a7.putlocker.com/download/17/ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv?h=yFVjhTW95m3LqyqUH1yUDA&e=1341894600&f=%27ecopolis_._6_episodio_final_documaniatv.com_3b1c3.flv%27

        # Normalize entities/quotes the way XBMC expects (see samples).
        mediaurl = mediaurl.replace("&amp;","&").replace("'","%27")
        logger.info("mediaurl="+mediaurl)
        video_urls.append( [".flv [putlocker]",mediaurl] )

    else:
        logger.info("data="+data)
        logger.info("[putlocker.py] No encuentra Playlist")
        # Inline form, e.g.:
        #url: 'http://s3.putlocker.ch:86/2015.mp4?key=2daad71cdc34f5a2e10665cf0efe1356'
        videourl = scrapertools.get_match(data,"url\: '([^']+)'")
        video_urls.append( ["[putlocker]",videourl] )


    for video_url in video_urls:
        logger.info("[putlocker.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 5
0
def elgoles(item):
    """List the "Páginas" links of elgoles, probing each for a p2p stream.

    Each linked page that yields a p2p source becomes a playable Item;
    pages without one are skipped.
    """
    logger.info("pelisalacarta.channels.miscelanea_p2p elgoles")
    itemlist = []

    # servertools moved between releases; support both layouts.
    try:
        from servers import servertools
    except:
        from core import servertools

    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_single_match(
        data, '<h2>Páginas</h2>.*?<ul>(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(
        bloque, "<a href='([^']+)'>(.*?)</a>")
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = "[COLOR crimson]" + scrapedtitle.capitalize(
        ) + "[/COLOR]"
        # NOTE: reuses (shadows) the outer "data" variable.
        data = scrapertools.downloadpageWithoutCookies(scrapedurl)
        urls = servertools.findvideosbyserver(data, "p2p")
        if urls:
            # urls[0][0] is the label of the first detected p2p source.
            scrapedtitle += "   [COLOR darkcyan]" + urls[0][0] + "[/COLOR]"
            itemlist.append(
                item.clone(url=scrapedurl,
                           action="play",
                           title=scrapedtitle,
                           folder=False))

    return itemlist
Esempio n. 6
0
def test_video_exists(page_url):
    """Probe a Wupload page and report whether the file still exists.

    Returns (True, "") when the file looks available, or
    (False, <site message>) when the page carries a removal notice.
    """
    logger.info("[wupload.py] test_video_exists(page_url='%s')" % page_url)

    # Existe: http://www.wupload.com/file/2666595132
    # No existe: http://www.wupload.es/file/2668162342
    # Follow a redirect, if any, before inspecting the page.
    location = scrapertools.get_header_from_response(page_url, header_to_get="location")
    logger.info("location=" + location)
    if location != "":
        page_url = location

    page = scrapertools.downloadpageWithoutCookies(page_url)
    logger.info("data=" + page)

    filename_re = '<p class="fileInfo filename"><span>Filename: </span> <strong>([^<]+)</strong></p>'
    if re.search(filename_re, page, re.DOTALL):
        return True, ""

    # Check the two known "file gone" notices.
    for gone_re in (
            '<p class="deletedFile">(Sorry, this file has been removed.)</p>',
            '<div class="section CL3 regDownloadMessage"> <h3>(File does not exist)</h3> </div>'):
        gone = re.search(gone_re, page, re.DOTALL)
        if gone:
            return False, gone.group(1)

    return True, ""
Esempio n. 7
0
def videos(item):
    """List the videos of an xhamster results page as playable Items.

    Scrapes (url, thumbnail, title) triples and appends a "next page"
    Item when the pager link is present.
    """
    data = scrapertools.downloadpageWithoutCookies(item.url)
    itemlist = []

    # Each entry: <a href='...' class='hRotator'><img src='...'
    # width='...' height='...' class='thumb' alt="Title"/> ...
    patron = "<a href='([^']+)'[^<]+<img src='([^']+)' width='[^']+' height='[^']+' class='[^']+' alt=\"([^\"]+)\""
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # str.strip() cannot fail; the old try/except around it was dead.
        title = unicode(scrapedtitle.strip(), "utf-8")
        url = urlparse.urljoin(BASE_URL, scrapedurl)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=scrapedthumbnail,
                 plot="",
                 show=title,
                 viewmode="movie",
                 folder=False))

    # Pager link, e.g.:
    # <a href="/channels/new-grannies-2.html" class="last colR"><div class="icon iconPagerNextHover"></div>...</a>
    patronvideos = "<a href='([^']+)' class='last(?: colR)?'(?: overicon='iconPagerNextHover')?><div class='icon iconPagerNext"
    siguiente = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(siguiente)
    if len(siguiente) > 0:
        itemlist.append(
            Item(channel=__channel__,
                 action='videos',
                 title=u"Página siguiente >>",
                 url=urlparse.urljoin(BASE_URL, siguiente[0]),
                 thumbnail="",
                 plot="",
                 folder=False))
    # (the dead "paginador = None" else-branch was removed)

    return itemlist
Esempio n. 8
0
def test_video_exists(page_url):
    """Tell whether a Wupload file is still available.

    Follows the location header if present, then inspects the page for
    the filename block (file exists) or one of the known removal
    notices (file gone).  Returns (bool, message).
    """
    logger.info("[wupload.py] test_video_exists(page_url='%s')" % page_url)

    # Existe: http://www.wupload.com/file/2666595132
    # No existe: http://www.wupload.es/file/2668162342
    location = scrapertools.get_header_from_response(page_url,
                                                     header_to_get="location")
    logger.info("location=" + location)
    if location != "":
        page_url = location

    page = scrapertools.downloadpageWithoutCookies(page_url)
    logger.info("data=" + page)

    exists = re.search(
        '<p class="fileInfo filename"><span>Filename: </span> <strong>([^<]+)</strong></p>',
        page, re.DOTALL)
    if exists:
        return True, ""

    removed = re.search(
        '<p class="deletedFile">(Sorry, this file has been removed.)</p>',
        page, re.DOTALL)
    if removed:
        return False, removed.group(1)

    missing = re.search(
        '<div class="section CL3 regDownloadMessage"> <h3>(File does not exist)</h3> </div>',
        page, re.DOTALL)
    if missing:
        return False, missing.group(1)

    return True, ""
Esempio n. 9
0
def get_link_api(page_url):
    """Resolve a direct video URL through the Openload REST API.

    Returns the http:// download URL, or "" when the ticket request is
    refused (non-200 API status).
    """
    from core import jsontools
    file_id = scrapertools.find_single_match(page_url, 'embed/([0-9a-zA-Z-_]+)')
    login = "******"
    key = "AQFO3QJQ"
    # Step 1: request a one-shot download ticket.
    data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key))
    data = jsontools.load_json(data)
    if data["status"] == 200:
        ticket = data["result"]["ticket"]
        # Step 2: trade the ticket for the real file URL.
        data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket))
        data = jsontools.load_json(data)
        # (removed an unused "extension" local that was computed and
        # then discarded)
        videourl = data['result']['url']
        # The player historically needs plain http.
        videourl = videourl.replace("https", "http")
        return videourl

    return ""
def test_video_exists(page_url):
    """Return (True, "") if the Openload file is up, else (False, msg)."""
    logger.info("pelisalacarta.servers.openload test_video_exists(page_url='%s')" % page_url)

    page = scrapertools.downloadpageWithoutCookies(page_url)

    # Openload shows this apology banner for removed files.
    if 'We are sorry!' not in page:
        return True, ""
    return False, "[Openload] File cancellato o inesistente"
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve playable Openload URL(s) by descrambling the hidden id.

    The page hides the stream id in a DOM element whose characters are
    scrambled; this reverses the scramble and feeds the result to
    decode_hidden().  Returns player entries [label, url, 0, subtitle].
    premium/user/password/video_password belong to the common server
    interface and are unused here.
    """
    logger.info("streamondemand.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    # Italian caption track, if the page advertises one.
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="it"')
    # Header for the download (makes the player send our User-Agent).
    header_down = "|User-Agent=" + headers['User-Agent']

    from lib.aadecode import decode as aadecode
    # Decode and concatenate every AAEncoded ("゚ω゚") script blob.
    text_encode = scrapertools.find_multiple_matches(data,
                                                     '(゚ω゚.*?\(\'\_\'\));')
    text_decode = ""
    for t in text_encode:
        text_decode += aadecode(t)

    # The decoded JS adds <fn>() to each char code; recover that
    # function's constant return expression.
    varfnc = scrapertools.find_single_match(
        text_decode, 'charCodeAt\(0\)\s*\+\s*(\w+)\(\)')
    number = scrapertools.find_single_match(
        text_decode,
        'function\s*' + varfnc + '\(\)\s*{\s*return\s*([^;]+);\s*}')
    # SECURITY NOTE(review): eval() on an expression scraped from the
    # remote page (untrusted input); kept as-is, flagged.
    number = eval(number)
    # Locate the element id whose text holds the scrambled value.
    varj = scrapertools.find_single_match(text_decode,
                                          'var magic\s*=\s*(\w+)\.slice')
    varhidden = scrapertools.find_single_match(
        text_decode, 'var\s*' + varj + '\s*=\s*\$\("[#]*([^"]+)"\).text')
    valuehidden = scrapertools.find_single_match(
        data, 'id="' + varhidden + '">(.*?)<')
    # Descramble: this split/join dance appears to exchange occurrences
    # of chr(magic-1) with the string's final character, using "\t" as
    # a temporary placeholder — order of steps is significant.
    magic = ord(valuehidden[-1])
    valuehidden = valuehidden.split(chr(magic - 1))
    valuehidden = "\t".join(valuehidden)
    valuehidden = valuehidden.split(valuehidden[-1])
    valuehidden = chr(magic - 1).join(valuehidden)
    valuehidden = valuehidden.split("\t")
    valuehidden = chr(magic).join(valuehidden)

    videourl = decode_hidden(valuehidden, number)

    # File extension from the description meta tag ("x.mp4" -> ".mp4").
    extension = scrapertools.find_single_match(
        data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append(
            [extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        # Plex cannot handle the "|User-Agent=..." suffix.
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Esempio n. 12
0
def test_video_exists(page_url):
    """Probe the FlashX page; (True, "") when alive, (False, msg) otherwise."""
    logger.info("pelisalacarta.servers.flashx test_video_exists(page_url='%s')" % page_url)

    html = scrapertools.downloadpageWithoutCookies(page_url)

    if 'File Not Found' not in html:
        return True, ""
    return False, "[FlashX] El archivo no existe o ha sido borrado"
def test_video_exists(page_url):
    """Check an Openload page; returns (True, "") when the file exists,
    or (False, Italian error message) when the site reports it gone.
    """
    # Typo fix: the log tag previously read "stramondemand".
    logger.info("streamondemand.servers.openload test_video_exists(page_url='%s')" % page_url)

    data = scrapertools.downloadpageWithoutCookies(page_url)

    if 'We are sorry!' in data:
        return False, "[Openload] La risorsa non esiste o è stata eliminata"

    return True, ""
Esempio n. 14
0
def test_video_exists(page_url):
    """Report Openload availability: (True, "") or (False, Spanish msg)."""
    logger.info("pelisalacarta.servers.openload test_video_exists(page_url='%s')" % page_url)

    response = scrapertools.downloadpageWithoutCookies(page_url)

    missing = 'We are sorry!' in response
    if missing:
        return False, "[Openload] El archivo no existe o ha sido borrado"
    return True, ""
def get_link_api(page_url):
    """Resolve (direct_url, content_type) via the Openload API.

    Returns ("", "") when no download ticket could be obtained, so the
    result can always be unpacked into two values.
    """
    from core import jsontools
    file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')
    login = "******"
    key = "AQFO3QJQ"
    # Step 1: request a one-shot download ticket.
    data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key))
    data = jsontools.load_json(data)
    extension = ""
    if data["status"] == 200:
        ticket = data["result"]["ticket"]
        # Step 2: trade the ticket for the real file descriptor.
        data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket))
        data = jsontools.load_json(data)
        extension = data["result"]["content_type"]
        videourl = data['result']['url']
        # The player historically needs plain http.
        videourl = videourl.replace("https", "http")
        return videourl, extension

    # BUGFIX: previously returned a bare "" here, which broke callers
    # that unpack two values; keep the (url, content_type) shape.
    return "", extension
Esempio n. 16
0
def get_link_api(page_url):
    """Resolve a direct video URL through the Openload REST API.

    Follows the ?mime=true redirect to obtain the final location.
    Returns the http:// URL, or "" when the ticket request is refused.
    """
    from core import jsontools
    file_id = scrapertools.find_single_match(page_url, 'embed/([0-9a-zA-Z-_]+)')
    login = "******"
    key = "AQFO3QJQ"
    # Step 1: request a one-shot download ticket.
    data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key))
    data = jsontools.load_json(data)
    if data["status"] == 200:
        ticket = data["result"]["ticket"]
        # Step 2: trade the ticket for the real file URL.
        data = scrapertools.downloadpageWithoutCookies("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket))
        data = jsontools.load_json(data)
        # (removed an unused "extension" local that was computed and
        # then discarded)
        videourl = data['result']['url'] + '?mime=true'
        # Resolve the redirect by hand so the query can be stripped again.
        videourl = scrapertools.getLocationHeaderFromResponse(videourl)
        videourl = videourl.replace("https", "http").replace("?mime=true", "")
        return videourl

    return ""
Esempio n. 17
0
def test_video_exists(page_url):
    """(True, "") when the Openload file is reachable, else (False, msg)."""
    logger.info("(page_url='%s')" % page_url)

    body = scrapertools.downloadpageWithoutCookies(page_url)

    if 'We’re Sorry!' in body:
        return False, "[Openload] El archivo no existe o ha sido borrado"
    return True, ""
def test_video_exists(page_url):
    """Openload availability probe: (True, "") or (False, Italian msg)."""
    logger.info("(page_url='%s')" % page_url)

    markup = scrapertools.downloadpageWithoutCookies(page_url)

    if 'We’re Sorry!' not in markup:
        return True, ""
    return False, "[Openload] File inesistente o eliminato"
Esempio n. 19
0
def entradas(item):
    """Build the live-events list for the tugoleada channel.

    Scrapes the schedule table, then fetches each event's detail page
    (in parallel when possible) to find channels carried by tugoleada.
    Confirmed events get Acestream/Sopcast/Web play entries; the rest
    are appended at the bottom as unconfirmed upcoming events.
    """
    logger.info("pelisalacarta.channels.tugoleada entradas")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    # Collapse whitespace/entities so the row regex can stay simple.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    patron = '<div class="litd fecha"><b>(.*?)</b>(.*?)</div>' \
             '.*?<div class="litd competicion">(.*?)</div>.*?href=[^>]+>(.*?)</a>' \
             '.*?javascript:abrir_evento\((\d+)\)(.*?)</li>'
    matches = scrapertools.find_multiple_matches(data, patron)

    canales = ['Acestream','Sopcast','Web']
    ucanales = ['a','s','']  # URL suffix per channel type
    lista = []
    urls = []
    for fecha, hora, torneo, partido, id, check_live  in matches:
        urls.append("http://www.elitegol.com/ajax/abrir_evento.php?id=%s" % id)
        evento = "[COLOR darkorange][B]"+partido+"[/B][/COLOR]"
        torneo = "  [COLOR blue]"+torneo+"[/COLOR]"
        # Red title while the match is live, green otherwise.
        if "EN JUEGO" in check_live: scrapedtitle = "[COLOR red][B]"+fecha+hora+"[/B][/COLOR] " + evento + torneo
        else: scrapedtitle = "[COLOR green][B]"+fecha+hora+"[/B][/COLOR] " + evento + torneo
        lista.append(item.clone(channel=__channel__, title=scrapedtitle, action="do_nothing", url="", fulltitle=evento, folder=False))

    # Fetch all detail pages concurrently; fall back to sequential
    # downloads when multiprocessing.dummy is unavailable.
    try:
        from multiprocessing.dummy import Pool as ThreadPool
        thread = ThreadPool()
        results = thread.map(scrapertools.downloadpageWithoutCookies, urls)
        thread.close()
        thread.join()
    except:
        results = []
        for url_ajax in urls:
            data_result = scrapertools.downloadpageWithoutCookies(url_ajax)
            results.append(data_result)

    prox_eventos = []
    # "results" is index-aligned with "lista" built above.
    for i, data in enumerate(results):
        busqueda = re.search(r'(?i)tugoleada', data, flags=re.DOTALL)
        if busqueda:
            itemlist.append(lista[i])
            canal = scrapertools.find_single_match(data, '(?i)>(?:\w+|\s*|)Tugoleada.*?(\d+).*?</a>')
            fulltitle = lista[i].fulltitle
            # NOTE(review): this inner loop shadows the outer index "i";
            # harmless because enumerate() reassigns it, but fragile.
            for i in range(0, len(canales)):
                scrapedurl = host + "canal" + canal + ucanales[i] + ".php"
                scrapedtitle = "      [COLOR green]CANAL "+canal+" [/COLOR][COLOR indianred][" \
                               +canales[i]+"][/COLOR]"
                itemlist.append(item.clone(channel=__channel__, title=scrapedtitle, action="play", url = scrapedurl, server="p2p", fulltitle=fulltitle, folder=False))
        else:
            prox_eventos.append(lista[i])

    itemlist.append(item.clone(action="do_nothing", title="", folder=False))
    itemlist.append(item.clone(action="do_nothing", title="[COLOR magenta][B]Posibles próximos eventos (No confirmados)[/B][/COLOR]", folder=False))
    for evento in prox_eventos:
        itemlist.append(evento)

    return itemlist
Esempio n. 20
0
def test_video_exists(page_url):
    """Check a FlashX page: exists, deleted, or still processing."""
    logger.info("pelisalacarta.servers.flashx test_video_exists(page_url='%s')" % page_url)

    # Strip the "playvid-" prefix to reach the plain file page.
    html = scrapertools.downloadpageWithoutCookies(page_url.replace("playvid-", ""))

    if 'File Not Found' in html:
        return False, "[FlashX] El archivo no existe o ha sido borrado"
    if 'Video is processing now' in html:
        return False, "[FlashX] El archivo se está procesando"
    return True, ""
Esempio n. 21
0
def test_video_exists(page_url):
    """Return availability of a FlashX file as (bool, message)."""
    logger.info(
        "streamondemand.servers.flashx test_video_exists(page_url='%s')" %
        page_url)

    markup = scrapertools.downloadpageWithoutCookies(page_url)

    gone = 'File Not Found' in markup
    return (False, "[FlashX] File inesistente o eliminato") if gone else (True, "")
Esempio n. 22
0
def test_video_exists(page_url):
    """(True, "") if the Openload file is still hosted, else (False, msg)."""
    logger.info(
        "streamondemand.servers.openload test_video_exists(page_url='%s')" %
        page_url)

    content = scrapertools.downloadpageWithoutCookies(page_url)

    if 'We are sorry!' not in content:
        return True, ""
    return False, "[Openload] El archivo no existe o ha sido borrado"
Esempio n. 23
0
def test_video_exists(page_url):
    """FlashX availability probe: (True, "") or (False, Italian msg)."""
    logger.info(
        "pelisalacarta.servers.flashx test_video_exists(page_url='%s')" %
        page_url)

    page_body = scrapertools.downloadpageWithoutCookies(page_url)

    if 'File Not Found' not in page_body:
        return True, ""
    return False, "[FlashX] Il file non esiste o è stato eliminato"
Esempio n. 24
0
def test_video_exists(page_url):
    """Check a FlashX file: missing, still processing, or available."""
    logger.info("streamondemand.servers.flashx test_video_exists(page_url='%s')" % page_url)

    # Strip the "playvid-" prefix to reach the plain file page.
    page = scrapertools.downloadpageWithoutCookies(page_url.replace("playvid-", ""))

    if 'File Not Found' in page:
        return False, "[FlashX] Nessun file"
    if 'Video is processing now' in page:
        return False, "[FlashX] File processato"
    return True, ""
Esempio n. 25
0
def test_video_exists(page_url):
    """Return (exists, message) for a FlashX page."""
    logger.info("(page_url='%s')" % page_url)

    content = scrapertools.downloadpageWithoutCookies(page_url.replace("playvid-", ""))

    # Check the failure markers in the same order as the site shows them.
    for marker, reason in (
            ('File Not Found', "[FlashX] El archivo no existe o ha sido borrado"),
            ('Video is processing now', "[FlashX] El archivo se está procesando")):
        if marker in content:
            return False, reason
    return True, ""
Esempio n. 26
0
def test_video_exists(page_url):
    """Openload availability probe: (True, "") or (False, Italian msg)."""
    logger.info(
        "streamondemand.servers.openload test_video_exists(page_url='%s')" %
        page_url)

    markup = scrapertools.downloadpageWithoutCookies(page_url)

    gone = 'We are sorry!' in markup
    if gone:
        return False, "[Openload] Il file non esiste o è stato cancellato"
    return True, ""
Esempio n. 27
0
def test_video_exists(page_url):
    """FlashX availability probe: (True, "") or (False, Spanish msg)."""
    logger.info(
        "pelisalacarta.servers.flashx test_video_exists(page_url='%s')" %
        page_url)

    body = scrapertools.downloadpageWithoutCookies(page_url)

    return (False, "[FlashX] El archivo no existe o ha sido borrado") if 'File Not Found' in body else (True, "")
Esempio n. 28
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)

    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    #Header para la descarga
    header_down = "|User-Agent="+headers['User-Agent']+"|"

    from lib.aadecode import decode as aadecode
    if "videocontainer" not in data:
        url = page_url.replace("/embed/","/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.find_single_match(data,"Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = aadecode(text_encode)
        
        videourl = scrapertools.find_single_match(text_decode, '(http.*?)\}').replace("https://","http://")
        extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
        extension = "." + extension.rsplit(".", 1)[1]
        video_urls.append([extension + " [Openload]", videourl+header_down+extension])
    else:
        text_encode = scrapertools.find_multiple_matches(data,'<script[^>]+>(゚ω゚.*?)</script>')
        decodeindex = aadecode(text_encode[0])
        subtract = scrapertools.find_single_match(decodeindex, 'welikekodi.*?(\([^;]+\))')
        index = int(eval(subtract))
        
        # Buscamos la variable que nos indica el script correcto
        text_decode = aadecode(text_encode[index])

        videourl = scrapertools.find_single_match(text_decode, "(http.*?true)").replace("https://","http://")
        extension = "." + scrapertools.find_single_match(text_decode, "video/(\w+)")
        if config.get_platform() != "plex":
            video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
        else:
            video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 29
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve the final video URL for a Sockshare page.

    Submits the "free user" confirmation form, follows the playlist
    reference it reveals, and extracts the media URL from the playlist
    XML. Returns a list of [label, url] pairs (empty on any failure).
    """
    logger.info("[sockshare.py] url=" + page_url)
    data = scrapertools.cache_page(page_url)

    # The confirmation form carries a hex "hash" token we must post back.
    patron = 'value="([0-9a-f]+?)" name="hash"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0: return []

    post = "hash=" + matches[0] + "&confirm=Continue as Free User"
    data = scrapertools.cache_page(
        page_url,
        post=post,
        headers=[[
            'User-Agent',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'
        ], ['Referer', page_url]])
    logger.info("data=" + data)
    # Extract the playlist reference from the confirmed page
    patron = "playlist: '(.+?)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches) > 0:
        # Playlist paths may be relative, so resolve against the page URL.
        xmlurl = urlparse.urljoin(page_url, matches[0])
        logger.info("[sockshare.py] Playlis=" + xmlurl)
    else:
        logger.info("[sockshare.py] No encuentra Playlist=")

        return []

    logger.info("xmlurl=" + xmlurl)
    data = scrapertools.downloadpageWithoutCookies(xmlurl)
    # Extract the media URL from the playlist XML
    patron = '</link><media\:content url="(.+?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    video_urls = []

    if len(matches) > 0:
        # Label is built from the (truncated, 3-char) file extension.
        video_urls.append([
            "." + matches[0].rsplit('.', 1)[1][0:3] + " [sockshare]",
            matches[0]
        ])

    for video_url in video_urls:
        logger.info("[sockshare.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Esempio n. 30
0
def videos(item):
    """Build the list of playable video Items from a beeg.com listing page.

    The page embeds parallel JavaScript arrays: ``tumbid`` (numeric video
    ids) and ``tumbalt`` (titles), plus base URLs ``URLthumb`` and
    ``IMGthumb``. This scrapes those arrays and pairs them up into Items.

    Returns the list of Item objects.
    """
    logger.info("[beeg.py] videos")
    data = scrapertools.downloadpageWithoutCookies(item.url)
    itemlist = []

    # Base URLs used to compose each video page and thumbnail, e.g.
    #   var URLthumb = 'http://beeg.com/';
    #   var IMGthumb = 'http://eu1.anythumb.com/236x177/';
    base_thumbnail_url = scrapertools.get_match(data,"var IMGthumb \= '([^']+)'")
    base_url = scrapertools.get_match(data,"var URLthumb \= '([^']+)'")
    base_url = urlparse.urljoin("http://beeg.com/",base_url)

    # List of video ids: var tumbid = [7208081,1022338,...];
    id_string = scrapertools.get_match(data,"var tumbid  =\[([^\]]+)\]")
    id_list = re.compile("(\d+)",re.DOTALL).findall(id_string)

    # Parallel list of titles: var tumbalt = ['title one','title two',...];
    title_string = scrapertools.get_match(data,"var tumbalt \=\[([^\]]+)\]")
    # Neutralize escaped quotes inside the JS strings so the simple
    # single-quote pattern below does not split titles apart.
    title_string = title_string.replace("\\'",'"')
    title_list = re.compile("'([^']+)'",re.DOTALL).findall(title_string)

    # zip() keeps the id/title pairing and, unlike indexing by range(),
    # tolerates arrays of unequal length instead of raising IndexError.
    for video_id, raw_title in zip(id_list, title_list):
        try:
            scrapedtitle = unicode( raw_title, "utf-8" ).encode("iso-8859-1")
        except:
            scrapedtitle = raw_title
        scrapedtitle = scrapedtitle.replace('"',"'")
        scrapedurl = base_url + video_id
        scrapedthumbnail = base_thumbnail_url + video_id + ".jpg"
        scrapedplot = ""
        # Debug trace of every scraped entry
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, viewmode="movie", folder=False))

    return itemlist
def test_video_exists(page_url):
    """Check availability of a FlashX video; return (ok, error_message)."""
    logger.info(
        "streamondemand.servers.flashx test_video_exists(page_url='%s')" %
        page_url)

    markup = scrapertools.downloadpageWithoutCookies(page_url)

    error = ""
    if 'File Not Found' in markup:
        error = "[FlashX] File inesistestente o cancellato"
    elif 'Video is processing now' in markup:
        error = "[FlashX] Processando il file"

    return (error == ""), error
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the final video URL for a Sockshare page.

    Posts the "free user" confirmation, follows the playlist reference
    and extracts the media URL from the playlist XML. Returns a list of
    [label, url] pairs (empty on any failure).
    """
    logger.info("[sockshare.py] url="+page_url)
    page = scrapertools.cache_page(page_url)

    # The confirmation form carries a hex "hash" token we must post back.
    hashes = re.compile('value="([0-9a-f]+?)" name="hash"', re.DOTALL).findall(page)
    if not hashes:
        return []

    confirm_post = "hash=" + hashes[0] + "&confirm=Continue as Free User"
    page = scrapertools.cache_page(
        page_url,
        post=confirm_post,
        headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],
                 ['Referer', page_url]])
    logger.info("data=" + page)

    # Locate the playlist reference on the confirmed page.
    playlists = re.compile("playlist: '(.+?)'", re.DOTALL).findall(page)
    if not playlists:
        logger.info("[sockshare.py] No encuentra Playlist=")

        return []
    # Playlist paths may be relative, so resolve against the page URL.
    xmlurl = urlparse.urljoin(page_url, playlists[0])
    logger.info("[sockshare.py] Playlis=" + xmlurl)

    logger.info("xmlurl=" + xmlurl)
    playlist_xml = scrapertools.downloadpageWithoutCookies(xmlurl)

    # Pull the media URL out of the playlist XML.
    media = re.compile('</link><media\:content url="(.+?)"', re.DOTALL).findall(playlist_xml)
    scrapertools.printMatches(media)

    video_urls = []
    if media:
        # Label is built from the (truncated, 3-char) file extension.
        label = "." + media[0].rsplit('.', 1)[1][0:3] + " [sockshare]"
        video_urls.append([label, media[0]])

    for video_url in video_urls:
        logger.info("[sockshare.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Esempio n. 33
0
def videos(item):
    """Scrape an xhamster listing page into playable Items, appending a
    "next page" Item when a pagination link is present.

    Returns the list of Item objects.
    """
    logger.info("[xhamster.py] videos")
    data = scrapertools.downloadpageWithoutCookies(item.url)
    itemlist = []

    # Each video entry looks like:
    #   <a href='http://xhamster.com/movies/...' class='hRotator'>
    #   <img src='http://....jpg' ... class='thumb' alt="Title"/>
    patron = "<a href='([^']+)'[^<]+<img src='([^']+)' class='[^']+' alt=\"([^\"]+)\""
    matches = re.compile(patron,re.DOTALL).findall(data)
    for url,thumbnail,title in matches:
        try:
            scrapedtitle = unicode( title, "utf-8" ).encode("iso-8859-1")
        except:
            scrapedtitle = title
        # Listing hrefs may be relative, so resolve against the site root.
        scrapedurl = urlparse.urljoin( "http://www.xhamster.com" , url )
        scrapedthumbnail = thumbnail
        scrapedplot = ""
        # Debug trace of every scraped entry
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, folder=False))

    # Pagination link, e.g.:
    # <a href='search.php?q=sexo&qcat=video&page=3' class='last'>Next</a></td></tr></table></div></td>
    patronvideos  = '<a href=\'([^\']+)\' class=\'last\'>Next</a></td></tr></table></div></td>'
    siguiente = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(siguiente)
    if len(siguiente)>0:
        itemlist.append( Item(channel=__channel__, action='videos' , title=">> Pagina siguiente" , url=urlparse.urljoin( "http://www.xhamster.com" , siguiente[0] ), thumbnail="", plot="", show="!Página siguiente") )

    return itemlist
Esempio n. 34
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct Openload stream URL for an embed page.

    Returns a list with one [label, url, order, subtitle] entry.
    """
    logger.info("streamondemand.servers.openload url=" + page_url)

    page = scrapertools.downloadpageWithoutCookies(page_url)

    # Italian subtitle track, if the page declares one.
    subtitle = scrapertools.find_single_match(page, '<track kind="captions" src="([^"]+)" srclang="it"')

    # Download header appended to the stream URL.
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"

    # The file extension comes from the description meta tag (file name).
    filename = scrapertools.find_single_match(page, '<meta name="description" content="([^"]+)"')
    extension = "." + filename.rsplit(".", 1)[1]

    stream = decode_openload(page)

    video_urls = [[extension + " [Openload] ", stream + header_down + extension, 0, subtitle]]

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Esempio n. 35
0
def elgoles(item):
    """List the pages from the elgoles index and attach the p2p stream
    found on each one as a playable Item."""
    logger.info("pelisalacarta.channels.miscelanea_p2p elgoles")
    itemlist = []

    # servertools moved between packages across versions; try both locations.
    try:
        from servers import servertools
    except:
        from core import servertools

    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_single_match(data, '<h2>Páginas</h2>.*?<ul>(.*?)</ul>')
    for page_url, page_title in scrapertools.find_multiple_matches(bloque, "<a href='([^']+)'>(.*?)</a>"):
        titulo = "[COLOR crimson]" + page_title.capitalize() + "[/COLOR]"
        page_data = scrapertools.downloadpageWithoutCookies(page_url)
        urls = servertools.findvideosbyserver(page_data, "p2p")
        # Only pages that actually expose a p2p stream become playable items.
        if urls:
            titulo += "   [COLOR darkcyan]" + urls[0][0] + "[/COLOR]"
            itemlist.append(item.clone(url=page_url, action="play", title=titulo, folder=False))

    return itemlist
Esempio n. 36
0
def listcategorias(item):
    """List beeg tag categories as Items.

    Always scrapes the popular-tags block; unless ``item.extra ==
    "popular"``, also scrapes the full tags block. The combined list is
    returned sorted by title.
    """
    logger.info("[beeg.py] listcategorias")
    itemlist = []

    data = scrapertools.downloadpageWithoutCookies(item.url)

    # One <li> per tag inside each block.
    patron_tag = '<li><a target="_self" href="([^"]+)".*?>(.*?)</a></li>'

    # Popular tags block.
    patron_popular = '<div class="block block-tags-popular">(.*?)</ul>'
    data_popular = scrapertools.find_single_match(data, patron_popular)
    matches = re.compile(patron_tag, re.DOTALL).findall(data_popular)

    for scrapedurl, scrapedtag in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action="videos",
                 title=unicode(scrapedtag, "utf-8"),
                 url=urlparse.urljoin(BASE_URL, scrapedurl)))

    if item.extra == "popular":
        return itemlist

    # Full tags block.
    patron_all = '<div class="block block-tags">(.*?)</ul>\s+</div>'
    data_all = scrapertools.find_single_match(data, patron_all)
    matches = re.compile(patron_tag, re.DOTALL).findall(data_all)

    for scrapedurl, scrapedtag in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action="videos",
                 title=unicode(scrapedtag, "utf-8"),
                 url=urlparse.urljoin(BASE_URL, scrapedurl)))

    # Sort tags alphabetically. The key variable is named `entry` so it
    # does not shadow the `item` parameter of this function.
    itemlist.sort(key=lambda entry: entry.title.lower().strip())
    return itemlist
Esempio n. 37
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the final media URL for an openload.co page.

    Strategy, in order: decode the aaencoded (゚ω゚) scripts on the page;
    if that yields no direct link, decode the URL hidden in a <span>
    (plain charCode offset, jjencoded, or packed script); as a last
    resort fall back to the openload API via get_link_api().

    Returns a list of [label, url, 0, subtitle] entries.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Appended to the final URL so the player downloads with the same User-Agent
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode

        def _decode_from_hidden_span(text_decode):
            # Recover the link hidden in a <span id="...">...</span>.
            # This logic was previously duplicated verbatim in both the
            # embed and non-embed branches below.
            hiddenurl, valuehidden = scrapertools.find_single_match(data, '<span id="([^"]+)">(.*?)<')
            if not hiddenurl:
                return decodeopenload(data)
            # BUGFIX: the offset regex used '\s*+\s*', an invalid
            # "multiple repeat" pattern in Python's re module; the '+'
            # must be escaped (as the jjdecode branch below already did).
            number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
            if number:
                return decode_hidden(valuehidden, number)
            # No offset in the aadecoded text: pull it from the jjencoded
            # (or packed eval) script instead.
            from jjdecode import JJDecoder
            jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
            if not jjencode:
                pack = scrapertools.find_multiple_matches(data, '(eval \(function\(p,a,c,k,e,d\).*?\{\}\)\))')[-1]
                jjencode = openload_clean(pack)

            jjdec = JJDecoder(jjencode).decode()
            number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
            varj = scrapertools.find_single_match(jjdec, 'var j\s*=\s*(\w+)\.charCodeAt')
            varhidden = scrapertools.find_single_match(jjdec, 'var\s*'+varj+'\s*=\s*\$\("[#]*([^"]+)"\).text')
            if varhidden != hiddenurl:
                # The script reads a different span than the one found above
                valuehidden = scrapertools.find_single_match(data, 'id="'+varhidden+'">(.*?)<')
            return decode_hidden(valuehidden, number)

        if "videocontainer" not in data:
            # Not an embed page: fetch the /f/ (download) page instead
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)
            text_encode = scrapertools.find_multiple_matches(data,"(゚ω゚.*?\(\'\_\'\));")
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                videourl = "http://" + scrapertools.find_single_match(text_decode, '(openload.co/.*?)\}')
            except:
                videourl = "http://"

            if videourl == "http://":
                videourl = _decode_from_hidden_span(text_decode)
            # If the method fails, fall back to the API (may not work at peak hours)
            if not videourl:
                videourl = get_link_api(page_url)
        else:
            text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                subtract = scrapertools.find_single_match(text_decode, 'welikekodi.*?(\([^;]+\))')
            except:
                subtract = ""

            if subtract:
                # NOTE(review): eval() runs a scraped arithmetic expression
                # from the page; it selects which encoded script holds the
                # real link. Kept as-is, but it is evaluating remote input.
                index = int(eval(subtract))
                text_decode2 = aadecode(text_encode[index])
                videourl = "https://" + scrapertools.find_single_match(text_decode2, "(openload.co/.*?)\}")
            else:
                videourl = _decode_from_hidden_span(text_decode)

            # If the method fails, fall back to the API (may not work at peak hours)
            if not videourl:
                videourl = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload "+traceback.format_exc())
        # If the method fails, fall back to the API (may not work at peak hours)
        videourl = get_link_api(page_url)

    # The file extension comes from the page's description meta tag
    extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 38
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve downloadable media URLs for a flashx.tv page.

    Replays the site's verification flow (session JS fetch, file_id/aff
    cookies, a fixed wait) before POSTing the download form, then unpacks
    the packed JS and extracts the {file:..., label:...} sources.
    Returns a list of [label, url, 0, subtitle] entries; a Spanish .srt
    subtitle, if found, is saved locally and attached to every entry.
    """
    logger.info("pelisalacarta.servers.flashx url=" + page_url)

    data = scrapertools.downloadpageWithoutCookies(page_url)

    file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
    aff = scrapertools.find_single_match(data, "'aff', '([^']+)'")
    headers_c = [[
        'User-Agent',
        'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'
    ], ['Referer', page_url], ['Cookie', '; lang=1']]
    # Fetching this JS endpoint registers the session server-side
    coding_url = scrapertools.find_single_match(
        data, 'src="(http://www.flashx.tv/\w+.js\?[^"]+)"')
    if coding_url.endswith("="):
        coding_url += file_id
    coding = scrapertools.downloadpage(coding_url, headers=headers_c)

    data = scrapertools.downloadpage(page_url, headers=headers)
    flashx_id = scrapertools.find_single_match(data,
                                               'name="id" value="([^"]+)"')
    fname = scrapertools.find_single_match(data,
                                           'name="fname" value="([^"]+)"')
    hash_f = scrapertools.find_single_match(data,
                                            'name="hash" value="([^"]+)"')
    post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (
        flashx_id, urllib.quote(fname), hash_f)

    # The site enforces a short wait before accepting the form
    time.sleep(6)
    # BUGFIX: previously appended to the shared module-level `headers`
    # list, which grew (and leaked Referer/Cookie values) on every call.
    # Build a local copy for this request instead.
    post_headers = headers + [
        ['Referer', page_url],
        ['Cookie', 'lang=1; file_id=%s; aff=%s' % (file_id, aff)],
    ]
    data = scrapertools.downloadpage('http://www.flashx.tv/dl?%s' % flashx_id,
                                     post=post,
                                     headers=post_headers)

    match = scrapertools.find_single_match(
        data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
    if match:
        match = jsunpack.unpack(match)
    else:
        match = data

    # Extract the URL, e.g.:
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(
        match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Spanish":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(),
                                        'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                # BUGFIX: the log tag said "videomega"; this is flashx
                logger.info(
                    "pelisalacarta.servers.flashx Error al descargar el subtítulo: "
                    + traceback.format_exc())

    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append([
                "." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0,
                subtitle
            ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.flashx %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Esempio n. 39
0
def videos(item):
    """Build the playable video list for a beeg.com page.

    The page embeds two parallel JavaScript arrays plus two base URLs,
    e.g.::

        var tumbid  = [7208081, 1022338, ...];      // numeric video ids
        var tumbalt = ['title one', 'title two', ...]; // matching titles
        var URLthumb = 'http://beeg.com/';
        var IMGthumb = 'http://eu1.anythumb.com/236x177/';

    One Item is produced per (id, title) pair, with url = URLthumb + id
    and thumbnail = IMGthumb + id + ".jpg".
    """
    logger.info("[beeg.py] videos")
    data = scrapertools.downloadpageWithoutCookies(item.url)
    itemlist = []

    base_thumbnail_url = scrapertools.get_match(data,
                                                "var IMGthumb \= '([^']+)'")
    base_url = scrapertools.get_match(data, "var URLthumb \= '([^']+)'")
    base_url = urlparse.urljoin("http://beeg.com/", base_url)

    # List of video IDs
    id_string = scrapertools.get_match(data, "var tumbid  =\[([^\]]+)\]")
    id_list = re.compile("(\d+)", re.DOTALL).findall(id_string)

    # List of titles; escaped quotes are normalized before matching
    title_string = scrapertools.get_match(data, "var tumbalt \=\[([^\]]+)\]")
    title_string = title_string.replace("\\'", '"')
    title_list = re.compile("'([^']+)'", re.DOTALL).findall(title_string)

    # BUGFIX: the original indexed title_list by id position, raising
    # IndexError when the scraped arrays differed in length; zip() simply
    # stops at the shorter list.
    for video_id, title in zip(id_list, title_list):
        try:
            scrapedtitle = unicode(title, "utf-8").encode("iso-8859-1")
        except:
            scrapedtitle = title
        scrapedtitle = scrapedtitle.replace('"', "'")
        scrapedurl = base_url + video_id
        scrapedthumbnail = base_thumbnail_url + video_id + ".jpg"
        scrapedplot = ""
        # Debugging
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=scrapedtitle,
                 viewmode="movie",
                 folder=False))

    return itemlist
Esempio n. 40
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve downloadable media URLs for a flashx.tv page (newer flow).

    First tries to unpack the packed JS already present on the player page;
    when no valid sources block is found there, it replays the verification
    flow (file_id/aff cookies, on-page wait timer, POST to dl?playthis) and
    unpacks that response instead. Returns [label, url, 0, subtitle]
    entries; a Spanish .srt subtitle, if found, is saved locally.
    """
    logger.info("pelisalacarta.servers.flashx url=" + page_url)

    # Request once
    data = scrapertools.cache_page(page_url, headers=headers)
    # If the Kodi-detection warning appears, load the check page, then retry
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(
            data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.cache_page(url_reload, headers=headers)
            data = scrapertools.cache_page(page_url, headers=headers)
        except:
            pass

    # BUGFIX: `m` was unbound (NameError at `match = m`) when no
    # <script type='text/javascript'> block matched at all.
    m = ""
    matches = scrapertools.find_multiple_matches(
        data, "<script type='text/javascript'>(.*?)</script>")
    for n, m in enumerate(matches):
        if m.startswith("eval"):
            try:
                m = jsunpack.unpack(m)
                # Unpacked data without a long (40+ char) token is a decoy
                fake = (scrapertools.find_single_match(m, "(\w{40,})") == "")
                if fake:
                    m = ""
                else:
                    break
            except:
                m = ""
    match = m

    if not "sources:[{file:" in match:
        page_url = page_url.replace("playvid-", "")
        data = scrapertools.downloadpageWithoutCookies(page_url)

        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
        aff = scrapertools.find_single_match(data, "'aff', '([^']+)'")
        headers_c = [['User-Agent', 'Mozilla/5.0'], ['Referer', page_url],
                     ['Cookie', '; lang=1']]
        # Fetching this JS endpoint registers the session server-side
        coding_url = "https:" + scrapertools.find_single_match(
            data,
            '(?i)src="(?:https:|)((?://www.flashx.tv|//files.fx.fastcontentdelivery.com)/\w+.js\?[^"]+)"'
        )
        if coding_url.endswith("="):
            coding_url += file_id
        coding = scrapertools.downloadpage(coding_url, headers=headers_c)

        data = scrapertools.downloadpage(page_url, headers=headers)
        flashx_id = scrapertools.find_single_match(
            data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data,
                                               'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data,
                                                'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (
            flashx_id, urllib.quote(fname), hash_f)
        # The page publishes its own wait time; fall back to 6 seconds
        wait_time = scrapertools.find_single_match(data,
                                                   "<span id='xxc2'>(\d+)")

        try:
            time.sleep(int(wait_time) + 1)
        except:
            time.sleep(6)
        # BUGFIX: previously appended to the shared module-level `headers`
        # list, which grew (and leaked Referer/Cookie values) on every
        # call. Build a local copy for this request instead.
        post_headers = headers + [
            ['Referer', "https://www.flashx.tv/"],
            ['Cookie', 'lang=1; file_id=%s; aff=%s' % (file_id, aff)],
        ]
        data = scrapertools.downloadpage('https://www.flashx.tv/dl?playthis',
                                         post=post,
                                         headers=post_headers)

        matches = scrapertools.find_multiple_matches(
            data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            if match.startswith("eval"):
                try:
                    match = jsunpack.unpack(match)
                    fake = (scrapertools.find_single_match(match,
                                                           "(\w{40,})") == "")
                    if fake:
                        match = ""
                    else:
                        break
                except:
                    match = ""

        if not match:
            match = data

    # Extract the URL, e.g.:
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(
        match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Spanish":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(),
                                        'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info(
                    "pelisalacarta.servers.flashx Error al descargar el subtítulo: "
                    + traceback.format_exc())

    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append([
                "." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0,
                subtitle
            ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.flashx %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a sockshare page into a list of playable [label, url] pairs.

    Submits the "Continue as Free User" hidden-hash form, then extracts the
    media URL either from the flowplayer playlist XML or, for sockshare.ws,
    from a hidden input field.  Returns [] when the page layout is unknown.
    """
    logger.info("[sockshare.py] url="+page_url)
    page_html = scrapertools.cache_page(page_url)

    # Primary form layout: hidden hash value followed by a "confirm" button.
    #<input type="hidden" value="72bed17fd0fa62ac" name="hash" /> <input name="agreeButton" type="submit" value="Continue as Free User" disabled="disabled" id="agreeButton" class="confirm_button" />
    form_pattern = ('<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)">[^<]+'
                    '<input name="(confirm)" type="submit" value="([^"]+)"')
    form_fields = re.compile(form_pattern,re.DOTALL).findall(page_html)
    scrapertools.printMatches(form_fields)

    # Alternative form layout used by sockshare.ws ("agreeButton").
    if not form_fields:
        form_pattern = ('<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)"[^<]+'
                        '<input name="(agreeButton)" type="submit" value="([^"]+)"')
        form_fields = re.compile(form_pattern,re.DOTALL).findall(page_html)
        scrapertools.printMatches(form_fields)

    if not form_fields:
        return []

    # Re-submit the form the way a browser would.
    hash_value, hash_name, button_name, button_value = form_fields[0]
    post = hash_name+"="+hash_value+"&"+button_name+"="+(button_value.replace(" ","+"))
    request_headers = [
        ['User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'],
        ["Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"],
        ['Referer',page_url],
    ]

    page_html = scrapertools.cache_page( page_url , post=post, headers=request_headers )
    logger.info("data="+page_html)

    # The flowplayer playlist URL lives in the page's inline JS config.
    playlist_matches = re.compile("playlist: '(.+?)'",re.DOTALL).findall(page_html)
    video_urls = []

    if playlist_matches:
        xmlurl = urlparse.urljoin(page_url,playlist_matches[0])
        logger.info("[sockshare.py] Playlis="+xmlurl)

        logger.info("xmlurl="+xmlurl)
        xml_data = scrapertools.downloadpageWithoutCookies(xmlurl)
        # The direct media URL sits in a <media:content> node of the playlist.
        media_matches = re.compile('</link><media\:content url="(.+?)"',re.DOTALL).findall(xml_data)
        scrapertools.printMatches(media_matches)

        if media_matches:
            mediaurl = media_matches[0].replace("&amp;","&")
            # Label the entry with the (3-char) file extension.
            video_urls.append( ["."+mediaurl.rsplit('.',1)[1][0:3]+" [sockshare]",mediaurl])

    else:
        logger.info("[sockshare.py] No encuentra Playlist=")

        # sockshare.ws keeps the direct URL in a hidden input instead.
        mediaurl = scrapertools.get_match(page_html,'<input type="hidden" value="([^"]+)" id="videoFile"').replace("&amp;","&")
        video_urls.append( [ "[sockshare]" , mediaurl ])

    for video_url in video_urls:
        logger.info("[sockshare.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 42
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve an openload embed page into [label, url, 0, subtitle] entries.

    Tries, in order: the aadecode'd inline JS, the char-shifted "hiddenurl"
    span (offset taken from the decoded JS or from a jjencoded script), the
    linkimg/signature decoder (decodeopenload), and finally the openload API
    (get_link_api), which may not respond at peak hours.

    FIX(review): the 'charCodeAt' patterns used "\\s*+\\s*" — an invalid (or,
    on re versions supporting possessive quantifiers, wrong) quantifier
    instead of an escaped literal "+", so the shift offset was never matched
    and decoding always fell through to slower paths.  The escaped form
    already used elsewhere in this file is now applied in both branches.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    # Spanish captions track, passed through to the player when present.
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header appended to the URL so the player downloads with this User-Agent.
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            # Embed markup missing: fall back to the /f/ (file) page.
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)
            text_encode = scrapertools.find_multiple_matches(data,"(゚ω゚.*?\(\'\_\'\));")
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                videourl = "http://" + scrapertools.find_single_match(text_decode, '(openload.co/.*?)\}')
            except:
                videourl = "http://"

            if videourl == "http://":
                hiddenurl = scrapertools.find_single_match(data, 'id="hiddenurl\s*">(.*?)<')
                if hiddenurl:
                    # Shift offset: "...charCodeAt(0) + <n>..." in the decoded JS.
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(hiddenurl, number)
                    else:
                        # Offset hidden in a jjencoded <script>; decode it first.
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        videourl = decode_hidden(hiddenurl, number)

                else:
                    videourl = decodeopenload(data)
            # Method failed: fall back to the API (may not work at peak hours).
            if not videourl:
                videourl = get_link_api(page_url)
        else:
            text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                # "welikekodi" marks the arithmetic selecting the right script.
                subtract = scrapertools.find_single_match(text_decode, 'welikekodi.*?(\([^;]+\))')
            except:
                subtract = ""

            if subtract:
                # NOTE(review): eval() of scraped arithmetic — trusted upstream only.
                index = int(eval(subtract))
                # Decode only the script the computed index points at.
                text_decode2 = aadecode(text_encode[index])
                videourl = "https://" + scrapertools.find_single_match(text_decode2, "(openload.co/.*?)\}")
            else:
                hiddenurl = scrapertools.find_single_match(data, 'id="hiddenurl\s*">(.*?)<')
                if hiddenurl:
                    # Shift offset: "...charCodeAt(0) + <n>..." in the decoded JS.
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(hiddenurl, number)
                    else:
                        # Offset hidden in a jjencoded <script>; decode it first.
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        videourl = decode_hidden(hiddenurl, number)
                else:
                    videourl = decodeopenload(data)

            # Method failed: fall back to the API (may not work at peak hours).
            if not videourl:
                videourl = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload "+traceback.format_exc())
        # Method failed: fall back to the API (may not work at peak hours).
        videourl = get_link_api(page_url)

    # The extension comes from the description meta tag (the remote filename).
    extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 43
0
def decodeopenload(data):
    """Derive the openload stream URL from the page's obfuscation image.

    The embed page ships a base64-inlined PNG (id="linkimg") whose pixel
    bytes spell out candidate link characters, plus an obfuscator script
    containing a "signatureNumbers" string that selects which of those
    characters form the real stream id.

    Parameters:
        data: raw HTML of the openload embed page (str).
    Returns:
        The "https://openload.co/stream/<id>?mime=true" URL (str).
    """
    import base64, math
    from lib.png import Reader as PNGReader
    # get image data
    imageData = scrapertools.find_single_match(data, '<img *id="linkimg" *src="([^"]+)"')

    # Drop the "data:image/png;base64," prefix and decode the inline PNG.
    imageData = base64.b64decode(imageData.rsplit('base64,', 1)[1])
    x, y, pixel, meta = PNGReader(bytes=imageData).read()

    # Flatten every pixel sample into one long character string.
    imageStr = ""
    try:
        for item in pixel:
            for p in item:
                imageStr += chr(p)
    except:
        # Best-effort: keep whatever was decoded before a bad sample.
        pass

    # split image data into blocks of 12 rows x 20 chars, stopping at NUL.
    imageTabs = []
    i = -1
    for idx in range(len(imageStr)):
        if imageStr[idx] == '\0':
            break
        if 0 == (idx % (12 * 20)):
            imageTabs.append([])
            i += 1
            j = -1
        if 0 == (idx % (20)):
            imageTabs[i].append([])
            j += 1
        imageTabs[i][j].append(imageStr[idx])

    # get signature data: first try the known obfuscator script n.js ...
    signStr = ""
    try:
        data_obf = scrapertools.downloadpageWithoutCookies("https://openload.co/assets/js/obfuscator/n.js")
        if "signatureNumbers" in data_obf:
            signStr = scrapertools.find_single_match(data_obf, '[\'"]([^"\']+)[\'"]')
    except:
        pass

    # ... otherwise probe every obfuscator script referenced by the page.
    if not signStr:
        scripts = scrapertools.find_multiple_matches(data, '<script src="(/assets/js/obfuscator/[^"]+)"')
        for scr in scripts:
            data_obf = scrapertools.downloadpageWithoutCookies('https://openload.co%s' % scr)
            if "signatureNumbers" in data_obf:
                signStr = scrapertools.find_single_match(data_obf, '[\'"]([^"\']+)[\'"]')
                break

    # split signature data into blocks of 11 rows x 26 chars, stopping at NUL.
    signTabs = []
    i = -1
    for idx in range(len(signStr)):
        if signStr[idx] == '\0':
            break
        if 0 == (idx % (11 * 26)):
            signTabs.append([])
            i += 1
            j = -1
        if 0 == (idx % (26)):
            signTabs[i].append([])
            j += 1
        signTabs[i][j].append(signStr[idx])

    # get link data: walk blocks 2, 3, 5 and 7 of the signature table and
    # pick the image character wherever the signature matches the rolling
    # marker char (starts at 'c', advances by 2.5, wraps past 'z' to 'b').
    linkData = {}
    for i in [2, 3, 5, 7]:
        linkData[i] = []
        tmp = ord('c')
        for j in range(len(signTabs[i])):
            for k in range(len(signTabs[i][j])):
                if tmp > 122:
                    tmp = ord('b')
                if signTabs[i][j][k] == chr(int(math.floor(tmp))):
                    if len(linkData[i]) > j:
                        continue  # at most one pick per signature row
                    tmp += 2.5;
                    if k < len(imageTabs[i][j]):
                        linkData[i].append(imageTabs[i][j][k])
    res = []
    for idx in linkData:
        res.append(''.join(linkData[idx]).replace(',', ''))

    # Reassemble the four fragments in their expected order.
    # NOTE(review): relies on dict iteration matching the [2, 3, 5, 7]
    # insertion order — guaranteed on CPython 3.7+, not on older runtimes;
    # TODO confirm the target interpreter.
    res = res[3] + '~' + res[1] + '~' + res[2] + '~' + res[0]
    videourl = 'https://openload.co/stream/{0}?mime=true'.format(res)

    return videourl
Esempio n. 44
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    """Resolve an openload embed page into [label, url, 0, subtitle] entries.

    Decodes the aaencoded inline JS to find window.r (the DOM id prefix of
    the element holding the char-shifted stream id), rebuilds the stream
    URL, follows its redirect to the CDN, and falls back to the openload
    API on any failure.
    """
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    # Spanish captions track, passed through to the player when present.
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header appended to the URL so the player downloads with this UA.
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            # Embed markup missing: fall back to the /f/ (file) page.
            url = page_url.replace("/embed/", "/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        # Decode every aaencoded ("゚ω゚...") script on the page.
        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        # window.r names the DOM id prefix of the obfuscated stream id.
        var_r = scrapertools.find_single_match(
            text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="' + var_r + '[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            # Numeric scheme: 3-digit base (v1), 2-digit key (v2), then
            # 5-digit groups decoded as chr(ddd + v1 - v2 * dd).
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(
                        int(encode[index:index + 3]) + v1 -
                        v2 * int(encode[index + 3:index + 3 + 2]))
                    index += 5
            except:
                # NOTE(review): text_decode is not reset on failure, so a
                # partially decoded candidate prefixes the next one — verify
                # whether that is intended.
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            # Follow the redirect to the CDN host and read the content type.
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https",
                                             "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # Method failed: fall back to the API (may not work at peak hours).
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload " + traceback.format_exc())
        # Method failed: fall back to the API (may not work at peak hours).
        videourl, extension = get_link_api(page_url)

    # Map the MIME type to a file extension (e.g. video/mp4 -> .mp4).
    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            # Last resort: extension from the description meta tag (filename).
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([
            extension + " [Openload] ", videourl + header_down + extension, 0,
            subtitle
        ])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve an openload embed page into [label, url, 0, subtitle] entries.

    streamondemand variant: decodes the aaencoded inline JS to find
    window.r (the DOM id prefix of the element holding the char-shifted
    stream id), rebuilds the stream URL, follows its redirect to the CDN,
    and falls back to the openload API on any failure.
    """
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    # Spanish captions track, passed through to the player when present.
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header appended to the URL so the player downloads with this UA.
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            # Embed markup missing: fall back to the /f/ (file) page.
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        # Decode every aaencoded ("゚ω゚...") script on the page.
        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        # window.r names the DOM id prefix of the obfuscated stream id.
        var_r = scrapertools.find_single_match(text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="'+var_r+'[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            # Numeric scheme: 3-digit base (v1), 2-digit key (v2), then
            # 5-digit groups decoded as chr(ddd + v1 - v2 * dd).
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(int(encode[index:index+3]) + v1 - v2 * int(encode[index+3:index+3+2]))
                    index += 5
            except:
                # NOTE(review): text_decode is not reset on failure, so a
                # partially decoded candidate prefixes the next one — verify.
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            # Follow the redirect to the CDN host and read the content type.
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https", "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # Method failed: fall back to the API (may not work at peak hours).
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("streamondemand.servers.openload "+traceback.format_exc())
        # Method failed: fall back to the API (may not work at peak hours).
        videourl, extension = get_link_api(page_url)

    # Map the MIME type to a file extension (e.g. video/mp4 -> .mp4).
    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            # Last resort: extension from the description meta tag (filename).
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "."+extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 46
0
def episodios(item):
    """Build the episode list for a seriesdanko show page.

    Parses the page HTML (or item.extra when the caller already has it)
    for <a>/<img> episode rows, normalises "1X01"-style codes to "1x01",
    tags each title with the language taken from the flag image, and adds
    the library/download helper entries.  When no rows are found, falls
    back to a generic known-server scan of the content.
    """
    logger.info("pelisalacarta.channels.seriesdanko episodios")

    if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma":
        import xbmc
        if config.get_setting("forceview")=="true":
            xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
            #xbmc.executebuiltin("Container.Content(Movies)")

    item.url = item.url.replace("../","") ## Fix relative links coming from search
    # URLs of the form "page|fallback" carry an alternate source.
    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page (unless the caller already passed the HTML).
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

    # Extract the entries
        if sw:
            try:
                # JSON-ish feed: eval it and unquote the entry body.
                # NOTE(review): eval() on downloaded data — trusted source only.
                datadict = eval( "(" + data + ")" )
                data = urllib.unquote_plus(datadict["entry"]["content"]["$t"].replace("\\u00","%"))
                matches=[]
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)<div class='blog-pager' id='blog-pager'>"
            matches = re.compile(patronvideos,re.DOTALL).findall(data)

        if len(matches)>0:
            contenidos = matches[0].replace('"',"'").replace("\n","")
        else:
            contenidos = item.url
            if sw:
                url = item.url.split("|")[1]
                if not url.startswith("http://"):
                    url = urlparse.urljoin("http://seriesdanko.com",url)
                # Download the fallback page
                data = scrapertools.downloadpageGzip(url)
                patronvideos  = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos,re.DOTALL).findall(data)
                if len(matches)>0:
                    contenidos = matches[0]

    # Episode rows: link, title, language-flag image.
    patronvideos  = "<a href='([^']+)'>([^<]+)</a> <img(.+?)/>"
    matches = re.compile(patronvideos,re.DOTALL).findall(contenidos.replace('"',"'"))
    #print contenidos
    try:
        # Plot: the "Informac..." block, with HTML tags stripped.
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot)==0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(contenidos)[0]
        plot = re.sub('<[^>]+>'," ",plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1].replace("\n","").replace("\r","")
        logger.info("scrapedtitle="+scrapedtitle)
        ## Removed for the "add this series to the XBMC library" option (15-12-2014)
        #scrapedtitle = scrapertools.remove_show_from_title(scrapedtitle,item.show)

        # Normalise "1X01" episode codes to lowercase "1x01".
        episode_code = scrapertools.find_single_match(scrapedtitle,"(\d+X\d+)")
        logger.info("episode_code="+episode_code)
        if episode_code!="":
            season_number = scrapertools.find_single_match(scrapedtitle,"(\d+)X\d+")
            logger.info("season_number="+season_number)
            episode_number = scrapertools.find_single_match(scrapedtitle,"\d+X(\d+)")
            logger.info("episode_number="+episode_number)
            new_episode_code = season_number+"x"+episode_number
            logger.info("new_episode_code="+new_episode_code)
            scrapedtitle = scrapedtitle.replace(episode_code,new_episode_code)
            logger.info("scrapedtitle="+scrapedtitle)

        #[1x01 - Capitulo 01]
        #patron = "(\d+x\d+) - Capitulo \d+"
        #matches = re.compile(patron,re.DOTALL).findall(scrapedtitle)
        #print matches
        #if len(matches)>0 and len(matches[0])>0:
        #    scrapedtitle = matches[0]

        # Flag image in the row -> language tag appended to the title.
        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (VO)"
        elif "vos.png" in match[2]:
            subtitle = " (VOS)"
        elif "ca.png"  in match[2]:
            subtitle = " (Catalan)"
        elif "ga.jpg"  in match[2]:
            subtitle = " (Gallego)"
        elif "eu.jpg"  in match[2]:
            subtitle = " (Euskera)"
        elif "ba.png"  in match[2]:
            subtitle = " (Bable)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,match[0]).replace("\n","").replace("\r","")
        if not item.thumbnail:
            try:
                scrapedthumbnail = re.compile(r"src=([^']+)'").findall(contenidos)[0]
            except:
                    scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        scrapedthumbnail = scrapedthumbnail.replace("\n","").replace("\r","")
        if item.fulltitle == '':
            item.fulltitle = scrapedtitle + subtitle
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        ## show added for the "add this series to the XBMC library" option (15-12-2014)
        # Add to the XBMC listing
        itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle+subtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , fulltitle = item.fulltitle, context="4", show=item.show, folder=True) )

    if config.get_library_support() and len(itemlist)>0:
        itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=item.show))
        itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios###", show=item.show))

    #xbmc.executebuiltin("Container.Content(Movies)")

    if len(itemlist)==0:
        # No episode rows found: scan the content for known video servers.
        listvideos = servertools.findvideos(contenidos)

        for title,url,server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Add to the XBMC listing
            itemlist.append( Item(channel=__channel__, action="play", server=server, title=item.title +" "+ scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot, fulltitle = scrapedtitle , folder=False) )

    return itemlist
def capitulos(item):
    """Build the chapter list for a seriesdanko show page.

    Legacy twin of episodios(): parses the page (or item.extra) for
    <a>/<img> chapter rows, tags each title with its language from the
    flag image, and falls back to a generic known-server scan when no
    rows are found.
    """
    logger.info("[seriesdanko.py] capitulos")

    # NOTE(review): unlike episodios(), xbmc is used here without an import
    # or platform guard — presumably imported at module level; verify.
    if config.get_setting("forceview")=="true":
        xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
        #xbmc.executebuiltin("Container.Content(Movies)")

    # URLs of the form "page|fallback" carry an alternate source.
    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page (unless the caller already passed the HTML).
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

    # Extract the entries
        if sw:
            try:
                # JSON-ish feed: eval it and unquote the first entry body.
                # NOTE(review): eval() on downloaded data — trusted source only.
                datadict = eval( "(" + data + ")" )
                data = urllib.unquote_plus(datadict["feed"]["entry"][0]["content"]["$t"].replace("\\u00","%"))
                #data = urllib.unquote_plus(data.replace("\u00","%")).replace('\\"','"')
                #patronvideos  = "content(.*?)post-footer"
                matches=[]
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)</div>"
            matches = re.compile(patronvideos,re.DOTALL).findall(data)

        if len(matches)>0:
            contenidos = matches[0]
        else:
            contenidos = item.url
            if sw:
                url = item.url.split("|")[1]
                # Download the fallback page
                data = scrapertools.downloadpageGzip(url)
                patronvideos  = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos,re.DOTALL).findall(data)
                if len(matches)>0:
                    contenidos = matches[0]

    # Chapter rows: link, title, language-flag image.
    patronvideos  = '<a href="([^"]+)">([^<]+)</a>.*?src="([^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(contenidos.replace("'",'"'))
    #print contenidos
    try:
        # Plot: the "Informac..." block, with HTML tags stripped.
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot)==0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(contenidos)[0]
        plot = re.sub('<[^>]+>'," ",plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1]
        # Flag image in the row -> language tag appended to the title.
        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (Version Original)"
        elif "vos.png" in match[2]:
            subtitle = " (Subtitulado)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,match[0])
        if not item.thumbnail:
            try:
                scrapedthumbnail = re.compile(r'src="(.+?)"').findall(contenidos)[0]
            except:
                scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC listing
        itemlist.append( Item(channel=CHANNELNAME, action="findvideos", title=scrapedtitle+subtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    #xbmc.executebuiltin("Container.Content(Movies)")


    if len(itemlist)==0:
        # No chapter rows found: scan the content for known video servers.
        listvideos = servertools.findvideos(contenidos)

        for title,url,server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Add to the XBMC listing
            xbmctools.addnewvideo( CHANNELNAME , "play" , "" , server , item.title +" "+ scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )

    return itemlist
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a putlocker embed page into playable [label, url] pairs.

    Submits the hidden-hash "confirm" form, reads the flowplayer playlist
    XML and extracts the <media:content> URL.  Returns [] when the page
    layout is not recognised.

    FIX(review): the previous version fell through into an unrelated
    find_videos() fragment and returned the undefined name `devuelve`
    (NameError at runtime; `encontrados` and `text` were undefined too),
    and it never appended the extracted mediaurl to any result list.
    """
    logger.info("[putlocker.py] url="+page_url)

    data = scrapertools.cache_page(page_url)
    logger.info("data="+data)

    # Hidden form: hash value plus the "confirm" submit button.
    patron  = '<input type="hidden" value="([0-9a-f]+?)" name="([^"]+)">[^<]+'
    patron += '<input name="confirm" type="submit" value="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)==0: return []

    # Re-submit the form the way a browser would ("Continue as Free User").
    post = matches[0][1]+"="+matches[0][0]+"&confirm="+(matches[0][2].replace(" ","+"))
    headers = []
    headers.append( ['User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:10.0.2) Gecko/20100101 Firefox/10.0.2'] )
    headers.append( [ "Accept" , "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" ])
    headers.append( ['Referer',page_url] )

    data = scrapertools.cache_page( page_url , post=post, headers=headers )
    logger.info("data="+data)

    # The flowplayer playlist URL lives in the page's inline JS config.
    patron = "playlist: '(.+?)'"
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    video_urls = []
    if len(matches)>0:
        xmlurl = urlparse.urljoin(page_url,matches[0])
        logger.info("[putlocker.py] Playlist="+xmlurl)

        logger.info("xmlurl="+xmlurl)
        data = scrapertools.downloadpageWithoutCookies(xmlurl)
        logger.info("data="+data)
        # The direct media URL sits in a <media:content> node; some
        # playlists omit the preceding </link>.
        try:
            mediaurl = scrapertools.get_match(data,'</link><media\:content url="(.+?)"')
        except:
            mediaurl = scrapertools.get_match(data,'<media\:content url="(.+?)"')
        logger.info("mediaurl="+mediaurl)
        mediaurl = mediaurl.replace("&amp;","&")
        # Label the entry with the (3-char) file extension, mirroring the
        # sockshare server implementation.
        video_urls.append( ["."+mediaurl.rsplit('.',1)[1][0:3]+" [putlocker]",mediaurl] )

    for video_url in video_urls:
        logger.info("[putlocker.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Esempio n. 49
0
def decodeopenload(data):
    """Derive the openload stream URL from the page's obfuscation image.

    The embed page ships a base64-inlined PNG (id="linkimg") whose pixel
    bytes spell out candidate link characters, plus an obfuscator script
    containing a "signatureNumbers" string that selects which of those
    characters form the real stream id.

    Parameters:
        data: raw HTML of the openload embed page (str).
    Returns:
        The "https://openload.co/stream/<id>?mime=true" URL (str).
    """
    import base64, math
    from lib.png import Reader as PNGReader
    # get image data
    imageData = scrapertools.find_single_match(data, '<img *id="linkimg" *src="([^"]+)"')

    # Drop the "data:image/png;base64," prefix and decode the inline PNG.
    imageData = base64.b64decode(imageData.rsplit('base64,', 1)[1])
    x, y, pixel, meta = PNGReader(bytes=imageData).read()

    # Flatten every pixel sample into one long character string.
    imageStr = ""
    try:
        for item in pixel:
            for p in item:
                imageStr += chr(p)
    except:
        # Best-effort: keep whatever was decoded before a bad sample.
        pass

    # split image data into blocks of 12 rows x 20 chars, stopping at NUL.
    imageTabs = []
    i = -1
    for idx in range(len(imageStr)):
        if imageStr[idx] == '\0':
            break
        if 0 == (idx % (12 * 20)):
            imageTabs.append([])
            i += 1
            j = -1
        if 0 == (idx % (20)):
            imageTabs[i].append([])
            j += 1
        imageTabs[i][j].append(imageStr[idx])

    # get signature data: first try the known obfuscator script n.js ...
    signStr = ""
    try:
        data_obf = scrapertools.downloadpageWithoutCookies("https://openload.co/assets/js/obfuscator/n.js")
        if "signatureNumbers" in data_obf:
            signStr = scrapertools.find_single_match(data_obf, '[\'"]([^"\']+)[\'"]')
    except:
        pass

    # ... otherwise probe every obfuscator script referenced by the page.
    if not signStr:
        scripts = scrapertools.find_multiple_matches(data, '<script src="(/assets/js/obfuscator/[^"]+)"')
        for scr in scripts:
            data_obf = scrapertools.downloadpageWithoutCookies('https://openload.co%s' % scr)
            if "signatureNumbers" in data_obf:
                signStr = scrapertools.find_single_match(data_obf, '[\'"]([^"\']+)[\'"]')
                break

    # split signature data into blocks of 11 rows x 26 chars, stopping at NUL.
    signTabs = []
    i = -1
    for idx in range(len(signStr)):
        if signStr[idx] == '\0':
            break
        if 0 == (idx % (11 * 26)):
            signTabs.append([])
            i += 1
            j = -1
        if 0 == (idx % (26)):
            signTabs[i].append([])
            j += 1
        signTabs[i][j].append(signStr[idx])

    # get link data: walk blocks 2, 3, 5 and 7 of the signature table and
    # pick the image character wherever the signature matches the rolling
    # marker char (starts at 'c', advances by 2.5, wraps past 'z' to 'b').
    linkData = {}
    for i in [2, 3, 5, 7]:
        linkData[i] = []
        tmp = ord('c')
        for j in range(len(signTabs[i])):
            for k in range(len(signTabs[i][j])):
                if tmp > 122:
                    tmp = ord('b')
                if signTabs[i][j][k] == chr(int(math.floor(tmp))):
                    if len(linkData[i]) > j:
                        continue  # at most one pick per signature row
                    tmp += 2.5;
                    if k < len(imageTabs[i][j]):
                        linkData[i].append(imageTabs[i][j][k])
    res = []
    for idx in linkData:
        res.append(''.join(linkData[idx]).replace(',', ''))

    # Reassemble the four fragments in their expected order.
    # NOTE(review): relies on dict iteration matching the [2, 3, 5, 7]
    # insertion order — guaranteed on CPython 3.7+, not on older runtimes;
    # TODO confirm the target interpreter.
    res = res[3] + '~' + res[1] + '~' + res[2] + '~' + res[0]
    videourl = 'https://openload.co/stream/{0}?mime=true'.format(res)

    return videourl
Esempio n. 50
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the playable media URLs for a flashx.tv page.

    Mimics a browser: fetches the embed page, unpacks the obfuscated
    (p,a,c,k) player script and, if the sources are not exposed there,
    replays the human-verification download form (hidden fields, three
    cookie-priming requests, timed wait) against /dl to obtain the packed
    script that contains the media links.

    page_url -- flashx embed/page URL.
    premium/user/password/video_password -- unused; kept for the common
        server-connector signature.

    Returns a list of [label, media_url, 0, subtitle_path] entries.
    """
    logger.info("streamondemand.servers.flashx url=" + page_url)

    # Request the page once.
    data = scrapertools.downloadpageWithoutCookies(page_url)
    # If the anti-Kodi warning appears, load the verification page and then
    # re-request the original page.
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.downloadpageWithoutCookies(url_reload)
            data = scrapertools.downloadpageWithoutCookies(page_url)
        except:
            pass

    # Unpack the first eval(p,a,c,k) script that carries a long (40+ chars)
    # token; scripts without one are decoys and are discarded.
    matches = scrapertools.find_multiple_matches(data, "<script type='text/javascript'>(.*?)</script>")
    m = ""
    for n, m in enumerate(matches):
        if m.startswith("eval"):
            try:
                m = jsunpack.unpack(m)
                fake = (scrapertools.find_single_match(m, "(\w{40,})") == "")
                if fake:
                    m = ""
                else:
                    break
            except:
                m = ""
    match = m
    if "sources:[{file:" not in match:
        # Embed page did not expose the sources: switch to the non-embed
        # page and replay the download form flow.
        page_url = page_url.replace("playvid-", "")

        headers = {'Host': 'www.flashx.tv', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
                  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5',
                  'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
                  'Cookie': ''}
        data = scrapertools.downloadpage(page_url, headers=headers.items())
        # Hidden form fields needed to emulate the "Proceed to video" POST.
        flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (flashx_id, urllib.quote(fname), hash_f)
        wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")

        # Three fire-and-forget requests the real player issues; they set
        # server-side state/cookies required before the /dl POST is accepted.
        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
        coding_url = 'https://files.fx.fastcontentdelivery.com/jquery2.js?fx=%s' % base64.encodestring(file_id)
        headers['Host'] = "files.fx.fastcontentdelivery.com"
        headers['Referer'] = "https://www.flashx.tv/"
        headers['Accept'] = "*/*"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/counter.cgi?fx=%s' % base64.encodestring(file_id)
        headers['Host'] = "www.flashx.tv"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/flashx.php?fxfx=3'
        headers['X-Requested-With'] = 'XMLHttpRequest'
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        # Honor the countdown the page enforces before the form may be sent.
        try:
           time.sleep(int(wait_time)+1)
        except:
           time.sleep(6)

        headers.pop('X-Requested-With')
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        data = scrapertools.downloadpage('https://www.flashx.tv/dl?playthis', post=post, headers=headers.items())

        # Same decoy filter as above, now on the /dl response.
        matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            if match.startswith("eval"):
                try:
                    match = jsunpack.unpack(match)
                    fake = (scrapertools.find_single_match(match, "(\w{40,})") == "")
                    if fake:
                        match = ""
                    else:
                        break
                except:
                    match = ""

        if not match:
            match = data

    # Extract the URL, e.g.
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    # Download the Italian subtitle (if any) to a local file; best-effort.
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Italian":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info("streamondemand.servers.flashx Error al descargar el subtítulo: "+traceback.format_exc())

    # Keep only real media entries (skip thumbnails and subtitle tracks).
    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.flashx %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Esempio n. 51
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the playable media URLs for a flashx.tv page.

    Fetches the page, unpacks the obfuscated player script and, if the
    sources are not exposed there, replays the download form (hidden
    fields, cookie-priming JS request, fixed wait) against /dl.

    page_url -- flashx page URL.
    premium/user/password/video_password -- unused; kept for the common
        server-connector signature.

    Returns a list of [label, media_url, 0, subtitle_path] entries.
    """
    logger.info("pelisalacarta.servers.flashx url=" + page_url)

    # Request the page once.
    data = scrapertools.cache_page(page_url, headers=headers)
    # If the anti-Kodi warning appears, load the verification page and then
    # re-request the original page.
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.cache_page(url_reload, headers=headers)
            data = scrapertools.cache_page(page_url, headers=headers)
        except:
            pass

    match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")

    if match.startswith("eval"):
        try:
            match = jsunpack.unpack(match)
        except:
            pass

    if not "sources:[{file:" in match:
        # Page did not expose the sources: replay the download form flow.
        page_url = page_url.replace("playvid-", "")
        data = scrapertools.downloadpageWithoutCookies(page_url)

        # Cookie-priming JS request the real player issues before the POST.
        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
        aff = scrapertools.find_single_match(data, "'aff', '([^']+)'")
        headers_c = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'],
                     ['Referer', page_url],
                     ['Cookie', '; lang=1']]
        coding_url = scrapertools.find_single_match(data, '(?i)src="(http://www.flashx.tv/\w+.js\?[^"]+)"')
        if coding_url.endswith("="):
            coding_url += file_id
        coding = scrapertools.downloadpage(coding_url, headers=headers_c)

        # Hidden form fields needed to emulate the "Proceed to video" POST.
        data = scrapertools.downloadpage(page_url, headers=headers)
        flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (flashx_id, urllib.quote(fname), hash_f)

        # Honor the countdown the page enforces before the form may be sent.
        time.sleep(6)
        # Build the POST headers on a copy: the original code appended to the
        # module-level `headers` list, polluting it for every later request.
        post_headers = list(headers)
        post_headers.append(['Referer', page_url])
        post_headers.append(['Cookie', 'lang=1; file_id=%s; aff=%s' % (file_id, aff)])
        data = scrapertools.downloadpage('http://www.flashx.tv/dl', post=post, headers=post_headers)

        # Unpack the first packed script that mentions "file".
        matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            try:
                match = jsunpack.unpack(match)
            except:
                match = ""
            if "file" in match:
                break

        if not match:
            match = data

    # Extract the URL, e.g.
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    # Download the Spanish subtitle (if any) to a local file; best-effort.
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Spanish":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info("pelisalacarta.servers.flashx Error al descargar el subtítulo: "+traceback.format_exc())

    # Keep only real media entries (skip thumbnails and subtitle tracks).
    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.flashx %s - %s" % (video_url[0], video_url[1]))

    return video_urls