Example #1
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    
    logger.info("fusionse.servers.clouddy get_video_url(page_url='%s')" % page_url)
    
    video_urls = []
    
    request_headers = []
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])
    body,response_headers = scrapertools.read_body_and_headers(page_url,headers=request_headers)
    
    patron = 'key: "([^"]+).*?file:"([^"]+)"'
    param = re.compile(patron,re.DOTALL).findall(body)
    url_get_video ='https://www.cloudy.ec/api/player.api.php?user=&cid2=&pass=&numOfErrors=0&key=<clave>&file=<fichero>&cid3='
    url_get_video = url_get_video.replace("<clave>", param[0][0])
    url_get_video = url_get_video.replace("<fichero>", param[0][1])
    
    request_headers.append(["referer",page_url])
    request_headers.append(["accept-encoding", "gzip, deflate, sdch"])
    request_headers.append(["x-requested-with","ShockwaveFlash/20.0.0.286"])
    request_headers.append(["accept-language", "es-ES,es;q=0.8"])
        
    body, response_headers = scrapertools.read_body_and_headers(url_get_video, headers=request_headers)
    
    body = urllib.unquote(body)
     
    video = re.findall("url=(.*?)&title", body, re.DOTALL)
    
    video_urls.append([scrapertools.get_filename_from_url(video[0])[-4:],video[0] ]) 
    
    return video_urls
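Note: these snippets are connector functions taken from pelisalacarta-style add-ons and omit their module-level context. A minimal sketch of the imports and helpers they assume (the exact module paths are an assumption, not shown in the originals):

# Assumed module-level context for the example functions below.
import re
import urllib                    # Python 2: urllib.urlencode / urllib.unquote

from core import scrapertools   # HTTP and scraping helpers (assumed path)
from core import logger         # logging helper (assumed path)
from core import jsontools      # JSON helper used by the mail.ru examples

# Several Dailymotion examples also rely on a shared header list such as:
DEFAULT_HEADERS = [["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X "
                    "10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"]]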
Example #2
def get_long_url(short_url):
    logger.info("(short_url='%s')" % short_url)

    request_headers = []
    request_headers.append(["User-Agent",
                            "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])
    request_headers.append(["Referer", "http://linkdecrypter.com"])
    post = urllib.urlencode({"pro_links": short_url, "modo_links": "text", "modo_recursivo": "on", "link_cache": "on"})
    url = "http://linkdecrypter.com/"

    # Workaround because Python does not seem to handle the PHPSESSID cookie header correctly
    body, response_headers = scrapertools.read_body_and_headers(url, post=post, headers=request_headers)
    location = ""
    n = 1
    while True:
        for name, value in response_headers:
            if name == "set-cookie":
                logger.info("Set-Cookie: " + value)
                cookie_name = scrapertools.find_single_match(value, '(.*?)\=.*?\;')
                cookie_value = scrapertools.find_single_match(value, '.*?\=(.*?)\;')
                request_headers.append(["Cookie", cookie_name + "=" + cookie_value])

        body, response_headers = scrapertools.read_body_and_headers(url, headers=request_headers)
        logger.info("body=" + body)

        try:
            location = scrapertools.find_single_match(body, '<textarea.*?class="caja_des">([^<]+)</textarea>')
            logger.info("location=" + location)
            break
        except:
            n = n + 1
            if n > 3:
                break

    return location
Example #3
def get_long_url(short_url):
    logger.info("servers.adfly get_long_url(short_url='%s')" % short_url)

    request_headers = []
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])
    request_headers.append(["Referer","http://linkdecrypter.com"])
    post=urllib.urlencode({"pro_links":short_url,"modo_links":"text","modo_recursivo":"on","link_cache":"on"})
    url = "http://linkdecrypter.com/"
    
    # Workaround because Python does not seem to handle the PHPSESSID cookie header correctly
    body,response_headers = scrapertools.read_body_and_headers(url,post=post,headers=request_headers)

    location = ""  # make sure a value is defined even if no match is ever found
    n = 1
    while True:
        for name,value in response_headers:
            if name=="set-cookie":
                logger.info("Set-Cookie: "+value)
                cookie_name = scrapertools.get_match(value,'(.*?)\=.*?\;')
                cookie_value = scrapertools.get_match(value,'.*?\=(.*?)\;')
                request_headers.append(["Cookie",cookie_name+"="+cookie_value])

        body,response_headers = scrapertools.read_body_and_headers(url,headers=request_headers)
        logger.info("body="+body)

        try:
            location = scrapertools.get_match(body,'<textarea.*?class="caja_des">([^<]+)</textarea>')
            logger.info("location="+location)
            break
        except:
            n = n + 1
            if n>3:
                break

    return location
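Note: Examples 2 and 3 extract the session cookie from the Set-Cookie header with regular expressions. As an alternative, a minimal sketch (not part of the original connectors) that parses the header with Python 2's standard Cookie module:

from Cookie import SimpleCookie   # Python 2; use http.cookies on Python 3

def cookie_header_from_set_cookie(set_cookie_value):
    # Hypothetical helper: turn a raw Set-Cookie value into a Cookie header,
    # keeping only name=value pairs and dropping attributes such as path.
    parsed = SimpleCookie()
    parsed.load(set_cookie_value)
    return "; ".join("%s=%s" % (name, morsel.value)
                     for name, morsel in parsed.items())

# Possible use inside the loops above:
#     request_headers.append(["Cookie", cookie_header_from_set_cookie(value)])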
Example #4
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))

    video_urls = []

    ## Load the page

    data = scrapertools.cache_page(page_url)

    ## New URL at the end of the data
    url = page_url.replace("embed/","").replace(".html",".json")
    ## Load the data and the headers
    data, headers = scrapertools.read_body_and_headers(url)
    data = jsontools.load_json(data)

    ## The video_key cookie is needed to view the video
    for cookie in headers:
        if 'set-cookie' in cookie: break
    cookie_video_key = scrapertools.get_match(cookie[1], '(video_key=[a-f0-9]+)')

    ## Build the video URL + the video_key cookie
    for videos in data['videos']:
        media_url = videos['url'] + "|Cookie=" + cookie_video_key
        quality = " "+videos['key']
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + quality +" [mail.ru]", media_url ] )

    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #5
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url,headers=DEFAULT_HEADERS)
    data = data.replace("\\","")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
    matches = scrapertools.find_multiple_matches(data, patron)
    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')

    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie',header_cookie])

    for stream_name,stream_type,stream_url in matches:
        stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",headers=DEFAULT_HEADERS)
        video_urls.append( [ stream_name+"p ."+stream_type+" [dailymotion]", stream_url, 0, subtitle ] )

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #6
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))

    video_urls = []
    ## Load the page to pick up the cookies
    data = scrapertools.cache_page(page_url)

    ## New URL
    url = page_url.replace("embed/","").replace(".html",".json")
    ## Load the data and the headers
    data, headers = scrapertools.read_body_and_headers(url)
    data = jsontools.load_json( data )

    ## The video_key cookie is needed to view the video
    for cookie in headers:
        if 'set-cookie' in cookie: break
    cookie_video_key = scrapertools.get_match(cookie[1], '(video_key=[a-f0-9]+)')

    ## Build the video URL + the video_key cookie
    for videos in data['videos']:
        media_url = videos['url'] + "|Referer=https://my1.imgsmail.ru/r/video2/uvpv3.swf?75&Cookie=" + cookie_video_key
        if not media_url.startswith("http"):
            media_url = "http:"+media_url
        quality = " "+videos['key']
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + quality +" [mail.ru]", media_url ] )
    try:
        video_urls.sort(key=lambda video_urls:int(video_urls[0].rsplit(" ",2)[1][:-1]))
    except:
        pass
    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
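Note: Example 6 sorts the list by the numeric resolution embedded in each label. A small illustration of that sort key, assuming labels of the form ".mp4 480p [mail.ru]":

# Illustration only: rsplit isolates the quality token ("480p"),
# [:-1] drops the trailing "p", int() makes it sortable.
label = ".mp4 480p [mail.ru]"
print(int(label.rsplit(" ", 2)[1][:-1]))   # -> 480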
Example #7
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url, headers=DEFAULT_HEADERS)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    patron = '"([^"]+)":\[\{"type":"video/([^"]+)","url":"([^"]+)"\}\]'
    matches = scrapertools.find_multiple_matches(data, patron)
    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')

    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie', header_cookie])

    for stream_name, stream_type, stream_url in matches:
        stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",
                                                           headers=DEFAULT_HEADERS)
        video_urls.append([stream_name + "p ." + stream_type + " [dailymotion]", stream_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #8
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.dailymotion get_video_url(page_url='%s')" % page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url, headers=DEFAULT_HEADERS)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''
    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie', header_cookie])

    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
    for calidad, urls in qualities:
        if calidad == "auto":
            continue
        patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
            stream_type = stream_type.replace('x-mpegURL', 'm3u8')
            if stream_type == "mp4":
                stream_url = scrapertools.get_header_from_response(stream_url, header_to_get="location",
                                                                   headers=DEFAULT_HEADERS)
            else:
                data_m3u8 = scrapertools.downloadpage(stream_url)
                stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
            video_urls.append([calidad + "p ." + stream_type + " [dailymotion]", stream_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.dailymotion %s - %s" % (video_url[0], video_url[1]))

    return video_urls
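Note: Examples 8 and 10 decode each captured quality block with a second regex. Because the captured group is itself a JSON array once the escaped backslashes have been stripped, a minimal alternative sketch (not part of the original connector) using the json module:

import json

def parse_quality_block(quality_name, quality_json):
    # Hypothetical alternative: quality_json is the '[{"type":...,"url":...}]'
    # string captured by the qualities regex above.
    entries = json.loads(quality_json)
    return [(quality_name, entry["type"].split("/", 1)[1], entry["url"])
            for entry in entries]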
Example #9
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):

    logger.info(
        "deportesalacarta.servers.clouddy get_video_url(page_url='%s')" %
        page_url)

    video_urls = []

    request_headers = []
    request_headers.append([
        "User-Agent",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"
    ])
    body, response_headers = scrapertools.read_body_and_headers(
        page_url, headers=request_headers)

    patron = 'key: "([^"]+).*?file:"([^"]+)"'
    param = re.compile(patron, re.DOTALL).findall(body)
    url_get_video = 'https://www.cloudy.ec/api/player.api.php?user=&cid2=&pass=&numOfErrors=0&key=<clave>&file=<fichero>&cid3='
    url_get_video = url_get_video.replace("<clave>", param[0][0])
    url_get_video = url_get_video.replace("<fichero>", param[0][1])

    request_headers.append(["referer", page_url])
    request_headers.append(["accept-encoding", "gzip, deflate, sdch"])
    request_headers.append(["x-requested-with", "ShockwaveFlash/20.0.0.286"])
    request_headers.append(["accept-language", "es-ES,es;q=0.8"])

    body, response_headers = scrapertools.read_body_and_headers(
        url_get_video, headers=request_headers)

    body = urllib.unquote(body)

    video = re.findall("url=(.*?)&title", body, re.DOTALL)

    video_urls.append(
        [scrapertools.get_filename_from_url(video[0])[-4:], video[0]])

    return video_urls
Example #10
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "streamondemand.servers.dailymotion get_video_url(page_url='%s')" %
        page_url)
    video_urls = []

    data, headers = scrapertools.read_body_and_headers(page_url,
                                                       headers=DEFAULT_HEADERS)
    data = data.replace("\\", "")
    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''

    for cookie in headers:
        if cookie[0] == "set-cookie":
            header_cookie = cookie[1]
    DEFAULT_HEADERS.append(['Cookie', header_cookie])

    subtitle = scrapertools.find_single_match(
        data, '"subtitles":.*?"it":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(
        data, '"([^"]+)":(\[\{"type":".*?\}\])')
    for calidad, urls in qualities:
        if calidad == "auto":
            continue
        patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
            stream_type = stream_type.replace('x-mpegURL', 'm3u8')
            if stream_type == "mp4":
                stream_url = scrapertools.get_header_from_response(
                    stream_url,
                    header_to_get="location",
                    headers=DEFAULT_HEADERS)
            else:
                data_m3u8 = scrapertools.downloadpage(stream_url)
                stream_url = scrapertools.find_single_match(
                    data_m3u8, '(http:.*?\.m3u8)')
            video_urls.append([
                calidad + "p ." + stream_type + " [dailymotion]", stream_url,
                0, subtitle
            ])

    for video_url in video_urls:
        logger.info("streamondemand.servers.dailymotion %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Example #11
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))

    video_urls = []

    ## Load the page
    ## New URL at the end of the data
    data = scrapertools.cache_page(page_url)

    ## Load the new data from the new URL
    #<a href="http://r.mail.ru/clb15944866/my.mail.ru/mail/gottsu04/video/_myvideo/709.html?from=watchonmailru" class="b-player__button" target="_blank">Watch video</a>
    url = scrapertools.get_match(
        data,
        '<a href="([^"]+)" class="b-player__button" target="_blank">Watch video</a>'
    )
    data = scrapertools.cache_page(url)

    ## API ##
    ## The video id is needed to build the API URL
    #<link rel="image_src" href="http://filed9-14.my.mail.ru/pic?url=http%3A%2F%2Fvideoapi.my.mail.ru%2Ffile%2Fsc03%2F3450622080461046469&mw=&mh=&sig=5d50e747aa59107d805263043e3efe64" />
    id_api_video = scrapertools.get_match(data, 'sc\d+%2F([^&]+)&mw')
    url = "http://videoapi.my.mail.ru/videos/" + id_api_video + ".json"
    ## Load the data and the headers
    data, headers = scrapertools.read_body_and_headers(url)
    data = jsontools.load_json(data)

    ## The video_key cookie is needed to view the video
    for cookie in headers:
        if 'set-cookie' in cookie: break
    cookie_video_key = scrapertools.get_match(cookie[1],
                                              '(video_key=[a-f0-9]+)')

    ## Build the video URL + the video_key cookie
    media_url = data['videos'][0]['url'] + "|Cookie=" + cookie_video_key

    video_urls.append([
        scrapertools.get_filename_from_url(media_url)[-4:] + " [mail.ru]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #12
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))

    video_urls = []

    ## Load the page
    ## New URL at the end of the data
    data = scrapertools.cache_page(page_url)

    ## Load the new data from the new URL
    #<a href="http://r.mail.ru/clb15944866/my.mail.ru/mail/gottsu04/video/_myvideo/709.html?from=watchonmailru" class="b-player__button" target="_blank">Watch video</a>
    url = scrapertools.get_match(data,'<a href="([^"]+)" class="b-player__button" target="_blank">Watch video</a>')
    data = scrapertools.cache_page(url)

    ## API ##
    ## The video id is needed to build the API URL
    #<link rel="image_src" href="http://filed9-14.my.mail.ru/pic?url=http%3A%2F%2Fvideoapi.my.mail.ru%2Ffile%2Fsc03%2F3450622080461046469&mw=&mh=&sig=5d50e747aa59107d805263043e3efe64" />
    id_api_video = scrapertools.get_match(data,'sc\d+%2F([^&]+)&mw')
    url = "http://videoapi.my.mail.ru/videos/" + id_api_video + ".json"
    ## Load the data and the headers
    data, headers = scrapertools.read_body_and_headers(url)
    data = jsontools.load_json( data )

    ## The video_key cookie is needed to view the video
    for cookie in headers:
        if 'set-cookie' in cookie: break
    cookie_video_key = scrapertools.get_match(cookie[1], '(video_key=[a-f0-9]+)')

    ## Build the video URL + the video_key cookie
    media_url = data['videos'][0]['url'] + "|Cookie=" + cookie_video_key

    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [mail.ru]", media_url ] )

    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))

    return video_urls