def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[filebox.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    '''
    <input type="hidden" name="op" value="download2">
    <input type="hidden" name="id" value="235812b1j9w1">
    <input type="hidden" name="rand" value="na73zeeooqyfkndsv4uxzzpbajwi6mhbmixtogi">
    <input type="hidden" name="referer" value="http://www.seriesyonkis.com/s/ngo/2/5/1/8/773">
    '''
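    # The page embeds a hidden download form (sample above); scrape its id and
    # rand fields and replay them in a "download2" POST after the forced wait.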
    logger.info("[filebox.py] URL ")
    data = scrapertools.cache_page(page_url)
    # Wait out the mandatory 5 seconds
    try:
        from platformcode.xbmc import xbmctools
        xbmctools.handle_wait(5,"filebox",'')
    except:
        # Outside XBMC the progress dialog is unavailable; fall back to a plain sleep
        import time
        time.sleep(5)

    codigo = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)">[^<]+')
    rand = scrapertools.get_match(data,'<input type="hidden" name="rand" value="([^"]+)">')

    #op=download2&id=xuquejiv6xdf&rand=r6dq7hn7so2ygpnxv2zg2i3cu3sbdsunf57gtni&referer=&method_free=&method_premium=&down_direct=1
    post = "op=download2&id="+codigo+"&rand="+rand+"&referer=&method_free=&method_premium=&down_direct=1"

    data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    logger.info("data="+data)
    media_url = scrapertools.get_match(data,"this.play\('([^']+)'")
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [filebox]",media_url])

    for video_url in video_urls:
        logger.info("[filebox.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
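
# Usage sketch (hypothetical URL built from the sample form id above; assumes the
# surrounding module's logger/scrapertools imports):
#
#   for label, url in get_video_url("http://www.filebox.com/235812b1j9w1"):
#       logger.info(label + " -> " + url)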
Example #2
def get_long_url(short_url):
    logger.info("[adfly.py] get_long_url(short_url='%s')" % short_url)

    data = scrapertools.cache_page(short_url)
    #var zzz = 'http://freakshare.com/files/ivkf5hm4/The.Following.S01E01.UNSOLOCLIC.INFO.avi.html'
    location = scrapertools.get_match(data, "var zzz \= '([^']+)'")
    logger.info("location=" + location)

    # Wait out the 5 seconds
    try:
        from platformcode.xbmc import xbmctools
        xbmctools.handle_wait(5, "adf.ly", '')
    except:
        import time
        time.sleep(5)

    if "adf.ly" in location:
        # Get the long URL
        data = scrapertools.cache_page(location)
        logger.info("data=" + data)

        location = scrapertools.get_match(
            data, '<META HTTP-EQUIV\="Refresh".*?URL=([^"]+)"')

    logger.info("location=" + location)

    return location
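
# Example (output value taken from the zzz comment above):
#   get_long_url(short_url)
#   -> "http://freakshare.com/files/ivkf5hm4/The.Following.S01E01.UNSOLOCLIC.INFO.avi.html"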
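Example #4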
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[streamcloud.py] url="+page_url)
    
    # Request the page once
    headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
    data = scrapertools.cache_page( page_url , headers=headers )
    #logger.info("data="+data)

    logger.info("[streamcloud.py] Esperando 10 segundos...")

    try:
        from platformcode.xbmc import xbmctools
        xbmctools.handle_wait(12,"streamcloud",'')
    except:
        import time
        time.sleep(12)

    logger.info("[streamcloud.py] Espera concluida")
    
    try:
        media_url = scrapertools.get_match( data , 'file\: "([^"]+)"' )+"?start=0"
    except:
        op = scrapertools.get_match(data,'<input type="hidden" name="op" value="([^"]+)"')
        usr_login = ""
        id = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)"')
        fname = scrapertools.get_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
        referer = scrapertools.get_match(data,'<input type="hidden" name="referer" value="([^"]*)"')
        hashstring = scrapertools.get_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
        imhuman = scrapertools.get_match(data,'<input type="submit" name="imhuman".*?value="([^"]+)">').replace(" ","+")
        
        post = "op="+op+"&usr_login="******"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
        headers.append(["Referer",page_url])
        data = scrapertools.cache_page( page_url , post=post, headers=headers )

        if 'id="justanotice"' in data:
            logger.info("[streamcloud.py] data="+data)
            logger.info("[streamcloud.py] Ha saltado el detector de adblock")
            return []

        # Extract the URL
        media_url = scrapertools.get_match( data , 'file\: "([^"]+)"' )+"?start=0"
        
    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [streamcloud]",media_url])

    for video_url in video_urls:
        logger.info("[streamcloud.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
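Example #5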
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[bayfiles.py] get_video_url("+page_url+")")
    from servers import servertools
    video_urls = []

    data = scrapertools.cache_page(page_url)
    try:
        vfid = re.compile('var vfid = ([^;]+);').findall(data)[0]
    except:
        logger.info("[bayfiles.py] Error no encontro vfid")
        return []
    try:
        delay = re.compile('var delay = ([^;]+);').findall(data)[0]
        delay = int(delay)
    except:
        delay = 300

    logger.info("[bayfiles.py] vfid="+vfid)
    logger.info("[bayfiles.py] delay="+str(delay))

    from platformcode.xbmc import xbmctools
 
    t = millis()
    #http://bayfiles.com/ajax_download?_=1336330599281&action=startTimer&vfid=2174049
    url_token = "http://bayfiles.com/ajax_download?_=%s&action=startTimer&vfid=%s"%(t,vfid)
    data = scrapertools.cache_page(url_token)
    logger.info("data="+data)
    datajson = load_json(data)
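    # Expected JSON shape, inferred from the fields read below:
    #   {"set": true, "token": "..."}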

    if datajson['set']==True:
        token=datajson['token']
        resultado = xbmctools.handle_wait(delay,"Progreso","Conectando con servidor BayFiles (Free)")
        #if resultado == False:
            
        url_ajax = 'http://bayfiles.com/ajax_download'
        post = "action=getLink&vfid=%s&token=%s" %(vfid,token)
        data = scrapertools.cache_page( url_ajax , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    
        # Extract the video URL
        patron = 'onclick="javascript:window.location.href = \'(.+?)\''
        matches = re.compile(patron,re.DOTALL).findall(data)
        #scrapertools.printMatches(matches)
        
        if len(matches)>0:
            mediaurl = matches[0]
            try:
                location = scrapertools.getLocationHeaderFromResponse(mediaurl)
                if location:
                    mediaurl = location
            except:
                logger.info("Error al redireccionar")
            mediaurl = mediaurl + "|Referer="+urllib.quote(page_url)
            video_urls.append( ["."+mediaurl.rsplit('.',1)[1]+" [bayfiles]",mediaurl,60])

    for video_url in video_urls:
        logger.info("[bayfiles.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #6
def get_free_url(page_url):
    location = scrapertools.get_header_from_response(page_url,
                                                     header_to_get="location")
    if location != "":
        page_url = location

    logger.info("[wupload.py] location=%s" % page_url)

    video_id = extract_id(page_url)
    logger.info("[wupload.py] video_id=%s" % video_id)

    data = scrapertools.cache_page(url=page_url)
    patron = 'href="(.*?start=1.*?)"'
    matches = re.compile(patron).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) == 0:
        logger.error("[wupload.py] No encuentra el enlace Free")
        return []

    # Get the free download link
    download_link = matches[0]
    if not download_link.startswith("http://"):
        download_link = urlparse.urljoin(page_url, download_link)

    logger.info("[wupload.py] Link descarga: " + download_link)

    # Request the download link
    headers = []
    headers.append(["X-Requested-With", "XMLHttpRequest"])
    headers.append(["Referer", page_url])
    headers.append([
        "User-Agent",
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"
    ])
    headers.append(
        ["Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"])
    headers.append(["Accept-Encoding", "gzip, deflate"])
    headers.append(["Accept", "*/*"])
    headers.append(["Accept-Language", "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
    headers.append(["Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
    headers.append(["Connection", "keep-alive"])
    headers.append(["Pragma", "no-cache"])
    headers.append(["Cache-Control", "no-cache"])

    data = scrapertools.cache_page(download_link, headers=headers, post="")
    logger.info(data)
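    # Poll the page through its states: countdown timer -> optional ReCaptcha
    # -> final download link. Loop until the link appears or the user cancels.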

    while True:
        # Detect the wait countdown
        patron = "countDownDelay = (\d+)"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            tiempo_espera = int(matches[0])
            logger.info("[wupload.py] tiempo de espera %d segundos" %
                        tiempo_espera)

            #import time
            #time.sleep(tiempo_espera)
            from platformcode.xbmc import xbmctools
            resultado = xbmctools.handle_wait(
                tiempo_espera + 5, "Progreso",
                "Conectando con servidor Wupload (Free)")
            if resultado == False:
                break

            tm = get_match(data, "name='tm' value='([^']+)'")
            tm_hash = get_match(data, "name='tm_hash' value='([^']+)'")
            post = "tm=" + tm + "&tm_hash=" + tm_hash
            data = scrapertools.cache_page(download_link,
                                           headers=headers,
                                           post=post)
            logger.info(data)
        else:
            logger.info("[wupload.py] no encontrado tiempo de espera")

        # Detect a captcha
        patron = "Recaptcha\.create"
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            logger.info("[wupload.py] est� pidiendo el captcha")
            recaptcha_key = get_match(data, 'Recaptcha\.create\("([^"]+)"')
            logger.info("[wupload.py] recaptcha_key=" + recaptcha_key)

            data_recaptcha = scrapertools.cache_page(
                "http://www.google.com/recaptcha/api/challenge?k=" +
                recaptcha_key)
            patron = "challenge.*?'([^']+)'"
            challenges = re.compile(patron, re.S).findall(data_recaptcha)
            if len(challenges) > 0:
                challenge = challenges[0]
                image = "http://www.google.com/recaptcha/api/image?c=" + challenge

                # Ask the user to transcribe the CAPTCHA image
                exec "import seriesly.captcha as plugin"
                tbd = plugin.Keyboard("", "", image)
                tbd.doModal()
                tecleado = ""
                if tbd.isConfirmed():
                    tecleado = tbd.getText()

                #logger.info("")
                #tecleado = raw_input('Grab ' + image + ' : ')

                # The typed response must be posted together with the challenge id
                post = "recaptcha_challenge_field=%s&recaptcha_response_field=%s" % (
                    challenge, tecleado.replace(" ", "+"))
                data = scrapertools.cache_page(download_link,
                                               headers=headers,
                                               post=post)
                logger.info(data)

        else:
            logger.info("[wupload.py] no encontrado captcha")

        # Detect the final download link
        patron = '<p><a href="(http\:\/\/.*?wupload[^"]+)">'
        matches = re.compile(patron).findall(data)
        if len(matches) > 0:
            final_url = matches[0]
            '''
            Sample request (free download):

            GET /download/2616019677/4f0391ba/9bed4add/0/1/580dec58/3317afa30905a31794733c6a32da1987719292ff HTTP/1.1
            Accept-Language: es-es,es;q=0.8,en-us;q=0.5,en;q=0.3
            Accept-Encoding: gzip, deflate
            Connection: close
            Accept: */*
            User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12
            Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7
            Host: s107.wupload.es
            Referer: http://www.wupload.es/file/2616019677
            Pragma: no-cache
            Cache-Control: no-cache
            Content-Type: application/x-www-form-urlencoded; charset=UTF-8

            Sample response:

            HTTP/1.1 200 OK
            Server: nginx
            Date: Tue, 03 Jan 2012 23:39:39 GMT
            Content-Type: "application/octet-stream"
            Content-Length: 230336429
            Last-Modified: Tue, 06 Sep 2011 01:07:26 GMT
            Connection: close
            Set-Cookie: dlc=1; expires=Thu, 02-Feb-2012 23:39:39 GMT; path=/; domain=.wupload.es
            Content-Disposition: attachment; filename="BNS609.mp4"
            '''
            logger.info("[wupload.py] link descarga " + final_url)

            return [[
                "(Free)", final_url + '|' + 'Referer=' +
                urllib.quote(page_url) + "&Content-Type=" + urllib.quote(
                    "application/x-www-form-urlencoded; charset=UTF-8") +
                "&Cookie=" + urllib.quote("lastUrlLinkId=" + video_id)
            ]]
        else:
            logger.info("[wupload.py] no detectado link descarga")