Code Example #1
def anti_cloudflare(url):
    result = scrapertools.cache_page(url, headers=headers)
    try:
        # Extract the challenge token and the two JavaScript fragments
        # (the seed expression and the statements that update it)
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decrypt_val = parseJSString(init)
        lines = builder.split(';')

        # Replay the challenge arithmetic: each statement ends in +=, -= or *=,
        # so the operator is the last character before the '='
        for line in lines:
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decrypt_val = int(eval(str(decrypt_val) + sections[0][-1] + str(line_val)))

        urlsplit = urlparse.urlsplit(url)
        h = urlsplit.netloc
        s = urlsplit.scheme

        # Cloudflare expects the computed value plus the hostname length
        answer = decrypt_val + len(h)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                s + '://' + h, urllib.quote_plus(passval), jschl, answer)
            # Cloudflare rejects answers that arrive before the JS delay has passed
            time.sleep(5)

        # Submitting the answer sets the clearance cookie; then reload the page
        scrapertools.get_headers_from_response(query, headers=headers)
        return scrapertools.cache_page(url, headers=headers)
    except:
        return result
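
Both this example and Example #12 call a parseJSString helper that the snippet does not include. Below is a minimal sketch of what such a helper typically looked like in Cloudflare-bypass code of this era; the exact replace chain is an assumption, not taken from this project.

def parseJSString(s):
    # Cloudflare encodes integers as JavaScript expressions built from empty
    # arrays and negations, e.g. "+((!+[]+!![]+!![]+[])+(!+[]+!![]))".
    # "!+[]" and "!![]" are truthy (1), "[]" is 0; wrapping every "(" in str()
    # reproduces JS string concatenation, and int() yields the final number.
    offset = 1 if s[0] == '+' else 0
    return int(eval(s.replace('!+[]', '1')
                     .replace('!![]', '1')
                     .replace('[]', '0')
                     .replace('(', 'str(')[offset:]))
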
Code Example #2
File: googlevideo.py  Project: Dreancala/Prova
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[googlevideo.py] get_video_url(page_url='%s')" % page_url)

    data, video_urls = _parse_google(page_url)

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0'
    }
    if data is not None:
        res_headers = dict(scrapertools.get_headers_from_response(page_url))
        if 'set-cookie' in res_headers:
            headers['Cookie'] = res_headers['set-cookie']
    headers = urllib.urlencode(headers)

    if not video_urls:
        video_urls.append(['Unknown Quality', page_url])

    for video_url in video_urls:
        if ('redirector.' in video_url[1]) or ('googleusercontent'
                                               in video_url[1]):
            video_url[1] = urllib2.urlopen(video_url[1]).geturl()

        if 'plugin://' not in video_url[1]:
            video_url[1] += '|' + headers

    for video_url in video_urls:
        logger.info("[googlevideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
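
The trailing '|' + headers here follows the Kodi convention of appending URL-encoded request headers to a playable URL: the player splits on the first '|' and sends everything after it as HTTP headers. A minimal illustration (URL and values are hypothetical):

import urllib

play_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0',
                                 'Cookie': 'sid=abc'})
play_url = 'http://example.com/video.mp4' + '|' + play_headers
# e.g. http://example.com/video.mp4|User-Agent=Mozilla%2F5.0&Cookie=sid%3Dabc
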
Code Example #3
def anti_cloudflare(url):
    try:
        resp_headers = scrapertools.get_headers_from_response(url,
                                                              headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers
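
When the request raises urllib2.HTTPError, e.headers is an httplib.HTTPMessage rather than a plain dict, but it supports dict-style access (get(), 'in'), so both branches can be consumed the same way afterwards. A sketch of the usual follow-up, mirroring the 'refresh' lookup in the later examples:

try:
    resp_headers = dict(scrapertools.get_headers_from_response(url, headers=headers))
except urllib2.HTTPError, e:
    resp_headers = e.headers  # httplib.HTTPMessage; also answers get() and 'in'

refresh = resp_headers.get('refresh', '')
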
Code Example #4
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[googlevideo.py] get_video_url(page_url='%s')" % page_url)

    data, video_urls = _parse_google(page_url)

    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0'}
    if data is not None:
        res_headers = dict(scrapertools.get_headers_from_response(page_url))
        if 'set-cookie' in res_headers:
            headers['Cookie'] = res_headers['set-cookie']
    headers = urllib.urlencode(headers)

    if not video_urls:
        video_urls.append(['Unknown Quality', page_url])

    for video_url in video_urls:
        if ('redirector.' in video_url[1]) or ('googleusercontent' in video_url[1]):
            video_url[1] = urllib2.urlopen(video_url[1]).geturl()

        if 'plugin://' not in video_url[1]:
            video_url[1] += '|' + headers

    for video_url in video_urls:
        logger.info("[googlevideo.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code Example #5
def decode_hidden(text, number):
    logger.info(text)
    text = re.sub(r'(\&|\')(gt|lt|amp|anp|amq)(9|;|:)', r'&\2;', text)
    text = text.replace("&anp;", "&").replace(
        "&amq;", "&")  #.replace("&quot9", '"').replace("&lt9", '<') \
    # .replace("&amp9", '&').replace("&gt;", ">").replace("&lt;", "<") \
    # .replace("'lt;", "<").replace("'amp;", "&").replace("&lt:", "<") \
    # .replace("?", "notdecode")
    logger.info(text)
    text = scrapertools.decodeHtmlentities(text)

    logger.info(text)
    s = []
    for char in text:
        j = ord(char)
        s.append(chr(33 + ((j + 14) % 94)))

    temp = "".join(s)
    text_decode = temp[0:-number[0]] + chr(ord(temp[-number[1]]) +
                                           number[2]) + temp[len(temp) -
                                                             number[3] + 1:]
    videourl = "https://openload.co/stream/%s?mime=true" % text_decode
    resp_headers = scrapertools.get_headers_from_response(videourl)
    extension = ""
    for head, value in resp_headers:
        if head == "location":
            videourl = value.replace("https", "http").replace("?mime=true", "")
        elif head == "content-type":
            logger.info("extension " + value)
            extension = value

    return videourl, extension
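
The per-character transform above, chr(33 + ((j + 14) % 94)), is ROT47 over the 94 printable ASCII characters: the canonical form 33 + ((j - 33 + 47) % 94) simplifies to the same expression. Applying it twice therefore returns the original text:

def rot47(text):
    # rotate each printable ASCII character (codes 33..126) by 47 positions
    return "".join(chr(33 + ((ord(c) + 14) % 94)) for c in text)

assert rot47(rot47("openload.co/stream")) == "openload.co/stream"
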
Code Example #6
def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers
Code Example #7
def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = {v[0]: v[1] for v in resp_headers}
    except urllib2.HTTPError, e:
        resp_headers = e.headers
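
Examples #6 and #7 differ only in how they build the mapping: for a sequence of (name, value) pairs, the dict() constructor and the dict comprehension are equivalent.

pairs = [('server', 'cloudflare'), ('location', '/home')]
assert dict(pairs) == {v[0]: v[1] for v in pairs}
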
Code Example #8
File: wupload.py  Project: conejoninja/xbmc-seriesly
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[wupload.py] get_video_url( page_url='%s' , user='******' , password='******', video_password=%s)" % (page_url , user , "**************************"[0:len(password)] , video_password) )

    if not premium:
        #return get_free_url(page_url)
        logger.info("[wupload.py] free no soportado")
    else:
        # Log in and get the cookie
        #login_url = "http://www.wupload.es/account/login"
        login_url = "http://www.wupload.com/account/login"
        post = "email="+user.replace("@","%40")+"&redirect=%2F&password="******"&rememberMe=1"
        location = scrapertools.get_header_from_response( url=login_url, header_to_get="location", post=post)
        logger.info("location="+location)
        
        if location!="":
            login_url = location

        data = scrapertools.cache_page(url=login_url, post=post)

        # Get the final URL (first redirect)
        headers = scrapertools.get_headers_from_response(page_url)
        location1 = ""
        for header in headers:
            logger.info("header1="+str(header))
            
            if header[0]=="location":
                location1 = header[1]
                logger.info("location1="+str(header))

        # Get the final URL (second redirect)
        headers = scrapertools.get_headers_from_response(location1)
        location2 = ""
        content_disposition = ""
        for header in headers:
            logger.info("header2="+str(header))
            
            if header[0]=="location":
                location2 = header[1]
    
        location = location2
        if location=="":
            location = location1

        return [ ["(Premium) [wupload]",location + "|" + "User-Agent="+urllib.quote("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12") ] ]

    return []
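
get_headers_from_response is used here to read each Location header without the redirect being followed automatically. To get the same effect with plain urllib2 you need a handler that hands the 3xx response back instead of chasing it; the following is a standard recipe, not part of this project, and the URL is hypothetical:

import urllib
import urllib2

class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    # return the 3xx response itself so the caller can inspect its headers
    def http_error_302(self, req, fp, code, msg, headers):
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        infourl.code = code
        return infourl
    http_error_301 = http_error_303 = http_error_307 = http_error_302

opener = urllib2.build_opener(NoRedirectHandler())
resp = opener.open("http://www.wupload.com/file/12345")
location = resp.info().get("location", "")
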
Code Example #9
def anti_cloudflare(url, location=False):
    # global headers
    respuesta = ""
    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
        if 'location' in resp_headers: respuesta = resp_headers['location']
    except urllib2.HTTPError, e:
        resp_headers = e.headers
Code Example #10
def anti_cloudflare(url, location=False):
    # global headers
    respuesta = ""
    try:
        resp_headers = scrapertools.get_headers_from_response(url,
                                                              headers=headers)
        resp_headers = dict(resp_headers)
        if 'location' in resp_headers:
            respuesta = resp_headers['location']
    except urllib2.HTTPError, e:
        resp_headers = e.headers
Code Example #11
def findvideos(item):
    logger.info("[divxatope.py] findvideos")
    itemlist = []

    # Work out the PHPSESSID
    login = LOGIN.replace("@", "%40")
    headers = scrapertools.get_headers_from_response(
        "http://www.divxatope.com/index.php",
        post="login=%s&password=%s&Submit=ENTRAR" % (login, PASSWORD))
    logger.info("headers=" + str(headers))
    request_headers = [[
        "User-Agent",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:14.0) Gecko/20100101 Firefox/14.0.1"
    ]]
    for header in headers:
        if header[0] == "set-cookie":
            #['set-cookie', 'PHPSESSID=d514b41a42fec11a7cae8bdb07fcef58; path=/']
            #Cookie: PHPSESSID=82336dad3c64c9c4110e793aa54abc4a;
            cookie_value = scrapertools.get_match(header[1],
                                                  "PHPSESSID\=([a-z0-9]+)\;")
            request_headers.append(["Cookie", "PHPSESSID=" + cookie_value])

    # Download the page
    data = scrapertools.cache_page(item.url, headers=request_headers)

    #logger.info("data="+data)
    #href ="redirect.php?file=31351&url=http://www.divxatope.com/uploads/torrents/attachments/5730_iceberg-
    link = scrapertools.get_match(
        data, 'redirect.php\?file=\d+\&url=(.*?\.torrent)')
    itemlist.append(
        Item(channel=__channel__,
             action="play",
             server="torrent",
             title=item.title,
             fulltitle=item.title,
             url=link,
             thumbnail=item.thumbnail,
             plot=item.plot,
             folder=False))

    # Now look for the videos
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        videoitem.channel = __channel__

        fichero = scrapertools.get_filename_from_url(videoitem.url)
        partes = fichero.split("/")
        titulo = partes[len(partes) - 1]
        videoitem.title = titulo + " - [" + videoitem.server + "]"
        videoitem.fulltitle = item.fulltitle

    return itemlist
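
The PHPSESSID regex works for the cookie shown in the comments; the standard-library Cookie module is a sturdier way to pull one value out of a Set-Cookie header (a sketch, not from this project):

import Cookie

raw = 'PHPSESSID=d514b41a42fec11a7cae8bdb07fcef58; path=/'
cookie = Cookie.SimpleCookie()
cookie.load(raw)
session_id = cookie['PHPSESSID'].value  # 'd514b41a42fec11a7cae8bdb07fcef58'
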
Code Example #12
def anti_cloudflare(url):
    result = scrapertools.cache_page(url, headers=headers)
    try:
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(
            result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(
            result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(
            result)[0]
        decrypt_val = parseJSString(init)
        lines = builder.split(';')

        for line in lines:
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decrypt_val = int(
                    eval(str(decrypt_val) + sections[0][-1] + str(line_val)))

        urlsplit = urlparse.urlsplit(url)
        h = urlsplit.netloc
        s = urlsplit.scheme

        answer = decrypt_val + len(h)

        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (
            url, jschl, answer)

        if 'type="hidden" name="pass"' in result:
            passval = re.compile('name="pass" value="(.*?)"').findall(
                result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                s + '://' + h, urllib.quote_plus(passval), jschl, answer)
            time.sleep(5)

        scrapertools.get_headers_from_response(query, headers=headers)
        return scrapertools.cache_page(url, headers=headers)
    except:
        return result
Code Example #13
def links(item):

    itemlist = []
    try:
        count = 0
        exit = False
        urlvideo = ""
        while not exit and count < 5 and urlvideo == "":
            # Accessing it sometimes raises an error
            try:
                page = scrapertools.get_headers_from_response(item.extra)
                logger.info(page)
                for s in page:
                    if "location" in s:
                        urlvideo = '"' + s[1] + '"'
                exit = True
            except:
                import traceback

                logger.info(traceback.format_exc())
                count = count + 1
                urlvideo = item.url

        logger.info("urlvideo=" + urlvideo)
        for video in servertools.findvideos(urlvideo):
            # scrapedtitle = title.strip() + " " + match[1] + " " + match[2] + " " + video[0]
            scrapedtitle = scrapertools.htmlclean(video[0])
            scrapedurl = video[1]
            server = video[2]
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="play",
                    title=scrapedtitle,
                    url=scrapedurl,
                    thumbnail=item.thumbnail,
                    plot="",
                    server=server,
                    extra="",
                    category=item.category,
                    fanart=item.thumbnail,
                    folder=False,
                )
            )
    except:
        import sys

        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
Code Example #14
File: seriesly.py  Project: Bycacha/BYCACHA
def links(item):

    itemlist = []
    try:
        count = 0
        exit = False
        urlvideo = ""
        while (not exit and count < 5 and urlvideo == ""):
            # Accessing it sometimes raises an error
            try:
                page = scrapertools.get_headers_from_response(item.extra)
                logger.info(page)
                for s in page:
                    if "location" in s:
                        urlvideo = "\"" + s[1] + "\""
                exit = True
            except:
                import traceback
                logger.info(traceback.format_exc())
                count = count + 1
                urlvideo = item.url

        logger.info("urlvideo=" + urlvideo)
        for video in servertools.findvideos(urlvideo):
            #scrapedtitle = title.strip() + " " + match[1] + " " + match[2] + " " + video[0]
            scrapedtitle = scrapertools.htmlclean(video[0])
            scrapedurl = video[1]
            server = video[2]
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=item.thumbnail,
                     plot="",
                     server=server,
                     extra="",
                     category=item.category,
                     fanart=item.thumbnail,
                     folder=False))
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
Code Example #15
def findvideos(item):
    logger.info("[divxatope.py] findvideos")
    itemlist=[]

    # Work out the PHPSESSID
    login = LOGIN.replace("@","%40")
    headers = scrapertools.get_headers_from_response("http://www.divxatope.com/index.php",post="login=%s&password=%s&Submit=ENTRAR" % (login,PASSWORD))
    logger.info("headers="+str(headers))
    request_headers=[ ["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:14.0) Gecko/20100101 Firefox/14.0.1"] ]
    for header in headers:
        if header[0]=="set-cookie":
            #['set-cookie', 'PHPSESSID=d514b41a42fec11a7cae8bdb07fcef58; path=/']
            #Cookie: PHPSESSID=82336dad3c64c9c4110e793aa54abc4a;
            cookie_value = scrapertools.get_match(header[1],"PHPSESSID\=([a-z0-9]+)\;")
            request_headers.append(["Cookie","PHPSESSID="+cookie_value])

    # Download the page
    data = scrapertools.cache_page(item.url,headers=request_headers)

    #logger.info("data="+data)
    #href ="redirect.php?file=31351&url=http://www.divxatope.com/uploads/torrents/attachments/5730_iceberg-
    link = scrapertools.get_match(data,'redirect.php\?file=\d+\&url=(.*?\.torrent)')
    itemlist.append( Item(channel=__channel__, action="play", server="torrent", title=item.title , fulltitle = item.title, url=link , thumbnail=item.thumbnail , plot=item.plot , folder=False) )

    # Now look for the videos
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        videoitem.channel = __channel__

        fichero = scrapertools.get_filename_from_url(videoitem.url)
        partes = fichero.split("/")
        titulo = partes[ len(partes)-1 ]
        videoitem.title = titulo + " - [" + videoitem.server+"]"
        videoitem.fulltitle = item.fulltitle

    return itemlist
Code Example #16
        var1 = re.sub(par2[ii], par3[ii], var1)

    var1 = re.sub("%26", "&", var1)
    var1 = re.sub("%3B", ";", var1)
    return var1.replace('<!--?--><?', '<!--?-->')


def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = {v[0]: v[1] for v in resp_headers}
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))

        # dict_headers = {v[0]: v[1] for v in headers}
        # dict_headers['cookie'] = resp_headers['set-cookie'].split(';')[0]

        # resp_headers = scrapertools.get_headers_from_response(sito + resp_headers['refresh'][7:], headers=[[k, v] for k, v in dict_headers.iteritems()])
        scrapertools.get_headers_from_response(sito + resp_headers['refresh'][7:], headers=headers)
        # resp_headers = {v[0]: v[1] for v in resp_headers}

        # dict_headers['cookie'] = dict_headers['cookie'] + resp_headers['set-cookie'].split(';')[0]
        # headers = [[k, v] for k, v in dict_headers.iteritems()]

    return scrapertools.cache_page(url, headers=headers)
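
The slicing in this and the following anti_cloudflare variants assumes a Cloudflare Refresh header of the fixed shape "8;URL=/cdn-cgi/...": [:1] reads a single-digit delay (a delay of ten seconds or more would be mis-parsed) and [7:] skips the seven-character "8;URL=/" prefix. A small illustration with a hypothetical header value:

refresh = '8;URL=/cdn-cgi/l/chk_jschl?pass=123'
delay = int(refresh[:1])  # -> 8, seconds to wait before retrying
path = refresh[7:]        # -> 'cdn-cgi/l/chk_jschl?pass=123'
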
Code Example #17
    return itemlist

def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))

        scrapertools.get_headers_from_response(host + '/' + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)

def info_tv(title, thumbnail):
    logger.info("streamondemand.guardaserie info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title, tipo="tv", include_adult="true", idioma_busqueda="it")
        count = 0
        if oTmdb.total_results > 0:
            # While the thumbnail doesn't match the search result's, move on to the next result
            while oTmdb.get_poster(size="w185") != thumbnail:
                count += 1
                oTmdb.load_resultado(index_resultado=count)
                if count == oTmdb.total_results: break
Code Example #18
    try:
        resp_headers = scrapertools.get_headers_from_response(url,
                                                              headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))

        urlsplit = urlparse.urlsplit(url)
        h = urlsplit.netloc
        s = urlsplit.scheme
        scrapertools.get_headers_from_response(s + '://' + h + "/" +
                                               resp_headers['refresh'][7:],
                                               headers=headers)

    return scrapertools.cache_page(url, headers=headers)


def info_tv(title):
    logger.info("streamondemand.vediserie info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title,
                     tipo="tv",
                     include_adult="true",
                     idioma_busqueda="it")
        count = 0
        if oTmdb.total_results > 0:
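
In Example #18's anti_cloudflare the base URL is rebuilt from urlsplit pieces; urlparse.urljoin expresses the same intent more directly (a sketch with a hypothetical URL):

import urlparse

url = 'https://example.com/serie/watch?id=1'
refresh_path = 'cdn-cgi/l/chk_jschl'  # what resp_headers['refresh'][7:] yields
target = urlparse.urljoin(url, '/' + refresh_path)
# -> 'https://example.com/cdn-cgi/l/chk_jschl'
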
Code Example #19
File: tvvip.py  Project: codixor/maximumTv
    return itemlist

def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))

        scrapertools.get_headers_from_response(host + '/' + resp_headers['refresh'][7:], headers=headers)

    try:
        data = scrapertools.downloadpageGzip(url)
        cloudflare = False
    except:
        data = scrapertools.cache_page(url, headers=headers)
        cloudflare = True

    return data, cloudflare

def get_cookie_value(extension=""):
    cookies = os.path.join(config.get_data_path(), 'cookies.dat')
    cookiedatafile = open(cookies, 'r')
    cookiedata = cookiedatafile.read()
    cookiedatafile.close()
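
The truncated get_cookie_value above parses cookies.dat by hand. If the file is a Mozilla-format cookie jar (an assumption; the snippet does not show how scrapertools writes it), cookielib can load it directly:

import os
import cookielib

jar = cookielib.MozillaCookieJar(os.path.join(config.get_data_path(), 'cookies.dat'))
jar.load(ignore_discard=True, ignore_expires=True)
values = [c.value for c in jar if c.name == 'PHPSESSID']  # example lookup
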
Code Example #20
File: wupload.py  Project: conejoninja/xbmc-seriesly
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "[wupload.py] get_video_url( page_url='%s' , user='%s' , password='%s', video_password=%s)"
        % (page_url, user, "**************************"[0:len(password)],
           video_password))

    if not premium:
        #return get_free_url(page_url)
        logger.info("[wupload.py] free no soportado")
    else:
        # Log in and get the cookie
        #login_url = "http://www.wupload.es/account/login"
        login_url = "http://www.wupload.com/account/login"
        post = "email=" + user.replace(
            "@",
            "%40") + "&redirect=%2F&password="******"&rememberMe=1"
        location = scrapertools.get_header_from_response(
            url=login_url, header_to_get="location", post=post)
        logger.info("location=" + location)

        if location != "":
            login_url = location

        data = scrapertools.cache_page(url=login_url, post=post)

        # Get the final URL (first redirect)
        headers = scrapertools.get_headers_from_response(page_url)
        location1 = ""
        for header in headers:
            logger.info("header1=" + str(header))

            if header[0] == "location":
                location1 = header[1]
                logger.info("location1=" + str(header))

        # Get the final URL (second redirect)
        headers = scrapertools.get_headers_from_response(location1)
        location2 = ""
        content_disposition = ""
        for header in headers:
            logger.info("header2=" + str(header))

            if header[0] == "location":
                location2 = header[1]

        location = location2
        if location == "":
            location = location1

        return [[
            "(Premium) [wupload]",
            location + "|" + "User-Agent=" + urllib.quote(
                "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"
            )
        ]]

    return []
Code Example #21
File: openload.py  Project: Dragonfir3/pelisalacarta
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download URL
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(
            text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="' + var_r + '[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(
                        int(encode[index:index + 3]) + v1 -
                        v2 * int(encode[index + 3:index + 3 + 2]))
                    index += 5
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https",
                                             "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # The method failed: fall back to the API, though at peak times it doesn't work
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload " + traceback.format_exc())
        # The method failed: fall back to the API, though at peak times it doesn't work
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([
            extension + " [Openload] ", videourl + header_down + extension, 0,
            subtitle
        ])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
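
In the decode loop above, the first five characters of each encoded block carry two keys (v1, three digits; v2, two digits), and every following five-character group is a three-digit code plus a two-digit multiplier, decoded as chr(code + v1 - v2 * multiplier). A hypothetical round-trip that matches that layout:

def encode_char(ch, v1, v2, mult=1):
    # invert chr(code + v1 - v2 * mult): code = ord(ch) - v1 + v2 * mult
    return "%03d%02d" % (ord(ch) - v1 + v2 * mult, mult)

v1, v2 = 50, 10
encoded = "%03d%02d" % (v1, v2)  # key prefix: "05010"
encoded += "".join(encode_char(c, v1, v2) for c in "video.mp4")

decoded = ""
index = 5
while index < len(encoded):
    decoded += chr(int(encoded[index:index + 3]) + v1 -
                   v2 * int(encoded[index + 3:index + 5]))
    index += 5
assert decoded == "video.mp4"
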
Code Example #22
def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))

        urlsplit = urlparse.urlsplit(url)
        h = urlsplit.netloc
        s = urlsplit.scheme
        scrapertools.get_headers_from_response(s + '://' + h + "/" + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)


def HomePage(item):
    import xbmc
    xbmc.executebuiltin("ReplaceWindow(10024,plugin://plugin.video.streamondemand)")


def info(title):
    logger.info("streamondemand.cineblog01 info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title, tipo="movie", include_adult="false", idioma_busqueda="it")
        if oTmdb.total_results > 0:
Code Example #23
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download URL
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="'+var_r+'[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(int(encode[index:index+3]) + v1 - v2 * int(encode[index+3:index+3+2]))
                    index += 5
            except:
                continue
         
            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https", "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # The method failed: fall back to the API, though at peak times it doesn't work
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("streamondemand.servers.openload "+traceback.format_exc())
        # The method failed: fall back to the API, though at peak times it doesn't work
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "."+extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code Example #24
    # Download the page
    data = anti_cloudflare(item.url)

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = "".join([item.title, "[COLOR green][B]" + videoitem.title + "[/B][/COLOR]"])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist


def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if "refresh" in resp_headers:
        time.sleep(int(resp_headers["refresh"][:1]))

        scrapertools.get_headers_from_response(host + "/" + resp_headers["refresh"][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)