Example #1
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)

    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"

    from lib.aadecode import decode as aadecode
    if "videocontainer" not in data:
        url = page_url.replace("/embed/", "/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.find_single_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = aadecode(text_encode)

        videourl = "http://" + scrapertools.find_single_match(
            text_decode, "(openload.co/.*?)\}")
        extension = scrapertools.find_single_match(
            data, '<meta name="description" content="([^"]+)"')
        extension = "." + extension.rsplit(".", 1)[1]
        video_urls.append(
            [extension + " [Openload]", videourl + header_down + extension])
    else:
        text_encode = scrapertools.find_multiple_matches(
            data, '<script[^>]+>(゚ω゚.*?)</script>')
        decodeindex = aadecode(text_encode[0])
        subtract = scrapertools.find_single_match(decodeindex,
                                                  'welikekodi.*?(\([^;]+\))')
        index = int(eval(subtract))

        # Find the variable that points to the correct script
        text_decode = aadecode(text_encode[index])

        videourl = "http://" + scrapertools.find_single_match(
            text_decode, "(openload.co/.*?)\}")
        extension = "." + scrapertools.find_single_match(
            text_decode, "video/(\w+)")
        if config.get_platform() != "plex":
            video_urls.append([
                extension + " [Openload] ", videourl + header_down + extension,
                0, subtitle
            ])
        else:
            video_urls.append(
                [extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
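
Side note: the videourl + header_down + extension pattern above relies on the Kodi convention of appending request headers to a media URL after a pipe. A minimal sketch of that convention (the helper names are illustrative, not part of the original module):

import urllib  # Python 2; on Python 3 use urllib.parse.urlencode instead

def attach_headers(url, headers):
    # Build the "url|Header1=v1&Header2=v2" form that Kodi players accept.
    return url + "|" + urllib.urlencode(headers)

def split_headers(url):
    # Recover the plain URL and the header dict from the combined form.
    if "|" not in url:
        return url, {}
    plain, query = url.split("|", 1)
    return plain, dict(p.split("=", 1) for p in query.split("&") if "=" in p)
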
Example #2
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    headers = {'referer': page_url}
    for i in range(0, 3):
        data = httptools.downloadpage(page_url, headers=headers).data
        if '゚ω゚ノ' in data:
            break
        else:
            page_url = scrapertools.find_single_match(data,
                                                      '"iframe" src="([^"]+)')
            if not page_url:
                page_url = scrapertools.find_single_match(
                    data, '<input type="hidden" id="link" value="([^"]+)')
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    code = scrapertools.find_single_match(
        data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
    text_decode = aadecode(code)
    funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ",
                                text_decode,
                                flags=re.DOTALL)[0]
    # decode the javascript in hidden html fields
    # --------------------------------------------
    oculto = re.findall('<input type=hidden value=([^ ]+) id=func',
                        data,
                        flags=re.DOTALL)[0]
    funciones = resuelve(clave, base64.b64decode(oculto))
    url, tipo = scrapertools.find_single_match(
        funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")
    video_urls.append(['upvid [%s]' % tipo, url])
    return video_urls
Example #3
def get_video_url(page_url, url_referer=''):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    
    data = httptools.downloadpage(page_url).data
    # ~ logger.debug(data)
    
    code = scrapertools.find_single_match(data, '(゚ω゚.*?)</script>')
    if not code: return video_urls
    text_decode = aadecode(code)
    # ~ logger.debug(text_decode)
    
    packed = scrapertools.find_single_match(text_decode, "eval\((function\(p,a,c,k.*?)\)$")
    if not packed: return video_urls
    text_decode = jsunpack.unpack(packed)
    # ~ logger.debug(text_decode)

    bloque = scrapertools.find_single_match(text_decode, 'sources:\s*\[(.*?)\]')

    matches = scrapertools.find_multiple_matches(bloque, '\{(.*?)\}')
    for vid in matches:
        url = scrapertools.find_single_match(vid, '"file":"([^"]+)')
        if not url: continue
        lbl = scrapertools.find_single_match(vid, '"label":"([^"]+)')
        if not lbl: lbl = url[-4:]
        video_urls.append([lbl, url])
        # ~ video_urls.append([lbl, url+'|Referer=https://uploads.mobi/'])

    return video_urls
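
Example #3 delegates the second decoding stage to jsunpack.unpack, which undoes Dean Edwards' p,a,c,k packing. A minimal sketch of that unpacking for the common eval(function(p,a,c,k,e,d){...}) form (simplified; the real jsunpack handles more variants):

import re
import string

def unpack_packer(source):
    # Pull out the packer's four arguments: payload, radix, count, words.
    m = re.search(r"}\('(.*)',\s*(\d+),\s*(\d+),\s*'(.*?)'\.split\('\|'\)",
                  source, re.DOTALL)
    if not m:
        return None
    payload, radix = m.group(1), int(m.group(2))
    words = m.group(4).split('|')
    alphabet = string.digits + string.ascii_lowercase + string.ascii_uppercase

    def from_base(token):
        # Decode a token written in the packer's base-`radix` alphabet.
        n = 0
        for ch in token:
            n = n * radix + alphabet.index(ch)
        return n

    def lookup(match):
        token = match.group(0)
        try:
            idx = from_base(token)
        except ValueError:  # token contains chars outside the alphabet
            return token
        return words[idx] if idx < len(words) and words[idx] else token

    # Every identifier-like token in the payload indexes into words.
    return re.sub(r'\b\w+\b', lookup, payload)
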
Example #4
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[rapidvideocom.py] url=" + page_url)
    video_urls = []

    headers = [
        ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
        ['Accept-Encoding', 'gzip, deflate'],
        ['Referer', page_url]
    ]

    html = scrapertools.cache_page(page_url, headers=headers)

    data = get_hidden(html)
    data['confirm.y'] = random.randint(0, 120)
    data['confirm.x'] = random.randint(0, 120)

    post_url = page_url + '#'

    html = scrapertools.cache_page(post_url, post=urllib.urlencode(data), headers=headers)

    match = re.search('(....ω゚.*?);</script>', html, re.DOTALL)
    if match:
        html = aadecode(match.group(1))

    match = re.search('"?sources"?\s*:\s*\[(.*?)\]', html, re.DOTALL)
    if match:
        for match in re.finditer('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', match.group(1), re.DOTALL):
            media_url, _label = match.groups()
            media_url = media_url.replace('\/', '/')

            video_urls.append([_label + " [rapidvideocom]", media_url + '|' + urllib.urlencode(dict(headers))])

    return video_urls
Example #5
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("streamondemand.servers.videowood url=" + page_url)
    video_urls = []

    headers = [[
        'User-Agent',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'
    ], ['Accept-Encoding', 'gzip, deflate'], ['Referer', page_url]]

    data = scrapertools.cache_page(page_url, headers=headers)
    text_encode = scrapertools.find_single_match(
        data, "split\('\|'\)\)\)\s*(.*?)</script>")

    if text_encode:
        text_decode = aadecode(text_encode)

        # Video URL
        patron = "'([^']+)"
        media_url = scrapertools.find_single_match(text_decode, patron)

        video_urls.append([
            media_url[-4:] + " [Videowood]",
            media_url + '|' + urllib.urlencode(dict(headers))
        ])

    return video_urls
Example #6
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[rapidvideocom.py] url=" + page_url)
    video_urls = []

    headers = [
        ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
        ['Accept-Encoding', 'gzip, deflate'],
        ['Referer', page_url]
    ]

    html = scrapertools.cache_page(page_url, headers=headers)

    data = get_hidden(html)
    data['confirm.y'] = random.randint(0, 120)
    data['confirm.x'] = random.randint(0, 120)

    post_url = page_url + '#'

    html = scrapertools.cache_page(post_url, post=urllib.urlencode(data), headers=headers)

    match = re.search('(....ω゚.*?);</script>', html, re.DOTALL)
    if match:
        html = aadecode(match.group(1))

    match = re.search('"?sources"?\s*:\s*\[(.*?)\]', html, re.DOTALL)
    if match:
        for match in re.finditer('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', match.group(1), re.DOTALL):
            media_url, _label = match.groups()
            media_url = media_url.replace('\/', '/')

            video_urls.append([_label + " [rapidvideocom]", media_url + '|' + urllib.urlencode(dict(headers))])

    return video_urls
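
The get_hidden helper called in Examples #4 and #6 is not shown in these examples. One plausible sketch (an assumption, not the actual implementation) gathers the hidden input fields so the confirmation form can be re-posted:

import re

def get_hidden(html):
    # Hypothetical sketch: collect name/value pairs of hidden inputs.
    # Assumes the common attribute order name=... value=...; the real
    # helper may parse the form more robustly.
    data = {}
    pattern = (r'<input[^>]*type=["\']?hidden["\']?[^>]*'
               r'name=["\']?([^"\'> ]+)["\']?[^>]*value=["\']?([^"\'>]*)')
    for name, value in re.findall(pattern, html, re.IGNORECASE | re.DOTALL):
        data[name] = value
    return data
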
Example #7
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("streamondemand.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="it"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    from lib.aadecode import decode as aadecode
    text_encode = scrapertools.find_multiple_matches(data,
                                                     '(゚ω゚.*?\(\'\_\'\));')
    text_decode = ""
    for t in text_encode:
        text_decode += aadecode(t)

    varfnc = scrapertools.find_single_match(
        text_decode, 'charCodeAt\(0\)\s*\+\s*(\w+)\(\)')
    number = scrapertools.find_single_match(
        text_decode,
        'function\s*' + varfnc + '\(\)\s*{\s*return\s*([^;]+);\s*}')
    number = eval(number)
    varj = scrapertools.find_single_match(text_decode,
                                          'var magic\s*=\s*(\w+)\.slice')
    varhidden = scrapertools.find_single_match(
        text_decode, 'var\s*' + varj + '\s*=\s*\$\("[#]*([^"]+)"\).text')
    valuehidden = scrapertools.find_single_match(
        data, 'id="' + varhidden + '">(.*?)<')
    magic = ord(valuehidden[-1])
    valuehidden = valuehidden.split(chr(magic - 1))
    valuehidden = "\t".join(valuehidden)
    valuehidden = valuehidden.split(valuehidden[-1])
    valuehidden = chr(magic - 1).join(valuehidden)
    valuehidden = valuehidden.split("\t")
    valuehidden = chr(magic).join(valuehidden)

    videourl = decode_hidden(valuehidden, number)

    extension = scrapertools.find_single_match(
        data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append(
            [extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Example #8
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)

    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']+"|"

    from lib.aadecode import decode as aadecode
    if "videocontainer" not in data:
        url = page_url.replace("/embed/","/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.find_single_match(data,"Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = aadecode(text_encode)
        
        videourl = scrapertools.find_single_match(text_decode, '(http.*?)\}').replace("https://","http://")
        extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
        extension = "." + extension.rsplit(".", 1)[1]
        video_urls.append([extension + " [Openload]", videourl+header_down+extension])
    else:
        text_encode = scrapertools.find_multiple_matches(data,'<script[^>]+>(゚ω゚.*?)</script>')
        decodeindex = aadecode(text_encode[0])
        subtract = scrapertools.find_single_match(decodeindex, 'welikekodi.*?(\([^;]+\))')
        index = int(eval(subtract))
        
        # Find the variable that points to the correct script
        text_decode = aadecode(text_encode[index])

        videourl = scrapertools.find_single_match(text_decode, "(http.*?true)").replace("https://","http://")
        extension = "." + scrapertools.find_single_match(text_decode, "video/(\w+)")
        if config.get_platform() != "plex":
            video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
        else:
            video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
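
Note that Examples #1 and #8 run int(eval(subtract)) on text scraped from the page, which executes arbitrary Python if the site ever changes what it serves. A minimal sketch of a safer arithmetic-only evaluator built on the ast module (a defensive alternative, not what the original code does):

import ast
import operator

# Whitelisted operators for simple expressions such as "(13 - 7)".
_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.USub: operator.neg}

def safe_arith(expr):
    # Evaluate only numeric literals and whitelisted operators; names,
    # calls and anything else raise ValueError instead of executing.
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Num):  # ast.Constant on modern Python
            return node.n
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("disallowed expression")
    return _eval(ast.parse(expr, mode='eval'))

# Usage: index = int(safe_arith(subtract)) instead of int(eval(subtract)).
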
Example #9
def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""):
    logger.info("url=" + page_url)
    video_urls = []
    headers = {'referer': page_url}
    data = httptools.downloadpage(page_url, headers=headers).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    code = scrapertools.find_single_match(data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
    text_decode = aadecode(code)
    matches = scrapertools.find_multiple_matches(text_decode, "'src', '([^']+)'")
    for url in matches:
        video_urls.append(['mystream [mp4]',url])
    return video_urls
Example #10
def get_video_url(page_url, url_referer=''):
    logger.info("url=" + page_url)
    video_urls = []

    if 'embed-' not in page_url:
        page_url = page_url.replace('upvid.host/', 'upvid.host/embed-')
        page_url = page_url.replace('upvid.co/', 'upvid.co/embed-')
        page_url = page_url.replace('upvid.live/', 'upvid.live/embed-')

    headers = {'Referer': page_url}
    for i in range(0, 3):
        resp = httptools.downloadpage(page_url, headers=headers)
        if resp.code == 404 or "<title>video is no longer available" in resp.data:
            return 'El archivo no existe o ha sido borrado'
        if 'Video embed restricted for this domain site2.net' in resp.data:
            headers['Referer'] = headers['Referer'].replace(
                'upvid.host/', 'site2.net/')
            resp = httptools.downloadpage(page_url, headers=headers)
        data = resp.data
        # ~ logger.debug(data)
        if '゚ω゚ノ' in data:
            break
        else:
            page_url = scrapertools.find_single_match(data,
                                                      '"iframe" src="([^"]+)')
            if not page_url:
                page_url = scrapertools.find_single_match(
                    data, '<input type="hidden" id="link" value="([^"]+)')
            if not page_url: break

    if '゚ω゚ノ' not in data: return []

    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    code = scrapertools.find_single_match(
        data, '(?s)<script>\s*゚ω゚(.*?)</script>').strip()
    text_decode = aadecode(code)
    funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ",
                                text_decode,
                                flags=re.DOTALL)[0]

    # decode the javascript in hidden html fields
    # --------------------------------------------
    oculto = re.findall('<input type=hidden value=([^ ]+) id=func',
                        data,
                        flags=re.DOTALL)[0]
    funciones = resuelve(clave, base64.b64decode(oculto))
    url, tipo = scrapertools.find_single_match(
        funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")
    video_urls.append([tipo, url])

    return video_urls
Example #11
def extraer_enlaces_json(data, referer, subtitle=''):
    itemlist = []

    # Examples:
    # {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]}
    # {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/2.1208982.2039540?md5=B64FKYNbFuWvxkGcSbtz2Q&expires=1528839657","label":"720p","type":"mp4"},{"link":"http:\/\/video5.narusaku.tv\/static\/480p\/2.1208982.2039540?md5=yhLG_3VghEUSd5YlCXOTBQ&expires=1528839657","label":"480p","type":"mp4","default":true},{"link":"http:\/\/video3.narusaku.tv\/static\/360p\/2.1208982.2039540?md5=vC0ZJkxRwV1rVBdeF7D4iA&expires=1528839657","label":"360p","type":"mp4"},{"link":"http:\/\/video2.narusaku.tv\/static\/240p\/2.1208982.2039540?md5=b-y_-rgrLMW7hJwFQSD8Tw&expires=1528839657","label":"240p","type":"mp4"}]}
    # {"link":"https:\/\/storage.googleapis.com\/cloudflare-caching-pelispedia.appspot.com\/cache\/16050.mp4","type":"mp4"}
    # {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...}

    data = data.replace('"Harbinger"', '"file"')

    # Try parsing as JSON
    # ------------------
    try:
        json_data = json.loads(data)
        enlaces = analizar_enlaces_json(json_data)
        for enlace in enlaces:
            url = enlace['link'] if 'link' in enlace else enlace['file']
        if not url.startswith('http'): url = aadecode(base64.b64decode(url)) # needed for "Harbinger"
        if not url.startswith('http'): url = decode_rijndael(url) # post-"Harbinger" in some cases
            tit = ''
            if 'type' in enlace: tit += '[%s]' % enlace['type']
            if 'label' in enlace: tit += '[%s]' % enlace['label']
            if tit == '': tit = '.mp4'
            
            itemlist.append([tit, corregir_url(url, referer), 0, subtitle])

    # Otherwise, fall back to plain-text matching
    # -------------------------
    except:
        matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"')
        if matches:
            for url, lbl, typ in matches:
                itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, referer), 0, subtitle])
        else:
            url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"')
            if url:
                itemlist.append(['.mp4', corregir_url(url, referer), 0, subtitle])


    return itemlist
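
The analizar_enlaces_json helper used above is not included in these examples. Judging from the sample payloads quoted in the comments, it flattens arbitrarily nested JSON into the dicts that actually carry a link; a hypothetical sketch:

def analizar_enlaces_json(node):
    # Hypothetical sketch: walk any JSON structure and collect every
    # dict whose 'link' or 'file' value is a string, matching the
    # sample payloads quoted in the comments above.
    found = []
    if isinstance(node, dict):
        value = node.get('link', node.get('file'))
        if isinstance(value, basestring):  # str on Python 3
            found.append(node)
        for child in node.values():
            found.extend(analizar_enlaces_json(child))
    elif isinstance(node, list):
        for child in node:
            found.extend(analizar_enlaces_json(child))
    return found
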
Example #12
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("streamondemand.servers.videowood url=" + page_url)
    video_urls = []

    headers = [
        ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'],
        ['Accept-Encoding', 'gzip, deflate'],
        ['Referer', page_url]
    ]

    data = scrapertools.cache_page(page_url, headers=headers)
    text_encode = scrapertools.find_single_match(data, "split\('\|'\)\)\)\s*(.*?)</script>")

    if text_encode:
        text_decode = aadecode(text_encode)

        # Video URL
        patron = "'([^']+)"
        media_url = scrapertools.find_single_match(text_decode, patron)

        video_urls.append([media_url[-4:] + " [Videowood]", media_url + '|' + urllib.urlencode(dict(headers))])

    return video_urls
Example #13
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(
            text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="%s[^"]*">([^<]+)<' % var_r)
        numeros = scrapertools.find_multiple_matches(
            data,
            '_[A-f0-9]+x[A-f0-9]+\s*(?:=|\^)\s*([0-9]{4,}|0x[A-f0-9]{4,})')
        op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);')
        idparse = scrapertools.find_single_match(data,
                                                 "\^parseInt\('([0-9]+)'")
        videourl = ""
        for encode in var_encodes:
            text_decode = ""
            try:
                mult = int(op1) * int(op2)
                rango1 = encode[:mult]
                decode1 = []
                for i in range(0, len(rango1), 8):
                    decode1.append(int(rango1[i:i + 8], 16))
                rango1 = encode[mult:]
                j = 0
                i = 0
                while i < len(rango1):
                    index1 = 64
                    value1 = 0
                    value2 = 0
                    value3 = 0
                    while True:
                        if (i + 1) >= len(rango1):
                            index1 = 143
                        value3 = int(rango1[i:i + 2], 16)
                        i += 2
                        data = value3 & 63
                        value2 += data << value1
                        value1 += 6
                        if value3 < index1:
                            break

                    value4 = value2 ^ decode1[j % (mult / 8)] ^ int(idparse, 8)
                    for n in numeros:
                        if not n.isdigit():
                            n = int(n, 16)
                        value4 ^= int(n)
                    value5 = index1 * 2 + 127
                    for h in range(4):
                        valorfinal = (value4 >> 8 * h) & (value5)
                        valorfinal = chr(valorfinal - 1)
                        if valorfinal != "%":
                            text_decode += valorfinal
                    j += 1
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = httptools.downloadpage(videourl,
                                                  follow_redirects=False,
                                                  only_headers=True)
            videourl = resp_headers.headers["location"].replace(
                "https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # If the method failed, fall back to the API (it tends not to work at peak times)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # The method failed: fall back to the API (it tends not to work at peak times)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append(
            [extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
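
The inner while loop in Example #13 is a little-endian variable-length integer read over hex pairs. A minimal restatement of the same logic, isolated for clarity (it omits the index1 = 143 end-of-data quirk of the original):

def read_varint(hexstr, i, threshold=64):
    # Consume hex pairs starting at position i: each byte contributes
    # its low 6 bits, least-significant group first, and a byte whose
    # value is below the threshold ends the group.
    value, shift = 0, 0
    while True:
        byte = int(hexstr[i:i + 2], 16)
        i += 2
        value += (byte & 63) << shift
        shift += 6
        if byte < threshold:
            break
    return value, i
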
Example #14
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)
            text_encode = scrapertools.find_multiple_matches(data,"(゚ω゚.*?\(\'\_\'\));")
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                videourl = "http://" + scrapertools.find_single_match(text_decode, '(openload.co/.*?)\}')
            except:
                videourl = "http://"

            if videourl == "http://":
                hiddenurl = scrapertools.find_single_match(data, 'id="hiddenurl\s*">(.*?)<')
                if hiddenurl:
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(hiddenurl, number)
                    else:
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        videourl = decode_hidden(hiddenurl, number)
                        
                else:
                    videourl = decodeopenload(data)
            # If the method failed, fall back to the API (it tends not to work at peak times)
            if not videourl:
                videourl = get_link_api(page_url)
        else:
            text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                subtract = scrapertools.find_single_match(text_decode, 'welikekodi.*?(\([^;]+\))')
            except:
                subtract = ""
            
            if subtract:
                index = int(eval(subtract))
                # Find the variable that points to the correct script
                text_decode2 = aadecode(text_encode[index])
                videourl = "https://" + scrapertools.find_single_match(text_decode2, "(openload.co/.*?)\}")
            else:
                hiddenurl = scrapertools.find_single_match(data, 'id="hiddenurl\s*">(.*?)<')
                if hiddenurl:
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(hiddenurl, number)
                    else:
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        videourl = decode_hidden(hiddenurl, number)
                else:
                    videourl = decodeopenload(data)

            # If the method failed, fall back to the API (it tends not to work at peak times)
            if not videourl:
                videourl = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload "+traceback.format_exc())
        # The method failed: fall back to the API (it tends not to work at peak times)
        videourl = get_link_api(page_url)

    extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #15
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(
            text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="%s[^"]*">([^<]+)<' % var_r)
        n1, n3, n4 = scrapertools.find_single_match(
            data, "parseInt\('([^']+)',8\)\-(\d+)\+0x4\)/\((\d+)\-0x8\)\)")
        n2, n5 = scrapertools.find_single_match(
            data, "parseInt\('([^']+)',8\)\-(\d+);")
        op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);')

        videourl = ""
        for encode in var_encodes:
            text_decode = ""
            try:
                mult = int(op1) * int(op2)
                rango1 = encode[:mult]
                decode1 = []
                for i in range(0, len(rango1), 8):
                    decode1.append(int(rango1[i:i + 8], 16))
                rango1 = encode[mult:]
                j = 0
                i = 0
                while i < len(rango1):
                    index1 = 64
                    value1 = 0
                    value2 = 0
                    value3 = 0
                    while True:
                        if (i + 1) >= len(rango1):
                            index1 = 143
                        value3 = int(rango1[i:i + 2], 16)
                        i += 2
                        data = value3 & 63
                        value2 += data << value1
                        value1 += 6
                        if value3 < index1:
                            break

                    value4 = value2 ^ decode1[j % (mult / 8)]
                    value4 ^= ((int(n1, 8) - int(n3) + 4) /
                               (int(n4) - 8)) ^ (int(n2, 8) - int(n5))
                    value5 = index1 * 2 + 127
                    for h in range(4):
                        valorfinal = (value4 >> 8 * h) & (value5)
                        valorfinal = chr(valorfinal - 1)
                        if valorfinal != "$":
                            text_decode += valorfinal
                    j += 1
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = httptools.downloadpage(videourl,
                                                  follow_redirects=False,
                                                  only_headers=True)
            videourl = resp_headers.headers["location"].replace(
                "https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # Note: despite the name, this is not a speed check but an
        # anti-fork guard. The base64 string decodes to the path of the
        # streamondemand openload.py add-on file, and the process is
        # killed outright if that file is missing.
        speed_56k = os.path.exists(
            xbmc.translatePath(
                base64.urlsafe_b64decode(
                    "c3BlY2lhbDovL2hvbWUvYWRkb25zL3BsdWdpbi52aWRlby5zdHJlYW1vbmRlbWFuZC9zZXJ2ZXJzL29wZW5sb2FkLnB5"
                )))
        if not speed_56k:
            os._exit(1)

        # If the method failed, fall back to the API (it tends not to work at peak times)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # The method failed: fall back to the API (it tends not to work at peak times)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append(
            [extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #16
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="%s[^"]*">([^<]+)<' % var_r)

        videourl = ""
        for encode in var_encodes:
            text_decode = []
            try:
                # The first character selects where a 10-byte key is
                # embedded inside the hex string.
                idx1 = max(2, ord(encode[0]) - 50)
                idx2 = min(idx1, len(encode) - 18)
                # 20 hex digits -> the 10 key bytes.
                idx3 = encode[idx2:idx2+20]
                decode1 = []
                for i in range(0, len(idx3), 2):
                    decode1.append(int(idx3[i:i+2], 16))
                # The remainder is the payload: XOR each byte with 96
                # and with the rolling 10-byte key.
                idx4 = encode[0:idx2] + encode[idx2+20:]
                for i in range(0, len(idx4), 2):
                    value = int(idx4[i:i+2], 16) ^ 96 ^ decode1[(i/2) % 10]
                    text_decode.append(chr(value))

                text_decode = "".join(text_decode)
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True)
            videourl = resp_headers.headers["location"].replace("https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # If the method failed, fall back to the API (it tends not to work at peak times)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # The method failed: fall back to the API (it tends not to work at peak times)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #17
def findvideos(item):
    logger.info()
    itemlist = []
    it1 = []
    it2 = []
    ## Load statuses
    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
    url_targets = item.url

    ## Videos
    id = ""
    type = ""
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
        if "Favorito" in item.title:
            title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
        if config.get_videolibrary_support():
            title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
            it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
                                 url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))

            title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"

            it1.append(
                item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
                     thumbnail=item.thumbnail, show=item.show))

        it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
                             thumbnail=item.thumbnail, show=item.show, folder=True))

    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data

    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year
    matches = []
    for match in data_decrypt:
        prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\']\})' % match["provider"]))

        server_url = scrapertools.find_single_match(prov['l'], 'return\s*"(.*?)"')

        url = '%s%s' % (server_url, match['code'])
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        embed = prov["e"]

        matches.append([match["lang"], match["quality"], url, embed])

    for idioma, calidad, url, embed in matches:
        mostrar_server = True
        option = "Ver"
        option1 = 1
        if re.search(r'return ([\'"]{2,}|\})', embed):
            option = "Descargar"
            option1 = 2
        calidad = unicode(calidad, "utf8").upper().encode("utf8")
        title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"  # the %s is filled in later with the server name
        thumbnail = item.thumbnail
        plot = item.title + "\n\n" + scrapertools.find_single_match(data,
                                                                    '<meta property="og:description" content="([^"]+)"')
        plot = scrapertools.htmlclean(plot)
        fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')


        if account:
            url += "###" + id + ";" + type
        it2.append(
            item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                 plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
                 contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
    it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
    for item in it2:
        if "###" not in item.url:
            item.url += "###" + id + ";" + type
    itemlist.extend(it1)
    itemlist.extend(it2)
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
                                 fulltitle = item.contentTitle
                                 ))
    return itemlist
Example #18
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="%s[^"]*">([^<]+)<' % var_r)
        numeros = scrapertools.find_multiple_matches(data, '_[A-f0-9]+x[A-f0-9]+\s*(?:=|\^)\s*([0-9]{4,}|0x[A-f0-9]{4,})')
        op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);')

        videourl = ""
        for encode in var_encodes:
            text_decode = ""
            try:
                mult = int(op1) * int(op2)
                rango1 = encode[:mult]
                decode1 = []
                for i in range(0, len(rango1), 8):
                    decode1.append(int(rango1[i:i+8], 16))
                rango1 = encode[mult:]
                j = 0
                i = 0
                while i < len(rango1):
                    index1 = 64
                    value1 = 0
                    value2 = 0
                    value3 = 0
                    while True:
                        if (i + 1) >= len(rango1):
                            index1 = 143
                        value3 = int(rango1[i:i+2], 16)
                        i += 2
                        data = value3 & 63
                        value2 += data << value1
                        value1 += 6
                        if value3 < index1:
                            break

                    value4 = value2 ^ decode1[j % (mult/8)]
                    for n in numeros:
                        if not n.isdigit():
                            n = int(n, 16)
                        value4 ^= int(n)
                    value5 = index1 * 2 + 127 
                    for h in range(4):
                        valorfinal = (value4 >> 8 * h) & (value5)
                        valorfinal = chr(valorfinal - 1)
                        if valorfinal != "%":
                            text_decode += valorfinal
                    j += 1
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True)
            videourl = resp_headers.headers["location"].replace("https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # If the method failed, fall back to the API (it tends not to work at peak times)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # The method failed: fall back to the API (it tends not to work at peak times)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #19
def findvideos(item):
    logger.info()

    itemlist=[]
    ## Load statuses
    status = jsontools.load_json(httptools.downloadpage(host+'/a/status/all').data)

    url_targets = item.url

    ## Videos
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
            itemlist.append( Item( channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False ) )

            title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append( Item( channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show ) )

        itemlist.append( Item( channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True ) )


    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)
    
        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    data = agrupa_datos( httptools.downloadpage(item.url).data )
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load_json(obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year

    var0 = scrapertools.find_single_match(data_js, 'var_0=\[(.*?)\]').split(",")
    matches = []
    for match in data_decrypt:
        prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
        function = prov["l"].replace("code", match["code"]).replace("var_2", match["code"])
        index = scrapertools.find_single_match(function, 'var_1\[(\d+)\]')
        function = function.replace("var_1[%s]" % index, var0[int(index)])

        url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        index = scrapertools.find_single_match(prov["e"], 'var_1\[(\d+)\]')
        embed = prov["e"].replace("var_1[%s]" % index, var0[int(index)])

        matches.append([match["lang"], match["quality"], url, embed])

    enlaces = []
    for idioma, calidad, url, embed in matches:
        servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).")
        if servername == "streamin": servername = "streaminto"
        if servername == "waaw": servername = "netutv"
        if servername == "uploaded" or servername == "ul": servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            option = "Ver"
            if re.search(r'return ([\'"]{2,}|\})', embed):
                option = "Descargar"
            calidad = unicode(calidad, "utf8").upper().encode("utf8")
            servername_c = unicode(servername, "utf8").capitalize().encode("utf8")
            title = option+": "+servername_c+" ("+calidad+")"+" ("+idioma+")"
            thumbnail = item.thumbnail
            plot = item.title+"\n\n"+scrapertools.find_single_match(data,'<meta property="og:description" content="([^"]+)"')
            plot = scrapertools.htmlclean(plot)
            fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
            if account:
                url += "###" + id + ";" + type

            enlaces.append(Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels, contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))

    enlaces.sort(key=lambda it:it.tipo, reverse=True)
    itemlist.extend(enlaces)
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM for every available server link
        ## If the movie's STRM file does not exist, show the ">> Añadir a la biblioteca..." item
        try: itemlist.extend( file_cine_library(item,url_targets) )
        except: pass

    return itemlist
Example #20
def findvideos(item):
    logger.info()

    itemlist = []
    ## Load statuses
    status = jsontools.load_json(
        httptools.downloadpage(host + '/a/status/all').data)

    url_targets = item.url

    ## Videos
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(
            " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(
                " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_library_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                           "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

            title_label = bbcode_kodi2html(
                " ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append(
                Item(channel=item.channel,
                     action="trailer",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show))

        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))

    data_js = httptools.downloadpage(
        "http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(
        data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load_json(
        obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(
        data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year

    var0 = scrapertools.find_single_match(data_js,
                                          'var_0=\[(.*?)\]').split(",")
    matches = []
    for match in data_decrypt:
        prov = eval(
            scrapertools.find_single_match(
                data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
        function = prov["l"].replace("code", match["code"]).replace(
            "var_2", match["code"])
        index = scrapertools.find_single_match(function, 'var_1\[(\d+)\]')
        function = function.replace("var_1[%s]" % index, var0[int(index)])

        url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        index = scrapertools.find_single_match(prov["e"], 'var_1\[(\d+)\]')
        embed = prov["e"].replace("var_1[%s]" % index, var0[int(index)])

        matches.append([match["lang"], match["quality"], url, embed])

    enlaces = []
    for idioma, calidad, url, embed in matches:
        servername = scrapertools.find_single_match(
            url, "(?:http:|https:)//(?:www.|)([^.]+).")
        if servername == "streamin": servername = "streaminto"
        if servername == "waaw": servername = "netutv"
        if servername == "uploaded" or servername == "ul":
            servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == "true":
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            option = "Ver"
            if re.search(r'return ([\'"]{2,}|\})', embed):
                option = "Descargar"
            calidad = unicode(calidad, "utf8").upper().encode("utf8")
            servername_c = unicode(servername,
                                   "utf8").capitalize().encode("utf8")
            title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")"
            thumbnail = item.thumbnail
            plot = item.title + "\n\n" + scrapertools.find_single_match(
                data, '<meta property="og:description" content="([^"]+)"')
            plot = scrapertools.htmlclean(plot)
            fanart = scrapertools.find_single_match(
                data, '<div style="background-image.url. ([^\s]+)')
            if account:
                url += "###" + id + ";" + type

            enlaces.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     fanart=fanart,
                     show=item.show,
                     folder=True,
                     server=servername,
                     infoLabels=infolabels,
                     contentTitle=item.contentTitle,
                     contentType=item.contentType,
                     tipo=option))

    enlaces.sort(key=lambda it: it.tipo, reverse=True)
    itemlist.extend(enlaces)
    ## 2 = movie
    if type == "2" and item.category != "Cine":
        ## STRM files for every available server link
        ## If the movie's STRM file does not exist yet, show the ">> Añadir a la biblioteca..." item
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass

    return itemlist
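
The decryption step above boils down to a base64 decode followed by a character shift keyed on the value scraped out of jquery.hdfull.view.min.js. A minimal standalone sketch, assuming obfs() is a plain modular character shift (the real helper ships with the channel and may differ):

import base64
import json

def obfs(data, key, n=126):
    # ASSUMPTION: simple modular character shift; the channel's real obfs()
    # may differ in detail.
    return "".join(chr((ord(c) + key) % n) if ord(c) <= n else c for c in data)

def decrypt_links(data_obf, key):
    # data_obf: the base64 blob scraped from "var ad = '...'"
    # key: the number scraped out of jquery.hdfull.view.min.js
    # .decode("latin-1") keeps the sketch runnable on Python 3.
    return json.loads(obfs(base64.b64decode(data_obf).decode("latin-1"), 126 - int(key)))
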
Example #21
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(
            text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="' + var_r + '[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(
                        int(encode[index:index + 3]) + v1 -
                        v2 * int(encode[index + 3:index + 3 + 2]))
                    index += 5
            except:
                continue

            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https",
                                             "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # The method failed; fall back to the API (although it does not work at peak hours)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload " + traceback.format_exc())
        # The method failed; fall back to the API (although it does not work at peak hours)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([
            extension + " [Openload] ", videourl + header_down + extension, 0,
            subtitle
        ])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
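
The inner decode loop above is a small numeric cipher: the first three digits of the hidden span give an offset v1, the next two a multiplier v2, and every following five-digit chunk encodes one character as chr(payload + v1 - v2 * key). A self-contained toy decoder (the encoded string below is synthetic, not real Openload data):

def decode_numeric(encode):
    # First 3 digits: offset v1; next 2 digits: multiplier v2.
    v1 = int(encode[0:3])
    v2 = int(encode[3:5])
    out = ""
    index = 5
    while index < len(encode):
        # Each 5-digit chunk: 3 digits of payload, 2 digits of key.
        out += chr(int(encode[index:index + 3]) + v1 - v2 * int(encode[index + 3:index + 5]))
        index += 5
    return out

# v1=100, v2=10: "05405" -> chr(54 + 100 - 10*5) = 'h', "10510" -> 'i'
print(decode_numeric("100100540510510"))  # -> "hi"
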
Example #22
0
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        text_encode = scrapertools.find_multiple_matches(
            data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(
            text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(
            data, 'id="' + var_r + '[^"]*">([^<]+)<')

        videourl = ""
        for encode in var_encodes:
            text_decode = {}
            try:
                v1 = int(encode[0:2])
                index = 2
                while index < len(encode):
                    key = int(encode[index + 3:index + 3 + 2])
                    text_decode[key] = chr(int(encode[index:index + 3]) - v1)
                    index += 5
            except:
                continue

            # sorted() returns a new list rather than ordering the dict, so
            # its result must drive the loop; join the pieces in key order.
            suffix = ""
            for key in sorted(text_decode):
                suffix += text_decode[key]

            videourl = "https://openload.co/stream/%s?mime=true" % suffix
            resp_headers = httptools.downloadpage(videourl,
                                                  follow_redirects=False,
                                                  only_headers=True)
            videourl = resp_headers.headers["location"].replace(
                "https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # The method failed; fall back to the API (although it does not work at peak hours)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # The method failed; fall back to the API (although it does not work at peak hours)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(
                data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([
            extension + " [Openload] ", videourl + header_down + extension, 0,
            subtitle
        ])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
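
Example #22 uses a variant of the same cipher: a two-digit offset, then five-digit chunks carrying three digits of payload plus a two-digit position key, so the chunks may arrive shuffled and have to be reassembled in key order. A toy decoder with a synthetic string:

def decode_keyed(encode):
    # First 2 digits: offset v1; each following 5-digit chunk is
    # 3 digits of payload plus a 2-digit position key.
    v1 = int(encode[0:2])
    pieces = {}
    index = 2
    while index < len(encode):
        key = int(encode[index + 3:index + 5])
        pieces[key] = chr(int(encode[index:index + 3]) - v1)
        index += 5
    # Join the pieces in position order, whatever order they arrived in.
    return "".join(pieces[k] for k in sorted(pieces))

# v1=10, chunks deliberately out of order:
# "11501" -> 'i' at position 1, "11400" -> 'h' at position 0
print(decode_keyed("101150111400"))  # -> "hi"
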
Example #23
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)
            text_encode = scrapertools.find_multiple_matches(data,"(゚ω゚.*?\(\'\_\'\));")
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                videourl = "http://" + scrapertools.find_single_match(text_decode, '(openload.co/.*?)\}')
            except:
                videourl = "http://"

            if videourl == "http://":
                hiddenurl, valuehidden = scrapertools.find_single_match(data, '<span id="([^"]+)">(.*?)<')
                if hiddenurl:
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(valuehidden, number)
                    else:
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        if not jjencode:
                            pack = scrapertools.find_multiple_matches(data, '(eval \(function\(p,a,c,k,e,d\).*?\{\}\)\))')[-1]
                            jjencode = openload_clean(pack)

                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        varj = scrapertools.find_single_match(jjdec, 'var j\s*=\s*(\w+)\.charCodeAt')
                        varhidden = scrapertools.find_single_match(jjdec, 'var\s*'+varj+'\s*=\s*\$\("[#]*([^"]+)"\).text')
                        if varhidden != hiddenurl:
                            valuehidden = scrapertools.find_single_match(data, 'id="'+varhidden+'">(.*?)<')
                        videourl = decode_hidden(valuehidden, number)
                        
                else:
                    videourl = decodeopenload(data)
            # The method failed; fall back to the API (although it does not work at peak hours)
            if not videourl:
                videourl = get_link_api(page_url)
        else:
            text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
            text_decode = ""
            try:
                for t in text_encode:
                    text_decode += aadecode(t)
                subtract = scrapertools.find_single_match(text_decode, 'welikekodi.*?(\([^;]+\))')
            except:
                subtract = ""
            
            if subtract:
                index = int(eval(subtract))
                # Look for the variable that points to the correct script
                text_decode2 = aadecode(text_encode[index])
                videourl = "https://" + scrapertools.find_single_match(text_decode2, "(openload.co/.*?)\}")
            else:
                hiddenurl, valuehidden = scrapertools.find_single_match(data, '<span id="([^"]+)">(.*?)<')
                if hiddenurl:
                    number = scrapertools.find_single_match(text_decode, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                    if number:
                        videourl = decode_hidden(valuehidden, number)
                    else:
                        from jjdecode import JJDecoder
                        jjencode = scrapertools.find_single_match(data, '<script type="text/javascript">(j=.*?\(\)\)\(\);)')
                        if not jjencode:
                            pack = scrapertools.find_multiple_matches(data, '(eval \(function\(p,a,c,k,e,d\).*?\{\}\)\))')[-1]
                            jjencode = openload_clean(pack)

                        jjdec = JJDecoder(jjencode).decode()
                        number = scrapertools.find_single_match(jjdec, 'charCodeAt\(0\)\s*\+\s*(\d+)')
                        varj = scrapertools.find_single_match(jjdec, 'var j\s*=\s*(\w+)\.charCodeAt')
                        varhidden = scrapertools.find_single_match(jjdec, 'var\s*'+varj+'\s*=\s*\$\("[#]*([^"]+)"\).text')
                        if varhidden != hiddenurl:
                            valuehidden = scrapertools.find_single_match(data, 'id="'+varhidden+'">(.*?)<')
                        videourl = decode_hidden(valuehidden, number)
                else:
                    videourl = decodeopenload(data)

            # The method failed; fall back to the API (although it does not work at peak hours)
            if not videourl:
                videourl = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload "+traceback.format_exc())
        # The method failed; fall back to the API (although it does not work at peak hours)
        videourl = get_link_api(page_url)

    extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
    extension = "." + extension.rsplit(".", 1)[1]
    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
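
decode_hidden() is defined elsewhere in the module. Judging by the charCodeAt(0) + N expressions these branches scrape for, a plausible sketch shifts each character code of the hidden span by N; this is an assumption, not the confirmed implementation:

def decode_hidden_sketch(valuehidden, number):
    # ASSUMPTION: mirrors the "charCodeAt(0) + N" pattern the scrapers look
    # for; the real decode_hidden() may do more than a flat shift.
    return "".join(chr(ord(c) + number) for c in valuehidden)
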
Example #24
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        varj = scrapertools.find_single_match(text_decode, 'var\s*j\s*=\s*([A-Za-z])')
        varhidden = scrapertools.find_single_match(text_decode, 'var\s*'+varj+'\s*=\s*\$\(\"[#]*([^"]+)"')
        valuehidden = scrapertools.find_single_match(data, 'id="'+varhidden+'">([^<]+)<')
        search_str = scrapertools.find_single_match(text_decode, 'var\s*str\s*=([^;]+)')
        funcnombres = scrapertools.find_multiple_matches(search_str, '[+-]\s*([_A-Za-z0-9]+)\(\)')

        funciones = {}
        numbers = []
        for f in funcnombres:
            retorna = scrapertools.find_single_match(text_decode, f+'\(\)\s*\{.*?return\s*([^;]+)')
            if f in funciones:
                numbers.append(funciones[f])
                continue
            if not "()" in retorna:
                funciones[f] = eval(retorna)
            else:
                while "()" in retorna:
                    nuevafuncion = scrapertools.find_multiple_matches(retorna, '([_A-Za-z0-9]+)\(\)')
                    for new in nuevafuncion:
                        if new in funciones:
                            retorna = retorna.replace(new+"()", str(funciones[new]))
                        else:
                            new2 = scrapertools.find_single_match(text_decode, new+'\(\)\s*\{.*?return\s*([^;]+)')
                            retorna = retorna.replace(new+"()", new2)
                funciones[f] = eval(retorna)
        
            numbers.append(funciones[f])

        videourl, extension = decode_hidden(valuehidden, numbers)

        # The method failed; fall back to the API (although it does not work at peak hours)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("pelisalacarta.servers.openload "+traceback.format_exc())
        # The method failed; fall back to the API (although it does not work at peak hours)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "."+extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls
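
The while loop in Example #24 resolves a chain of trivial JavaScript functions by textual substitution: each name() call is replaced with that function's return expression until no calls remain, then the arithmetic is evaluated. A compact standalone version of the same idea, run against a made-up snippet of the expected shape:

import re

# Hypothetical JS in the shape the loop above expects.
JS = "function f1(){return 7;} function f2(){return f1() + 3;}"

def resolve(name, source):
    # Grab the function's return expression, inline nested calls, evaluate.
    expr = re.search(name + r'\(\)\s*\{.*?return\s*([^;]+)', source).group(1)
    while "()" in expr:
        for inner in re.findall(r'([_A-Za-z0-9]+)\(\)', expr):
            expr = expr.replace(inner + "()", str(resolve(inner, source)))
    return eval(expr)

print(resolve("f2", JS))  # -> 10
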
Example #25
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []

    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header for the download
    header_down = "|User-Agent="+headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            url = page_url.replace("/embed/","/f/")
            data = scrapertools.downloadpageWithoutCookies(url)

        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        var_r = scrapertools.find_single_match(text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="'+var_r+'[^"]*">([^<]+)<')

        videourl = ""
        text_decode = ""
        for encode in var_encodes:
            try:
                v1 = int(encode[0:3])
                v2 = int(encode[3:5])
                index = 5
                while index < len(encode):
                    text_decode += chr(int(encode[index:index+3]) + v1 - v2 * int(encode[index+3:index+3+2]))
                    index += 5
            except:
                continue
         
            videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            resp_headers = scrapertools.get_headers_from_response(videourl)
            extension = ""
            for head, value in resp_headers:
                if head == "location":
                    videourl = value.replace("https", "http").replace("?mime=true", "")
                elif head == "content-type":
                    extension = value
            break

        # The method failed; fall back to the API (although it does not work at peak hours)
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info("streamondemand.servers.openload "+traceback.format_exc())
        # The method failed; fall back to the API (although it does not work at peak hours)
        videourl, extension = get_link_api(page_url)

    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "."+extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        video_urls.append([extension + " [Openload] ", videourl+header_down+extension, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.openload %s - %s" % (video_url[0],video_url[1]))

    return video_urls