Example #1
def uptostream(data):
    video_id = match(data, patron=r"var videoId\s*=\s*'([^']+)").match
    subtitle = match(data, patron=r'kind="subtitles" src="([^"]+)"').match
    if subtitle and '://' not in subtitle:
        subtitle = "http://" + subtitle
    video_urls = []
    api_url = "https://uptostream.com/api/streaming/source/get?token=null&file_code=%s" % video_id
    api_data = httptools.downloadpage(api_url).json
    js_code = api_data.get('data', {}).get('sources', '')  # {} default keeps the chained .get() safe

    from lib import js2py

    context = js2py.EvalJs({'atob': atob})
    context.execute(js_code)
    result = context.sources

    for x in result:
        media_url = x.get('src', '')
        tipo = x.get('type', '')
        res = x.get('label', '')
        lang = x.get('lang', '')
        tipo = tipo.replace("video/","")
        if lang: extension = "{} - {} [{}]".format(tipo, res, lang.upper())
        else: extension = "{} - {}".format(tipo, res)
        video_urls.append([extension + " [UPtoStream]", media_url, 0, subtitle])
    # Sort once, after the loop, by the numeric resolution in the label (e.g. "720p")
    video_urls.sort(key=lambda url: int(match(url[0], patron=r'(\d+)p').match))
    return video_urls
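
All of these excerpts pass a Python atob callable into js2py.EvalJs so the evaluated JavaScript can call atob() as it would in a browser; httptools, scrapertools/match and logger likewise come from the surrounding addon framework and are imported at module level, outside the excerpts. A minimal sketch of what such an atob helper might look like (only the name comes from the examples; the implementation below is an assumption):

import base64

def atob(encoded):
    # Mirror of the browser's window.atob: base64 text in, decoded text out.
    # latin-1 keeps the 1:1 byte-to-character mapping of atob's binary string.
    return base64.b64decode(encoded).decode('latin-1')
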
Example #2
def uptostream(data):
    video_id = scrapertools.find_single_match(data,
                                              r"var videoId\s*=\s*'([^']+)';")
    subtitle = scrapertools.find_single_match(
        data, "kind='subtitles' src='//([^']+)'")
    if subtitle:
        subtitle = "http://" + subtitle
    video_urls = []
    api_url = "https://uptostream.com/api/streaming/source/get?token=null&file_code=%s" % video_id
    api_data = httptools.downloadpage(api_url).json
    js_code = api_data.get('data', {}).get('sources', '')  # {} default keeps the chained .get() safe

    from lib import js2py

    context = js2py.EvalJs({'atob': atob})
    context.execute(js_code)
    result = context.sources

    for x in result:
        media_url = x.get('src', '')
        tipo = x.get('type', '')
        res = x.get('label', '')
        #lang = x.get('lang', '')
        tipo = tipo.replace("video/", "")
        extension = ".%s (%s)" % (tipo, res)
        #if lang:
        #    extension = extension.replace(")", "/%s)" % lang[:3])
        video_urls.append(
            [extension + " [uptostream]", media_url, 0, subtitle])
    return video_urls
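
Both variants follow the same js2py flow: build an EvalJs context, inject Python callables by name, execute the downloaded script, then read its globals back as attributes of the context. A self-contained sketch of that flow with a made-up script (the base64 blob simply wraps a one-entry sources array):

import base64
import js2py  # bundled as lib.js2py inside the addon

def atob(encoded):
    return base64.b64decode(encoded).decode('latin-1')

# Hypothetical payload shaped like the API response: a base64-wrapped sources array
js_code = ("var sources = JSON.parse(atob("
           "'W3sic3JjIjogImh0dHA6Ly9leGFtcGxlLmNvbS92Lm1wNCJ9XQ=='));")

context = js2py.EvalJs({'atob': atob})  # expose the Python helper to the JS side
context.execute(js_code)                # run the snippet
for source in context.sources:          # the resulting JS array is iterable from Python
    print(source['src'])                # -> http://example.com/v.mp4
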
Example #3
def js2py_conversion(data, domain=".cliver.to"):
    logger.info()
    from lib import js2py
    import base64

    patron = ",\s*S='([^']+)'"
    data_new = scrapertools.find_single_match(data, patron)

    if not data_new:
        logger.error('js2py_conversion: NO data_new')

    js2py_code = ''
    try:
        for x in range(10):  # Up to 10 passes, or until a decode fails
            data_end = base64.b64decode(data_new).decode('utf-8')
            data_new = data_end
    except Exception:
        # The last pass that decoded cleanly is the JavaScript payload
        js2py_code = data_new
    else:
        logger.error('js2py_conversion: base64 data_new did NOT work: ' +
                     str(data_new))

    if not js2py_code:
        logger.error('js2py_conversion: NO js2py_code BASE64')
        return

    js2py_code = js2py_code.replace('document', 'window')
    js2py_code = js2py_code.replace(" location.reload();", "")
    js2py.disable_pyimport()
    context = js2py.EvalJs({'atob': atob})
    new_cookie = context.eval(js2py_code)

    logger.info('new_cookie: ' + str(new_cookie))

    dict_cookie = {
        'domain': domain,
    }

    if ';' in new_cookie:
        new_cookie = new_cookie.split(';')[0].strip()
        namec, valuec = new_cookie.split('=')
        dict_cookie['name'] = namec.strip()
        dict_cookie['value'] = valuec.strip()
    zanga = httptools.set_cookies(dict_cookie)
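
The try/except/else block above peels base64 layers off the payload: each pass decodes one layer, and the first decode that fails means the previous pass already produced plain JavaScript. The same idea as a standalone helper (hypothetical name, not part of the addon):

import base64
import binascii

def peel_base64(payload, max_passes=10):
    # Decode repeatedly; keep the last layer that still decoded cleanly.
    decoded = payload
    for _ in range(max_passes):
        try:
            decoded = base64.b64decode(decoded, validate=True).decode('utf-8')
        except (binascii.Error, UnicodeDecodeError):
            break  # the previous value of decoded is the plain-text JavaScript
    return decoded

Unlike the example, this returns the deepest layer even if every pass succeeds, and validate=True makes the stop condition stricter than a bare b64decode.
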
Example #4
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, canonical=canonical).data
    _sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
    _sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
    sl = eval(_sl)

    buttons = scrapertools.find_multiple_matches(
        data, '<button.*?class="selop" sl="([^"]+)">')

    if not buttons:
        buttons = [0, 1, 2]
    for id in buttons:
        title = '%s'
        new_url = golink(int(id), _sa, sl)
        data_new = httptools.downloadpage(new_url).data
        matches = scrapertools.find_multiple_matches(
            data_new, 'javascript">(.*?)</script>')
        js = ""
        for part in matches:
            js += part
        #logger.info("test before:" + js)

        try:
            matches = scrapertools.find_multiple_matches(
                data_new, '" id="(.*?)" val="(.*?)"')
            for zanga, val in matches:
                js = js.replace(
                    'var %s = document.getElementById("%s");' % (zanga, zanga),
                    "")
                js = js.replace('%s.getAttribute("val")' % zanga, '"%s"' % val)
            #logger.info("test1 after:" +js)
        except:
            pass

        # v1
        js = re.sub(r'(document\[.*?)=', 'prem=', js)

        # Crude patching, v2
        video = scrapertools.find_single_match(js,
                                               r"sources: \[\{src:(.*?), type")
        js = re.sub(r' videojs\((.*?)\);', video + ";", js)

        from lib import js2py
        js2py.disable_pyimport()
        context = js2py.EvalJs({'atob': atob})

        try:
            result = context.eval(js)
        except:
            logger.error("Js2Py no puede desofuscar el codigo, ¿cambió?")
            continue

        url = scrapertools.find_single_match(result, 'src="(.*?)"')
        #v2
        if not url:
            url = result.strip()

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=url,
                 action='play',
                 language='latino',
                 infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
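
findvideos() also has to work around js2py's lack of a DOM: every var X = document.getElementById("X") declaration and X.getAttribute("val") call in the scraped script is rewritten to the literal val attribute taken from the HTML, so the remaining JavaScript evaluates without a browser. A compressed sketch of that rewrite with made-up element ids and values:

import re

html = '<i id="k1" val="aHR0cA=="></i><i id="k2" val="Oi8v"></i>'
js = ('var k1 = document.getElementById("k1");'
      'var k2 = document.getElementById("k2");'
      'var token = atob(k1.getAttribute("val")) + atob(k2.getAttribute("val"));')

for elem_id, val in re.findall(r'id="([^"]+)" val="([^"]+)"', html):
    js = js.replace('var %s = document.getElementById("%s");' % (elem_id, elem_id), '')
    js = js.replace('%s.getAttribute("val")' % elem_id, '"%s"' % val)

# js is now: var token = atob("aHR0cA==") + atob("Oi8v");  -- DOM-free, ready for js2py
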