Example #1
def seasons(item):
    logger.info()
    id = "0"
    itemlist = []
    infoLabels = item.infoLabels
    
    ## Load statuses
    if account:
        status = httptools.downloadpage(host + '/a/status/all').json
    
    url_targets = item.url
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
    
    data = agrupa_datos(item.url)
    
    if account:
        str = get_status(status, "shows", id)
        # TODO: untangle this whole mess
        if str != "" and item.category != "Series" and "XBMC" not in item.title:
            platformtools.itemlist_refresh()
            title = str.replace('steelblue', 'darkgrey').replace('Siguiendo', 'Abandonar')
            itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
                                 thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
        elif item.category != "Series" and "XBMC" not in item.title:
            
            title = " [COLOR steelblue][B]( Seguir )[/B][/COLOR]"
            itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
                                 thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
        
    sid = scrapertools.find_single_match(data, r"<script>var sid = '(\d+)'")
    
    patron = 'itemprop="season".*?<a href=\'.*?/temporada-(\d+).*?'
    patron += 'alt="([^"]+)" src="([^"]+)"'
    
    matches = re.compile(patron, re.DOTALL).findall(data)

    
    for ssid, scrapedtitle, scrapedthumbnail in matches:
        if ssid == '0':
            scrapedtitle = "Especiales"
        infoLabels['season'] = ssid
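        # Kodi accepts extra request headers appended to a URL after "|" (used here for the thumbnail's User-Agent)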
        thumbnail = scrapedthumbnail.replace('tthumb/130x190', 'thumbs')
        thumbnail += '|User-Agent=%s' % httptools.get_user_agent()
        
        itemlist.append(
                Item(channel=item.channel, action="episodesxseason", title=scrapedtitle,
                     url=item.url, thumbnail=thumbnail, sid=sid, text_bold=True,
                     contentSerieName=item.contentSerieName,
                     contentSeasonNumber=ssid, infoLabels=infoLabels))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR greenyellow]Añadir esta serie a la videoteca[/COLOR]",
                             action="add_serie_to_library", url=item.url, text_bold=True, extra="episodios",
                             contentSerieName=item.contentSerieName,
                             ))

    return itemlist
Example #2
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    global data
    logger.debug("URL", page_url)
    video_urls = []
    # support.dbg()

    headers = {'User-Agent': httptools.get_user_agent(),
               'Referer': page_url,
               'Origin': 'https://ninjastream.to',
               'X-Requested-With': 'XMLHttpRequest'}

    apiUrl = 'https://ninjastream.to/api/video/get'
    post = {'id':page_url.split('/')[-1]}
    data = httptools.downloadpage(apiUrl, headers=headers, post=post).json

    if data.get('result',{}).get('playlist'):
        # support.dbg()
        url = data.get('result',{}).get('playlist')

        video_urls.append([url.split('.')[-1], url + '|Referer=' + page_url])

    return video_urls

# def decode(host):
#     Host = ''
#     for n in range(len(host)):
#         Host += chr(ord(host[n]) ^ ord('2'))
#     return Host
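
Note: the commented-out decode() above XORs every character of host with the character '2'. XOR with a fixed single-character key is its own inverse, so the same routine both encodes and decodes. A minimal standalone sketch of the same idea (hypothetical helper, not part of the original module):

def xor_decode(text, key='2'):
    # XOR each character with the key; applying the function twice returns the original string
    return ''.join(chr(ord(c) ^ ord(key)) for c in text)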
Example #3
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    global data
    logger.debug("URL", page_url)

    video_urls = []
    host = 'https://dood.to'
    headers = {'User-Agent': httptools.get_user_agent(), 'Referer': page_url}

    match = support.match(
        data,
        patron=
        r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)'''
    ).match
    if match:
        url, token = match
        ret = scraper.get(host + url, headers=headers).text
        video_urls.append([
            'mp4 [DooD Stream]',
            '{}{}{}{}|Referer={}'.format(randomize(ret), url, token,
                                         int(time.time() * 1000), host)
        ])

    return video_urls
Example #4
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    page_url = page_url.replace("https", "http") + "?wmode=transparent"
    data = httptools.downloadpage(page_url).data
    media_url = scrapertools.find_single_match(data, 'src: "([^"]+)"')
    qualities = scrapertools.find_single_match(data, r'qualities: (\[.*?\])')
    qualities = scrapertools.find_multiple_matches(qualities, ' "([^"]+)')
    for calidad in qualities:
        media = media_url
        title = "%s [filepup]" % (calidad)
        if "480" not in calidad:
            med = media_url.split(".mp4")
            media = med[0] + "-%s.mp4" % calidad + med[1]
        media += "|Referer=%s" % page_url
        media += "&User-Agent=" + httptools.get_user_agent()
        video_urls.append([title, media, int(calidad.replace("p", ""))])
    video_urls.sort(key=lambda x: x[2])
    for video_url in video_urls:
        video_url[2] = 0
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
Example #5
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):

    video_urls = list()
    key = scrapertools.find_single_match(data, 'render=([^"]+)"')
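    # "aHR0cHM6Ly9ldm9sb2FkLmlvOjQ0Mw" is base64 for "https://evoload.io:443"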
    co = "aHR0cHM6Ly9ldm9sb2FkLmlvOjQ0Mw"
    loc = "https://evoload.io"
    tk = generictools.rec(key, co, "", loc)
    player_url = "https://evoload.io/SecurePlayer"
    code = scrapertools.find_single_match(page_url, "/e/([A-z0-9]+)")
    post = {"code": code, "token": tk}
    v_data = httptools.downloadpage(player_url,
                                    headers={
                                        "User-Agent":
                                        httptools.get_user_agent(),
                                        "Referer": page_url
                                    },
                                    post=post).json
    if "stream" in v_data:
        if "backup" in v_data["stream"]:
            media_url = v_data["stream"]["backup"]
        else:
            media_url = v_data["stream"]["src"]
        ext = v_data["name"][-4:]
        video_urls.append(['%s [evoload]' % ext, media_url])
    else:
        pass
    return video_urls
Example #6
def get_video_url(page_url, video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    url = re.sub(r'(\.\w{2,3})/\w', '\\1/getlink-', data.url) + '.dll'
    url += "|User-Agent=%s" % httptools.get_user_agent()
    video_urls.append([".mp4 [Streamz]", url])

    return video_urls
Example #7
def get_video_url(page_url, video_password):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    url = re.sub(r'(\.\w{2,3})/\w', '\\1/getlink-', data.url) + '.dll'
    url += "|User-Agent=%s" % httptools.get_user_agent()
    url += '|follow_redirects=False).headers["location"]'
    # url += "|verifypeer=false"
    video_urls.append([".mp4 [Streamz]", url])

    return video_urls
Example #8
def get_ua(data_assistant):
    
    if not data_assistant or not isinstance(data_assistant, dict):
        return 'Default'
    
    UA = data_assistant.get("userAgent", 'Default')
    
    if UA == httptools.get_user_agent():
        UA = 'Default'

    config.set_setting('cf_assistant_ua', UA)

    return UA
Example #9
def get_link(url, referer):
    _id = scrapertools.find_single_match(referer, 'ver/([^/]+)/')
    #logger.info("play: %s" % item.url)
    itemlist = []
    post = "embed_id=%s" % _id
    clen = len(post)
    headers = {"Referer": referer}
    data = httptools.downloadpage(url, post=post, headers=headers).data
    dict_data = jsontools.load(data)
    frame_src = scrapertools.find_single_match(dict_data["value"],
                                               'iframe src="([^"]+)"')
    new_data = httptools.downloadpage(frame_src, headers={
        "Referer": referer
    }).data
    url = scrapertools.find_single_match(new_data, '"file":"([^"]+)"')
    url = url.replace("\\", "")
    logger.info("tabon3 %s" % new_data)
    ua = httptools.get_user_agent()
    if "openstream" in url:
        ua = httptools.get_user_agent()
        url = "%s|User-Agent=%s" % (url, ua)
    link = url
    return link
Example #10
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    # Code taken from Kodi On Demand (KOD)
    # https://github.com/kodiondemand/addon/blob/master/servers/streamtape.py
    find_url = scrapertools.find_multiple_matches(data, 'innerHTML = ([^;]+)')[-1]
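    # js2py evaluates the obfuscated JavaScript expression to recover the protocol-relative URL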
    possible_url = js2py.eval_js(find_url)
    url = "https:" + possible_url
    url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")

    video_urls.append(['MP4 [streamtape]', "{}|{}".format(url, "User-Agent=%s" % httptools.get_user_agent())])

    return video_urls
Example #11
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    pattern = """getElementById\('vide\w+link'\).innerHTML = "([^"]+)" .* \('([^']+)'\)(.substring\(\d+\))"""
    url_data = scrapertools.find_single_match(data, pattern)
    url = "https:" + url_data[0].strip(
    ) + url_data[1][int(scrapertools.find_single_match(url_data[2], "\d+")):]
    video_urls.append([
        'MP4 [streamtape]',
        url + "|User-Agent=%s" % httptools.get_user_agent()
    ])
    return video_urls
Example #12
def __init__(self, key, referer):
    if sys.version_info[0] < 3:
        self.rc = None
        platformtools.dialog_ok(
            'reCAPTCHA',
            'Il sito sta mostrando la schermata "Non sono un robot".\nQuesta schermata tuttavia è superabile solo da kodi 19'
        )
    else:
        prog = platformtools.dialog_progress(
            'Caricamento reCAPTCHA',
            'Il sito sta mostrando la schermata "Non sono un robot"')
        filetools.rmdirtree(temp_dir)
        self.rc = ReCaptcha(api_key=key,
                            site_url=referer,
                            user_agent=httptools.get_user_agent(),
                            lang=lang)
        prog.close()
Example #13
def get_link(source, referer):
    logger.info()
    itemlist = []

    headers = {"Referer": referer}
    _id = scrapertools.find_single_match(referer, 'ver/([^/]+)/')
    post = "embed_id=%s" % _id

    dict_data = httptools.downloadpage(source, post=post, headers=headers).json
    frame_src = scrapertools.find_single_match(dict_data["value"],
                                               'iframe src="([^"]+)"')

    try:
        new_data = httptools.downloadpage(frame_src, headers=headers).data
    except:
        logger.error('Problema con headers???')
        return ''

    if 'hydrax.net' in new_data:
        slug = scrapertools.find_single_match(new_data,
                                              '"slug","value":"([^"]+)"')
        post = "slug=%s&dataType=mp4" % slug
        #based on https://github.com/thorio/KGrabber/issues/35#issuecomment-667401636
        data = httptools.downloadpage("https://ping.iamcdn.net/",
                                      post=post).json
        url = data.get("url", '')
        if url:
            import base64
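            # the API serves the URL base64-encoded with its first character moved to the end; rotate it back before decoding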
            url = "https://www.%s" % base64.b64decode(url[-1:] + url[:-1])
            url += '|Referer=https://playhydrax.com/?v=%s&verifypeer=false' % slug

        return url

    new_data = new_data.replace("'", '"')
    patron = '"file":'

    if 's=fserver' in source:
        patron = r'window.open\('

    url = scrapertools.find_single_match(new_data, patron + '"([^"]+)"')

    url = url.replace("\\", "")
    url += "|User-Agent=%s" % httptools.get_user_agent()

    return url
Example #14
def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    post = ""
    patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"'
    match = scrapertools.find_multiple_matches(data, patron)
    for nombre, valor in match:
        post += nombre + "=" + valor + "&"
    time.sleep(1)
    data1 = httptools.downloadpage(page_url, post=post, headers=headers).data
    patron = "window.open\('([^']+)"
    file = scrapertools.find_single_match(data1, patron).replace(" ", "%20")
    file += "|User-Agent=" + httptools.get_user_agent()
    file += "&Host=fs30.indifiles.com:182"
    video_urls = []
    videourl = file
    video_urls.append([".MP4 [bdupload]", videourl])
    return video_urls
Example #15
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = list()
    if "const source = '" in data:
        url = scrapertools.find_single_match(data, "const source = '([^']+)")
        v_type = "hls"
    else:
        url, v_type = scrapertools.find_single_match(
            data, '"file": "([^"]+)",\s+"type": "([^"]+)"')
    headers = {"referer": page_url}

    if v_type == "mp4":
        url = httptools.downloadpage(url,
                                     headers=headers,
                                     follow_redirects=False,
                                     stream=True).headers["location"]
        page_url = "%s|Referer=%s&User-Agent=%s" % (url, page_url,
                                                    httptools.get_user_agent())

    elif v_type == "hls":

        hls_data = httptools.downloadpage(url, headers=headers).data
        base_url = scrapertools.find_single_match(hls_data, "(https?://[^/]+)")
        hls_data = hls_data.replace(base_url, 'http://localhost:8781')
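        # point the rewritten playlist at the local helper (servop, started below), which proxies requests back to base_url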
        m3u8 = os.path.join(config.get_data_path(), "op_master.m3u8")
        outfile = open(m3u8, 'wb')
        outfile.write(hls_data)
        outfile.close()
        page_url = m3u8
        v_type = "m3u8"
        servop.start(base_url)
    else:
        return video_urls

    video_urls = [["%s [Oprem]" % v_type, page_url]]

    return video_urls
Example #16
def play(item):
    import time
    logger.info()
    itemlist = []
    s = item.s_id['id']
    uri = item.uri
    tt = int(time.time()*1000)
    headers = {'Referer':item.url.replace('/json/repo', '/film').replace('index.json', ''),
               'X-Requested-With': 'XMLHttpRequest'}
    uri_1 = 'http://tv-vip.com/video2-prod/s/uri?uri=/transcoder%s&s=%s' % (uri, s)
    data = httptools.downloadpage(uri_1, headers=headers, forced_proxy=True).json
    b = data['b']
    tt = data['a']['tt']
    mm = data['a']['mm']
    bb = data['a']['bb']

    url = 'http://%s.%s/e/transcoder%s?tt=%s&mm=%s&bb=%s' % (s, b, uri, tt, mm, bb)
    url += "|User-Agent=%s" % httptools.get_user_agent()
    itemlist.append(item.clone(url=url))
    return itemlist
Example #17
def get_video_url(page_url, video_password):
    logger.debug("(page_url='%s')" % page_url)
    video_urls = []
    from core.support import match
    matches = match(
        data, patron=r'(eval\(function\(p,a,c,k,e,d\).*?)\s+</script>').matches
    unpacked = ''
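    # jsunpack reverses the "eval(function(p,a,c,k,e,d)...)" packer used by the page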
    for packed in matches:
        unpacked += jsunpack.unpack(packed) + '\n'

    urls = match(
        unpacked,
        patron=
        r"videojs\d+[^;]+[^']+'[^']+'[^']+'(https://streamz.*?/get.*?.dll)"
    ).matches

    for url in urls:
        url = url + "|User-Agent=%s" % httptools.get_user_agent()
        if not video_urls or url not in video_urls[-1]:
            video_urls.append(["[streamZ]", url])

    return video_urls
Example #18
def get_link(url, referer):
    logger.info()
    itemlist = []

    headers = {"Referer": referer}
    _id = scrapertools.find_single_match(referer, 'ver/([^/]+)/')
    post = "embed_id=%s" % _id

    dict_data = httptools.downloadpage(url, post=post, headers=headers).json
    frame_src = scrapertools.find_single_match(dict_data["value"],
                                               'iframe src="([^"]+)"')

    try:
        new_data = httptools.downloadpage(frame_src, headers=headers).data
    except:
        logger.error('Problema con headers???')
        return ''

    # TODO: figure out how to play this list in Kodi
    '''if 'hydrax.net' in new_data:
        slug = scrapertools.find_single_match(new_data, '"slug","value":"([^"]+)"')
        post = "slug=%s&dataType=mp4" % slug
        ua = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Mobile Safari/537.36"
        
        data = httptools.downloadpage("https://multi.hydrax.net/guest", post=post, headers={"User-Agent": ua}).data

        url = scrapertools.find_single_match(data, '"link":"([^"]+)"')
        url += '|User-Agent=%s' % ua
        
        return url'''

    url = scrapertools.find_single_match(
        new_data, '(?:"file":|var urlVideo = )"([^"]+)"')

    url = url.replace("\\", "")
    url += "|User-Agent=%s" % httptools.get_user_agent()

    return url
Example #19
def play(item):  # Prepares the download of the .torrent files and external subtitles
    logger.info()
    itemlist = []
    headers = []
    import os
    from core import downloadtools
    from core import ziptools

    # look for the .torrent URL
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?">\s*Torrent:<\/td><td class="(?:[^"]+)?">\s*<img src="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*border="(?:[^"]+)?"\s*\/>\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?" href="([^"]+)".*?<\/a>'
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                      httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass
    status, itemlist = check_blocked_IP(data, itemlist)  # check whether the IP has been blocked
    if status:
        return itemlist  # IP blocked
    if not scrapertools.find_single_match(data, patron):
        logger.error(
            'ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web.  Verificar en la Web esto último y reportar el error con el log: PATRON: '
            + patron + ' / DATA: ' + data)
        itemlist.append(
            item.clone(
                action='',
                title=item.channel.capitalize() +
                ': ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web.  Verificar en la Web esto último y reportar el error con el log'
            ))
        return itemlist
    item.url = urlparse.urljoin(host,
                                scrapertools.find_single_match(data, patron))

    # look for Spanish subtitles
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*>\s*Subs.*?<\/td><td class="(?:[^"]+)?"\s*>(.*?)(?:<br\/>)?<\/td><\/tr>'
    data_subt = scrapertools.find_single_match(data, patron)
    if data_subt:
        patron = '<a href="([^"]+)"\s*onmouseover="return overlib\('
        patron += "'Download Spanish subtitles'"
        patron += '\)"\s*onmouseout="(?:[^"]+)?"\s*><img src="(?:[^"]+)?"\s*><\/a>'
        subt = scrapertools.find_single_match(data_subt, patron)
        if subt:
            item.subtitle = urlparse.urljoin(host, subt)

    if item.subtitle:  # if there are subtitle URLs, download them
        headers.append(["User-Agent", httptools.get_user_agent()])  # use the default User-Agent
        videolibrary_path = config.get_videolibrary_path()  # absolute path based on the video library
        if videolibrary_path.lower().startswith("smb://"):  # for SMB connections, use local userdata
            videolibrary_path = config.get_data_path()  # absolute path based on userdata
        videolibrary_path = os.path.join(videolibrary_path, "subtitles")
        # The subtitles folder is deleted first to clean it, then recreated
        if os.path.exists(videolibrary_path):
            import shutil
            shutil.rmtree(videolibrary_path, ignore_errors=True)
            time.sleep(1)
        if not os.path.exists(videolibrary_path):
            os.mkdir(videolibrary_path)
        subtitle_name = 'Rarbg-ES_SUBT.zip'  # name of the subtitles file
        subtitle_folder_path = os.path.join(videolibrary_path, subtitle_name)  # download path
        ret = downloadtools.downloadfile(item.subtitle,
                                         subtitle_folder_path,
                                         headers=headers,
                                         continuar=True,
                                         silent=True)

        if os.path.exists(subtitle_folder_path):
            # Unzip the archive inside the addon
            # ---------------------------------
            try:
                unzipper = ziptools.ziptools()
                unzipper.extract(subtitle_folder_path, videolibrary_path)
            except:
                import xbmc
                xbmc.executebuiltin('XBMC.Extract("%s", "%s")' %
                                    (subtitle_folder_path, videolibrary_path))
                time.sleep(1)

            # Delete the downloaded zip
            # ------------------------
            os.remove(subtitle_folder_path)

            # Take the first subtitles file as the default
            for raiz, subcarpetas, ficheros in os.walk(videolibrary_path):
                for f in ficheros:
                    if f.endswith(".srt"):
                        #f_es = 'rarbg_subtitle.spa.srt'
                        f_es = scrapertools.find_single_match(
                            item.url,
                            '&f=(.*?).torrent$').replace('.', ' ').replace(
                                '-', ' ').lower() + '.spa.srt'
                        if not f_es:
                            f_es = item.infoLabels['originaltitle'] + '.spa.srt'
                            f_es = f_es.replace(':', '').lower()
                        os.rename(os.path.join(videolibrary_path, f),
                                  os.path.join(videolibrary_path, f_es))
                        item.subtitle = os.path.join(videolibrary_path, f_es)  # subtitles file
                        break
                break

    itemlist.append(item.clone())  # normal playback

    return itemlist
Example #20
def novedades_episodios(item):
    logger.info()
    itemlist = []
    ## Load statuses
    status = check_status()

    ## Episodes
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]
    old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
    start = "%s" % (int(old_start) + 24)
    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post
    #episodes = httptools.downloadpage(url, post=post).json
    episodes = httptools.downloadpage(url,
                                      post=post,
                                      headers={
                                          "Referer": item.url
                                      }).json
    for episode in episodes:
        # Fix for thumbnails
        thumb = episode['show'].get('thumbnail', '')
        if not thumb:
            thumb = episode.get('thumbnail', '')
        ua = httptools.get_user_agent()
        thumbnail = "%s/thumbs/%s|User-Agent=%s" % (host, thumb, ua)

        temporada = episode['season']
        episodio = episode['episode']
        #if len(episodio) == 1: episodio = '0' + episodio

        # Languages
        language = episode.get('languages', '[]')
        texto_idiomas, langs = extrae_idiomas(language, list_language=True)

        if language != "[]" and show_langs and not unify:
            idiomas = "[COLOR darkgrey]%s[/COLOR]" % texto_idiomas

        else:
            idiomas = ""

        # Series title in Spanish; fall back to English if missing
        cont_en = episode['show']['title'].get('en', '').strip()
        contentSerieName = episode['show'].get('es', cont_en).strip()

        title = ""
        if episode['title']:
            try:
                title = episode['title']['es'].strip()
            except:
                title = episode['title']['en'].strip()
        if len(title) == 0: title = "Episodio " + episodio

        title = '%s %sx%s: [COLOR greenyellow]%s[/COLOR] %s' % (
            contentSerieName, temporada, episodio, title, idiomas)

        if account:
            str = get_status(status, 'episodes', episode['id'])
            if str != "": title += str

        url = urlparse.urljoin(
            host, '/serie/' + episode['permalink'] + '/temporada-' +
            temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 contentSerieName=contentSerieName,
                 url=url,
                 thumbnail=thumbnail,
                 contentType="episode",
                 language=langs,
                 text_bold=True))

    if len(itemlist) == 24:
        itemlist.append(
            Item(channel=item.channel,
                 action="novedades_episodios",
                 title=">> Página siguiente",
                 url=next_page,
                 text_bold=True))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist
Example #21
def episodesxseason(item):
    logger.info()
    itemlist = []

    url = host + "/a/episodes"
    infoLabels = item.infoLabels
    sid = item.sid
    ssid = item.contentSeasonNumber

    # if there is an account
    status = check_status()

    post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)
    #episodes = httptools.downloadpage(url, post=post).json
    episodes = httptools.downloadpage(url,
                                      post=post,
                                      headers={
                                          "Referer":
                                          item.url + "/temporada-" + ssid
                                      }).json

    for episode in episodes:

        language = episode['languages']
        temporada = episode['season']
        episodio = episode['episode']

        # Fix for thumbnails
        thumb = episode.get('thumbnail', '')
        if not thumb:
            thumb = episode['show'].get('thumbnail', '')
        ua = httptools.get_user_agent()
        thumbnail = "%s/thumbs/%s|User-Agent=%s" % (host, thumb, ua)

        infoLabels['episode'] = episodio

        if len(episodio) == 1: episodio = '0' + episodio

        # Languages
        texto_idiomas, langs = extrae_idiomas(language, list_language=True)

        if language != "[]" and show_langs and not unify:
            idiomas = "[COLOR darkgrey]%s[/COLOR]" % texto_idiomas

        else:
            idiomas = ""

        title = ""
        if episode['title']:
            title = episode['title'].get('es', '')
            if not title:
                title = episode['title'].get('en', '')
        if len(title) == 0: title = "Episodio " + episodio

        serie = item.contentSerieName

        title = '%sx%s: [COLOR greenyellow]%s[/COLOR] %s' % (
            temporada, episodio, title.strip(), idiomas)
        if account:
            str = get_status(status, 'episodes', episode['id'])
            if str != "": title += str

        url = urlparse.urljoin(
            host, '/serie/' + episode['permalink'] + '/temporada-' +
            temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"
        itemlist.append(
            item.clone(action="findvideos",
                       title=title,
                       url=url,
                       contentType="episode",
                       language=langs,
                       text_bold=True,
                       infoLabels=infoLabels,
                       thumbnail=thumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist
Example #22
def fichas(item):
    logger.info()
    itemlist = []
    or_matches = ""
    textoidiomas = ''
    infoLabels = dict()
    ## Load statuses
    status = check_status()

    if item.title == "Buscar...":
        data = agrupa_datos(item.url, post=item.extra)
        s_p = scrapertools.find_single_match(
            data,
            '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
                '<h3 class="section-title">')
        if len(s_p) == 1:
            data = s_p[0]
            if 'Lo sentimos</h3>' in s_p[0]:
                return [
                    Item(channel=item.channel,
                         title=
                         "[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR steelblue]"
                         + item.texto.replace('%20', ' ') +
                         "[/COLOR] sin resultados")
                ]
        else:
            data = s_p[0] + s_p[1]
    else:
        data = agrupa_datos(item.url)

    data = re.sub(
        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
        '<a href="([^"]+)"[^<]+' + \
        '<img.*?src="([^"]+)".*?' + \
        '<div class="left"(.*?)</div>' + \
        '<div class="right"(.*?)</div>.*?' + \
        'title="([^"]+)".*?' + \
        'onclick="setFavorite.\d, (\d+),',
        r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
        data
    )
    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.page != '':
        or_matches = matches
        matches = matches[item.page:item.page + 40]

    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:

        thumbnail = scrapedthumbnail.replace('tthumb/130x190', 'thumbs')
        thumbnail += '|User-Agent=%s' % httptools.get_user_agent()
        language = ''
        title = scrapedtitle.strip()
        show = title

        # Rating
        if scrapedrating != ">" and not unify:
            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>',
                                r'\1,\2', scrapedrating)
            title += " [COLOR greenyellow](%s)[/COLOR]" % valoracion

        # Languages
        if scrapedlangs != ">":
            textoidiomas, language = extrae_idiomas(scrapedlangs)

            if show_langs:
                title += " [COLOR darkgrey]%s[/COLOR]" % textoidiomas

        url = urlparse.urljoin(item.url, scrapedurl)
        # Action for series/movies
        if "/serie" in url or "/tags-tv" in url:
            action = "seasons"
            url += "###" + scrapedid + ";1"
            type = "shows"
            contentType = "tvshow"
        else:
            action = "findvideos"
            url += "###" + scrapedid + ";2"
            type = "movies"
            contentType = "movie"
            infoLabels['year'] = '-'
        # user status items in the title (watched, pending, etc.)
        if account:
            str = get_status(status, type, scrapedid)
            if str != "": title += str
        # Show the content type after a search
        if item.title == "Buscar...":
            bus = host[-4:]
            # Cosmetic tweaks (TODO: test unify)
            c_t = "darkgrey"

            tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' % bus)
            if tag_type == 'pelicula':
                c_t = "steelblue"
            title += " [COLOR %s](%s)[/COLOR]" % (c_t, tag_type.capitalize())

        if "/serie" in url or "/tags-tv" in url:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     contentSerieName=show,
                     text_bold=True,
                     contentType=contentType,
                     language=language,
                     infoLabels=infoLabels,
                     thumbnail=thumbnail))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     text_bold=True,
                     contentTitle=show,
                     language=language,
                     infoLabels=infoLabels,
                     thumbnail=thumbnail))
    ## Pagination
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">.raquo;</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 text_bold=True))

    elif item.page != '':
        if item.page + 40 < len(or_matches):
            itemlist.append(
                item.clone(page=item.page + 40,
                           title=">> Página siguiente",
                           text_bold=True,
                           text_color="blue"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist
Example #23
def items_usuario(item):
    logger.info()
    itemlist = []
    ## Load statuses
    status = check_status()
    ## User cards
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]
    old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
    limit = scrapertools.find_single_match(post, 'limit=(\d+)')
    start = "%s" % (int(old_start) + int(limit))
    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post
    ## Load the user cards
    fichas_usuario = httptools.downloadpage(url,
                                            post=post,
                                            headers={
                                                'referer': host
                                            }).json
    for ficha in fichas_usuario:
        try:
            title = ficha['title']['es'].strip()
        except:
            title = ficha['title']['en'].strip()
        try:
            title = title.encode('utf-8')
        except:
            pass
        show = title
        try:
            thumbnail = host + "/thumbs/" + ficha['thumbnail']
        except:
            thumbnail = host + "/thumbs/" + ficha['thumb']
        thumbnail += '|User-Agent=%s' % httptools.get_user_agent()
        try:
            url = urlparse.urljoin(
                host,
                '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
            action = "seasons"
            str = get_status(status, 'shows', ficha['id'])
            if "show_title" in ficha:
                action = "findvideos"
                try:
                    serie = ficha['show_title']['es'].strip()
                except:
                    serie = ficha['show_title']['en'].strip()
                temporada = ficha['season']
                episodio = ficha['episode']
                serie = "[COLOR whitesmoke]" + serie + "[/COLOR]"
                if len(episodio) == 1: episodio = '0' + episodio
                try:
                    title = temporada + "x" + episodio + " - " + serie + ": " + title
                except:
                    title = temporada + "x" + episodio + " - " + serie.decode(
                        'iso-8859-1') + ": " + title.decode('iso-8859-1')
                url = urlparse.urljoin(
                    host, '/serie/' + ficha['permalink'] + '/temporada-' +
                    temporada + '/episodio-' +
                    episodio) + "###" + ficha['id'] + ";3"
                if str != "": title += str
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     contentSerieName=show,
                     text_bold=True))
        except:
            url = urlparse.urljoin(host, '/pelicula/' +
                                   ficha['perma']) + "###" + ficha['id'] + ";2"
            str = get_status(status, 'movies', ficha['id'])
            if str != "": title += str
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     contentTitle=show,
                     url=url,
                     thumbnail=thumbnail,
                     text_bold=True,
                     infoLabels={'year': '-'}))
    if len(itemlist) == int(limit):
        itemlist.append(
            Item(channel=item.channel,
                 action="items_usuario",
                 title=">> Página siguiente",
                 url=next_page,
                 text_bold=True))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist
Example #24
def get_cl(resp, timeout=20, debug=False, extraPostDelay=15, retry=False, blacklist=True, retryIfTimeout=True, **kwargs):
    blacklist_clear = True
    if 'hideproxy' in resp.url or 'webproxy' in resp.url or kwargs.get('proxies'):
        blacklist_clear = False
        blacklist = False
    
    if timeout < 15: timeout = 20
    if timeout + extraPostDelay > 35: timeout = 20

    domain_full = urlparse.urlparse(resp.url).netloc
    domain = domain_full
    if blacklist and not retry: 
        blacklist_clear = check_blacklist(domain_full)
    
    if blacklist_clear:
        host = config.get_system_platform()[:1]
        
        freequent_data = [domain, 'CF2,0.0.0,0,%s0,NoApp' % host]
        
        check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True, retry=retry)
        if not isinstance(check_assistant, dict) and retry:
            alfa_assistant.close_alfa_assistant()
            time.sleep(2)
            check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True, retry=True)
            if not check_assistant:
                time.sleep(10)
                check_assistant = alfa_assistant.get_generic_call('getWebViewInfo', timeout=2, alfa_s=True)
            
        if check_assistant and isinstance(check_assistant, dict):

            if check_assistant.get('assistantLatestVersion') and check_assistant.get('assistantVersion'):
                installed_version = check_assistant['assistantVersion'].split('.')
                available_version = check_assistant['assistantLatestVersion'].split('.')
                newer = False
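                # compare the dotted version strings element by element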
                for i, ver in enumerate(available_version):
                    if int(ver) > int(installed_version[i]):
                        newer = True
                        break
                    if int(ver) < int(installed_version[i]):
                        break
                if newer:
                    help_window.show_info('cf_2_02', wait=False)

            ua = get_ua(check_assistant)
            
            try:
                vers = int(scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
            except:
                vers = 0

            wvbVersion = check_assistant.get('wvbVersion', '0.0.0').split('.')[0]
            if len(wvbVersion) > 3: wvbVersion = wvbVersion[:2]
            freequent_data[1] = 'CF2,%s,%s,%s%s,' % (check_assistant.get('assistantVersion', '0.0.0'), wvbVersion, host, vers)

            if vers:
                dan = {'User-Agent': ua}
                resp.headers.update(dict(dan))
                ua = None
            else:
                ua = httptools.get_user_agent()

            logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))

            jscode = get_jscode(1, 'KEYCODE_ENTER', 1)

            url_cf = scrapertools.find_single_match(resp.url, '(http.*\:\/\/(?:www\S*.)?\w+\.\w+(?:\.\w+)?)(?:\/)?') + '|cf_clearance'
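            # ask the assistant to return as soon as a cf_clearance cookie is seen for this URL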

            data_assistant = alfa_assistant.get_urls_by_page_finished(resp.url, timeout=timeout, getCookies=True, userAgent=ua,
                                                                        disableCache=True, debug=debug, jsCode=jscode,
                                                                        extraPostDelay=extraPostDelay, clearWebCache=True, 
                                                                        removeAllCookies=True, returnWhenCookieNameFound=url_cf,
                                                                        retryIfTimeout=retryIfTimeout
                                                                        )
            logger.debug("data assistant: %s" % data_assistant)

            domain_ = domain
            split_lst = domain.split(".")
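            # drop the subdomain so the cf_clearance cookie is set for the parent domain (e.g. ".example.com")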

            if len(split_lst) > 2:
                domain = domain.replace(split_lst[0], "")
            
            if not domain.startswith('.'):
                domain = "."+domain
            
            get_ua(data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get("cookies", None):

                logger.debug("Lista cookies: %s" % data_assistant.get("cookies", []))
                for cookie in data_assistant["cookies"]:
                    cookieslist = cookie.get("cookiesList", None)
                    val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([A-z0-9_-]+)')
                    dom = cookie.get("urls", None)
                    logger.debug("dominios: %s" % dom[0])

                    if 'cf_clearance' in cookieslist and val:
                        
                        dict_cookie = {'domain': domain,
                                       'name': 'cf_clearance',
                                       'value': val}
                        if domain_ in dom[0]:
                            httptools.set_cookies(dict_cookie)
                            rin = {'Server': 'Alfa'}

                            resp.headers.update(dict(rin))
                            logger.debug("cf_clearence=%s" % val)
                            
                            if not retry:
                                freequent_data[1] += 'OK'
                            else:
                                freequent_data[1] += 'OK_R'
                            freequency(freequent_data)

                            return resp

                    else:
                        logger.error("No cf_clearance")
                else:
                    freequent_data[1] += 'NO-CFC'
            else:
                freequent_data[1] += 'ERR'
                logger.error("No Cookies o Error en conexión con Alfa Assistant")

            if not retry:
                config.set_setting('cf_assistant_ua', '')
                logger.debug("No se obtuvieron resultados, reintentando...")
                return get_cl(resp, timeout=timeout-5, extraPostDelay=extraPostDelay, \
                            retry=True, blacklist=True, retryIfTimeout=False, **kwargs)
        elif host == 'a':
            help_window.show_info('cf_2_01')
        
        freequency(freequent_data)
        
        if filetools.exists(PATH_BL):
            bl_data = jsontools.load(filetools.read(PATH_BL))
        else:
            bl_data = {}
        bl_data[domain_full] = time.time()
        filetools.write(PATH_BL, jsontools.dump(bl_data))

    msg = 'Detected a Cloudflare version 2 Captcha challenge,\
        This feature is not available in the opensource (free) version.'
    resp.status_code = msg
    
    raise CloudflareChallengeError(msg)
Example #25
def get_source(url,
               resp,
               timeout=5,
               debug=False,
               extraPostDelay=5,
               retry=False,
               blacklist=True,
               headers=None,
               retryIfTimeout=True,
               cache=False,
               clearWebCache=False,
               mute=True,
               alfa_s=False,
               elapsed=0,
               **kwargs):
    blacklist_clear = True
    data = ''
    source = False
    if not elapsed: elapsed = time.time()
    elapsed_max = 40
    expiration = config.get_setting('cf_assistant_bl_expiration',
                                    default=30) * 60
    expiration_final = 0
    security_error_blackout = (5 * 60) - expiration

    if debug: alfa_s = False

    if not resp:
        resp = {'status_code': 429, 'headers': {}}
        resp = type('HTTPResponse', (), resp)

    if not alfa_s: logger.debug("ERROR de descarga: %s" % resp.status_code)

    opt = kwargs.get('opt', {})

    domain_full = urlparse.urlparse(url).netloc
    domain = domain_full
    pcb = base64.b64decode(
        config.get_setting('proxy_channel_bloqued')).decode('utf-8')
    if 'hideproxy' in url or 'webproxy' in url or 'hidester' in url or '__cpo=' in url  \
                          or httptools.TEST_ON_AIR or domain in pcb:
        blacklist_clear = False
        blacklist = False

    if timeout + extraPostDelay > 35: timeout = 20

    if blacklist and not retry:
        blacklist_clear = check_blacklist(domain_full)

    host = config.get_system_platform()[:1]
    freequent_data = [domain, 'Cha,0.0.0,0,%s0,BlakL' % host]
    if blacklist_clear:
        freequent_data = [domain, 'Cha,0.0.0,0,%s0,App' % host]
        if not retry:
            freequent_data[1] += 'KO'
        else:
            freequent_data[1] += 'KO_R'

        check_assistant = alfa_assistant.open_alfa_assistant(
            getWebViewInfo=True, retry=True, assistantLatestVersion=False)
        if not isinstance(check_assistant, dict) and not retry:
            alfa_assistant.close_alfa_assistant()
            time.sleep(2)
            check_assistant = alfa_assistant.open_alfa_assistant(
                getWebViewInfo=True, retry=True, assistantLatestVersion=False)
            logger.debug("Reintento en acceder al Assistant: %s - %s" \
                         % ('OK' if isinstance(check_assistant, dict) else 'ERROR', time.time() - elapsed))

        if check_assistant and isinstance(check_assistant, dict):

            if check_assistant.get(
                    'assistantLatestVersion') and check_assistant.get(
                        'assistantVersion'):
                installed_version = check_assistant['assistantVersion'].split(
                    '.')
                available_version = check_assistant[
                    'assistantLatestVersion'].split('.')
                newer = False
                for i, ver in enumerate(available_version):
                    if int(ver) > int(installed_version[i]):
                        newer = True
                        break
                    if int(ver) < int(installed_version[i]):
                        break
                if newer:
                    help_window.show_info('cf_2_02', wait=False)

            ua = get_ua(check_assistant)

            try:
                vers = int(
                    scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
            except:
                vers = 0

            wvbVersion = check_assistant.get('wvbVersion',
                                             '0.0.0').split('.')[0]
            if len(wvbVersion) > 3: wvbVersion = wvbVersion[:2]
            freequent_data[1] = 'Cha,%s,%s,%s%s,' % (check_assistant.get(
                'assistantVersion', '0.0.0'), wvbVersion, host, vers)
            if not retry:
                freequent_data[1] += 'Src'
            else:
                freequent_data[1] += 'Src_R'

            if vers:
                dan = {'User-Agent': ua}
                resp.headers.update(dict(dan))
                ua = None
            else:
                ua = httptools.get_user_agent()

            if not alfa_s:
                logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))

            jscode = None

            url_cf = scrapertools.find_single_match(
                url, '(http.*\:\/\/(?:www\S*.)?\w+\.\w+(?:\.\w+)?)(?:\/)?'
            ) + '|cf_clearance'

            data_assistant = alfa_assistant.get_source_by_page_finished(
                url,
                timeout=timeout,
                getCookies=True,
                userAgent=ua,
                disableCache=cache,
                debug=debug,
                jsCode=jscode,
                extraPostDelay=extraPostDelay,
                clearWebCache=clearWebCache,
                removeAllCookies=True,
                returnWhenCookieNameFound=url_cf,
                retryIfTimeout=retryIfTimeout,
                useAdvancedWebView=True,
                headers=headers,
                mute=mute,
                alfa_s=alfa_s)
            if not alfa_s: logger.debug("data assistant: %s" % data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get('htmlSources', []) \
                                                and data_assistant['htmlSources'][0].get('source', ''):
                try:
                    data = base64.b64decode(data_assistant['htmlSources'][0]
                                            ['source']).decode('utf-8')
                    source = True
                except:
                    pass

                if source and 'accessing a cross-origin frame' in data:
                    source = False
                    retry = True
                    expiration_final = security_error_blackout
                    freequent_data[1] = 'Cha,%s,%s,%s%s,' % (
                        check_assistant.get('assistantVersion',
                                            '0.0.0'), wvbVersion, host, vers)
                    freequent_data[1] += 'KO_SecE'
                    logger.error('Error SEGURIDAD: %s - %s' %
                                 (expiration_final, data[:100]))

                if source:
                    freequent_data[1] = 'Cha,%s,%s,%s%s,' % (
                        check_assistant.get('assistantVersion',
                                            '0.0.0'), wvbVersion, host, vers)
                    if not retry:
                        freequent_data[1] += 'OK'
                    else:
                        freequent_data[1] += 'OK_R'

            if not source and not retry:
                config.set_setting('cf_assistant_ua', '')
                logger.debug("No se obtuvieron resultados, reintentando...")
                timeout = -1 if timeout < 0 else timeout * 2
                extraPostDelay = -1 if extraPostDelay < 0 else extraPostDelay * 2
                return get_source(url,
                                  resp,
                                  timeout=timeout,
                                  debug=debug,
                                  extraPostDelay=extraPostDelay,
                                  retry=True,
                                  blacklist=blacklist,
                                  retryIfTimeout=retryIfTimeout,
                                  cache=cache,
                                  clearWebCache=clearWebCache,
                                  alfa_s=False,
                                  headers=headers,
                                  mute=mute,
                                  elapsed=elapsed,
                                  **kwargs)

            domain_ = domain
            split_lst = domain.split(".")

            if len(split_lst) > 2:
                domain = domain.replace(split_lst[0], "")

            if not domain.startswith('.'):
                domain = "." + domain

            get_ua(data_assistant)

            if isinstance(data_assistant, dict) and data_assistant.get(
                    "cookies", None):

                if not alfa_s:
                    logger.debug("Lista cookies: %s" %
                                 data_assistant.get("cookies", []))
                for cookie in data_assistant["cookies"]:
                    cookieslist = cookie.get("cookiesList", None)
                    val = scrapertools.find_single_match(
                        cookieslist, 'cf_clearance=([A-z0-9_\-\.]+)')
                    #val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([^;]+)')
                    dom = cookie.get("urls", None)
                    if not alfa_s: logger.debug("dominios: %s" % dom[0])

                    if 'cf_clearance' in cookieslist and val:

                        dict_cookie = {
                            'domain': domain,
                            'name': 'cf_clearance',
                            'value': val
                        }
                        if domain_ in dom[0]:
                            httptools.set_cookies(dict_cookie)
                            rin = {'Server': 'Alfa'}

                            resp.headers.update(dict(rin))
                            freequent_data[1] += 'C'
                            if not alfa_s:
                                logger.debug("cf_clearence=%s" % val)

        elif host == 'a':
            help_window.show_info('cf_2_01')

    freequency(freequent_data)

    if blacklist_clear and (not source or time.time() - elapsed > elapsed_max):
        if filetools.exists(PATH_BL):
            bl_data = jsontools.load(filetools.read(PATH_BL))
        else:
            bl_data = {}
        if time.time() - elapsed > elapsed_max:
            bl_data[domain_full] = time.time() + elapsed_max * 10 * 60
        else:
            bl_data[domain_full] = time.time() + expiration_final
        if not debug and not httptools.TEST_ON_AIR:
            filetools.write(PATH_BL, jsontools.dump(bl_data))
    if not source:
        resp.status_code = 429
    else:
        resp.status_code = 200

    return data, resp
Example #26
def play(item):  # Prepares the download of the .torrent files and external subtitles
    logger.info()
    itemlist = []
    headers = []
    from core import downloadtools
    from core import ziptools
    from core import filetools

    # look for the .torrent URL
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?">'
    patron += '\s*Torrent:<\/td><td class="(?:[^"]+)?">\s*<img src="(?:[^"]+)?"\s*'
    patron += 'alt="(?:[^"]+)?"\s*border="(?:[^"]+)?"\s*\/>\s*<a onmouseover="'
    patron += '(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?" href="([^"]+)".*?<\/a>'

    data, response, item, itemlist = generictools.downloadpage(
        item.url,
        timeout=timeout,
        patron=patron,
        item=item,
        itemlist=[],
        quote_rep=False,
        check_blocked_IP=True)
    if not data or response.code in [999, 99]:  # on ERROR or a listed error code it is retried with another host
        return itemlist  # ... exit

    item.url = urlparse.urljoin(host,
                                scrapertools.find_single_match(data, patron))

    # look for Spanish subtitles
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*>\s*Subs.*?<\/td><td class="(?:[^"]+)?"\s*>(.*?)(?:<br\/>)?<\/td><\/tr>'
    data_subt = scrapertools.find_single_match(data, patron)
    if data_subt:
        patron = '<a href="([^"]+)"\s*onmouseover="return overlib\('
        patron += "'Download Spanish subtitles'"
        patron += '\)"\s*onmouseout="(?:[^"]+)?"\s*><img src="(?:[^"]+)?"\s*><\/a>'
        subt = scrapertools.find_single_match(data_subt, patron)
        if subt:
            item.subtitle = urlparse.urljoin(host, subt)

    if item.subtitle:  # if there are subtitle URLs, download them
        from core import httptools
        headers.append(["User-Agent", httptools.get_user_agent()])  # use the default User-Agent
        videolibrary_path = config.get_videolibrary_path()  # absolute path based on the video library
        if videolibrary_path.lower().startswith("smb://"):  # for SMB connections, use local userdata
            videolibrary_path = config.get_data_path()  # absolute path based on userdata
        videolibrary_path = filetools.join(videolibrary_path, "subtitles")
        # The subtitles folder is deleted first to clean it, then recreated
        if filetools.exists(videolibrary_path):
            filetools.rmtree(videolibrary_path)
            time.sleep(1)
        if not filetools.exists(videolibrary_path):
            filetools.mkdir(videolibrary_path)
        subtitle_name = 'Rarbg-ES_SUBT.zip'  # name of the subtitles file
        subtitle_folder_path = filetools.join(videolibrary_path, subtitle_name)  # download path
        ret = downloadtools.downloadfile(item.subtitle,
                                         subtitle_folder_path,
                                         headers=headers,
                                         continuar=True,
                                         silent=True)

        if filetools.exists(subtitle_folder_path):
            # Unzip the archive inside the addon
            # ---------------------------------
            try:
                unzipper = ziptools.ziptools()
                unzipper.extract(subtitle_folder_path, videolibrary_path)
            except:
                import xbmc
                xbmc.executebuiltin('Extract("%s", "%s")' %
                                    (subtitle_folder_path, videolibrary_path))
                time.sleep(1)

            # Delete the downloaded zip
            # ------------------------
            filetools.remove(subtitle_folder_path)

            # Take the first subtitles file as the default
            for raiz, subcarpetas, ficheros in filetools.walk(
                    videolibrary_path):
                for f in ficheros:
                    if f.endswith(".srt"):
                        #f_es = 'rarbg_subtitle.spa.srt'
                        f_es = scrapertools.find_single_match(
                            item.url,
                            '&f=(.*?).torrent$').replace('.', ' ').replace(
                                '-', ' ').lower() + '.spa.srt'
                        if not f_es:
                            f_es = item.infoLabels['originaltitle'] + '.spa.srt'
                            f_es = f_es.replace(':', '').lower()
                        filetools.rename(
                            filetools.join(videolibrary_path, f),
                            filetools.join(videolibrary_path, f_es))
                        item.subtitle = filetools.join(videolibrary_path, f_es)  # subtitles file
                        break
                break

    itemlist.append(item.clone())  # normal playback

    return itemlist
Example #27
def get_cl(resp, timeout=20, debug=False, extraPostDelay=15, retry=True):
    check_assistant = alfa_assistant.open_alfa_assistant(getWebViewInfo=True)
    if check_assistant and isinstance(check_assistant, dict):
        
        ua = get_ua(check_assistant)
        
        try:
            vers = int(scrapertools.find_single_match(ua, r"Android\s*(\d+)"))
        except:
            vers = 0

        if vers:
            dan = {'User-Agent': ua}
            resp.headers.update(dict(dan))
            ua = None
        else:
            ua = httptools.get_user_agent()

        logger.debug("UserAgent: %s || Android Vrs: %s" % (ua, vers))
        
        jscode = get_jscode(1, 'KEYCODE_ENTER', 1)

        data_assistant = alfa_assistant.get_source_by_page_finished(resp.url, timeout=timeout, getCookies=True, userAgent=ua,
                                                                    disableCache=True, debug=debug, jsCode=jscode,
                                                                    extraPostDelay=extraPostDelay, clearWebCache=True, 
                                                                    removeAllCookies=True
                                                                    )
        
        logger.debug("data assistant: %s" % data_assistant)
        
        domain = urlparse(resp.url).netloc
        domain_ = domain
        split_lst = domain.split(".")
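        # strip the subdomain so the cookie applies to the whole parent domain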

        if len(split_lst) > 2:
            domain = domain.replace(split_lst[0], "")
        
        if not domain.startswith('.'):
            domain = "."+domain
        
        get_ua(data_assistant)

        if isinstance(data_assistant, dict) and data_assistant.get("cookies", None):
            
            for cookie in data_assistant["cookies"]:
                cookieslist = cookie.get("cookiesList", None)
                val = scrapertools.find_single_match(cookieslist, 'cf_clearance=([A-z0-9_-]+)')
                dom = cookie.get("urls", None)
                #logger.debug("dominios: %s" % dom[0])
                #logger.debug("Lista cookies: %s" % cookieslist)

                if 'cf_clearance' in cookieslist and val:
                    
                    dict_cookie = {'domain': domain,
                                   'name': 'cf_clearance',
                                   'value': val}
                    if domain_ in dom[0]:
                        httptools.set_cookies(dict_cookie)
                        rin = {'Server': 'Alfa'}

                        resp.headers.update(dict(rin))
                        #logger.debug("cf_clearence=%s" %s val)

                        return resp
                    else:
                        logger.error("No cf_clearance for %s" % domain_)

                else: 
                    logger.error("No cf_clearance")
        else:
            logger.error("No Cookies o Error en conexión con Alfa Assistant")

        if retry:
            config.set_setting('cf_assistant_ua', '')
            logger.debug("No se obtuvieron resultados, reintentando...")
            return get_cl(resp, timeout=timeout-5, extraPostDelay=extraPostDelay, retry=False,
                         )



    msg = 'Detected a Cloudflare version 2 Captcha challenge,\
        This feature is not available in the opensource (free) version.'
    
    resp.status_code = msg

    logger.error('Detected a Cloudflare version 2 Hcaptcha challenge')
    
    return False