Code example #1
File: ver-pelis.py Project: CYBERxNUKE/xbmc-addon
def play(item):
    itemlist = []
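    # Fetch the page, strip escaped backslashes and follow the embedded "src" URL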
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\\','',data)
    item.url = scrapertools.find_single_match(data,'src="([^"]+)"')
    data = httptools.downloadpage(item.url).data
    
    if item.extra != "yes":
      patron = '"label":(.*?),.*?"type":"(.*?)",.*?"file":"(.*?)"'
      matches = re.compile(patron, re.DOTALL).findall(data)
      if not matches:
        # The fallback pattern captures only label/file, so pad the missing "type" field
        patron = '"label":(.*?),.*?"file":"(.*?)"'
        matches = [(dato_a, '', dato_b) for dato_a, dato_b in re.compile(patron, re.DOTALL).findall(data)]
      
      for dato_a, type, dato_b in matches:
        # Field order varies: whichever capture holds the URL is the link, the other the quality
        if 'http' in dato_a:
          url = dato_a
          calidad = dato_b
        else:
          url = dato_b
          calidad = dato_a
        url = url.replace('\\', '')
        type = type.replace('\\', '')
        itemlist.append(Item(channel=item.channel, url=url, action="play", title=item.fulltitle + " (" + calidad + ")", folder=False))
    else:
      url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')

      videolist = servertools.find_video_items(data=url)
      for video in videolist:
        itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", action="play", folder=False))
    
    return itemlist
Code example #2
File: pelisplus.py Project: CYBERxNUKE/xbmc-addon
def lista(item):
    logger.info()
    
    itemlist = []

    if 'series/' in item.extra:
        accion = 'temporadas'
        tipo = 'tvshow'
    else:
        accion = 'findvideos'
        tipo = 'movie'

    data = httptools.downloadpage(item.url).data
    
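    # Search results use a different card markup, and only listing pages expose the current page number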
    if item.title != 'Buscar':
        patron ='<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
        actual = scrapertools.find_single_match(data,'<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" class="page bicon last"><<\/a>')
    else:
        patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon online-play"><\/i>.*?\n<h2 class="title title-.*?">.*?\n<a href="([^"]+)" title="([^"]+)">.*?>'
        actual = ''
    
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        thumbnail = scrapedthumbnail
        
        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
        filtro_list = {"poster_path": filtro_thumb}  # Field name and value to filter on in the tmdb API results
        filtro_list = filtro_list.items()

        if item.title != 'Buscar':
          itemlist.append(Item(channel=item.channel,contentType=tipo, action=accion, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                              fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list},
                               contentTitle =scrapedtitle, contentSerieName= scrapedtitle, extra = item.extra))
        else:
          item.extra = item.extra.rstrip('s/')
          if item.extra in url:
            itemlist.append(Item(channel=item.channel,contentType=tipo, action=accion, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
            fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, 
            contentTitle =scrapedtitle, contentSerieName= scrapedtitle, extra = item.extra))
        
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)

    # Find the items that have no plot and load their pages to fetch it
    for entry in itemlist:
      if entry.infoLabels['plot'] == '':
        data = httptools.downloadpage(entry.url).data
        entry.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
        entry.plot = scrapertools.find_single_match(data, '<span>Sinopsis:<\/span>.([^<]+)<span class="text-detail-hide"><\/span>.<\/p>')

    # Pagination
    if item.title != 'Buscar' and actual != '':
        if itemlist:
            next_page = str(int(actual) + 1)
            next_page_url = host + item.extra + 'pag-' + next_page
            itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png', extra=item.extra))
    return itemlist
Code example #3
File: copiapop.py Project: CYBERxNUKE/xbmc-addon
def login(pagina):
    logger.info()

    try:
        user = config.get_setting("%suser" % pagina.split(".")[0], "copiapop")
        password = config.get_setting("%spassword" % pagina.split(".")[0], "copiapop")
        if pagina == "copiapop.com":
            if user == "" and password == "":
                return False, "Para ver los enlaces de copiapop es necesario registrarse en copiapop.com"
            elif user == "" or password == "":
                return False, "Copiapop: Usuario o contraseña en blanco. Revisa tus credenciales"
        else:
            if user == "" or password == "":
                return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales"

        data = httptools.downloadpage("http://%s" % pagina).data
        if re.search(r'(?i)%s' % user, data):
            return True, ""

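        # Grab the anti-forgery token from the form and post the credentials as an AJAX request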
        token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url_log = "http://%s/action/Account/Login" % pagina
        data = httptools.downloadpage(url_log, post, headers).data
        if "redirectUrl" in data:
            logger.info("Login correcto")
            return True, ""
        else:
            logger.info("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
    except:
        import traceback
        logger.info(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"
Code example #4
File: dailymotion.py Project: CYBERxNUKE/xbmc-addon
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    response = httptools.downloadpage(page_url, cookies=False)
    cookie = {'Cookie': response.headers["set-cookie"]}
    data = response.data.replace("\\", "")

    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''

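    # Each quality key ("240", "380", ...) maps to a JSON list of stream descriptors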
    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
    for calidad, urls in qualities:
        if calidad == "auto":
            continue
        patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
            stream_type = stream_type.replace('x-mpegURL', 'm3u8')
            if stream_type == "mp4":
                stream_url = httptools.downloadpage(stream_url, headers=cookie, only_headers=True,
                                                    follow_redirects=False).headers.get("location", stream_url)
            else:
                data_m3u8 = httptools.downloadpage(stream_url).data
                stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
            video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #5
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data

    mediaurls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
    if not mediaurls:
        id_file = page_url.rsplit("/",1)[1]
        key = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*"([^"]+)"')
        if not key:
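            # flashvars.filekey may point to another JS variable; resolve it to get the real key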
            varkey = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*([^;]+);')
            key = scrapertools.find_single_match(data, varkey+'\s*=\s*"([^"]+)"')

        # First url: an intentionally wrong url is fetched, needed to obtain the real link
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=0&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (key, id_file)
        data = httptools.downloadpage(url).data
        
        url_error = scrapertools.find_single_match(data, 'url=([^&]+)&')
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=1&errorUrl=%s&errorCode=404&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (url_error, key, id_file)
        data = httptools.downloadpage(url).data
        mediaurls = scrapertools.find_multiple_matches(data, 'url=([^&]+)&')

    for i, mediaurl in enumerate(mediaurls):
        title = scrapertools.get_filename_from_url(mediaurl)[-4:]+" Mirror %s [vidgg]" % str(i+1)
        mediaurl += "|User-Agent=Mozilla/5.0"
        video_urls.append( [title, mediaurl])

    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #6
File: qserie.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info()
    itemlist=[]
    data = httptools.downloadpage(item.url).data
    
    anterior = scrapertools.find_single_match(data,'<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>')
    siguiente = scrapertools.find_single_match(data,'<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>')
    titulo = scrapertools.find_single_match(data,'<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ')
    existe = scrapertools.find_single_match(data,'<center>La pel.cula que quieres ver no existe.</center>')
    
    from core import servertools
    itemlist.extend(servertools.find_video_items(data=data))
    # Removing entries while iterating skips elements; filter into a new list instead
    itemlist = [videoitem for videoitem in itemlist if 'youtube' not in videoitem.url]
    for videoitem in itemlist:
       videoitem.channel=item.channel
       videoitem.action="play"
       videoitem.folder=False
       videoitem.fanart =item.fanart
       videoitem.title = titulo+" "+videoitem.server
    if item.extra2 != 'todos':
       data = httptools.downloadpage(anterior).data
       existe = scrapertools.find_single_match(data,'<center>La pel.cula que quieres ver no existe.</center>')
       if not existe:
           itemlist.append( Item(channel=item.channel, action="findvideos" , title='Capitulo Anterior' , url=anterior, thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png', folder =True ))
    
       data = httptools.downloadpage(siguiente).data
       existe = scrapertools.find_single_match(data,'<center>La pel.cula que quieres ver no existe.</center>')
       if  not existe:
           itemlist.append( Item(channel=item.channel, action="findvideos" , title='Capitulo Siguiente' , url=siguiente, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png', folder =True ))
        
    return itemlist    
Code example #7
def lista(item):
    logger.info()

    itemlist = []
    if item.extra == 'buscar':
        data = httptools.downloadpage(host+'/index.php?'+'categoria=0&keysrc='+item.text).data
    else:
        data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    if item.extra == 'masvistas':
        patron ='<div class=bloquecenmarcado><a title=.*? target=_blank href=(.*?) class=game><img src=(.*?) alt=(.*?) title= class=bloquecenimg \/>.*?<strong>(.*?)<\/strong>'
    else:
        patron = '<div class=fotonoticia><a.*?target=_blank href=(.*?)><img src=(.*?) alt=(.*?) \/>.*?class=textonoticia>.*?\/><br \/>(.*?)<\/div>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        url = host+scrapedurl
        thumbnail = host+scrapedthumbnail
        plot= scrapertools.htmlclean(scrapedplot)
        plot = plot.decode('iso8859-1').encode('utf-8')
        contentTitle = scrapedtitle
        title = contentTitle
        title = title.decode('iso8859-1').encode('utf-8')
        fanart =''
        itemlist.append( Item(channel=item.channel, action='findvideos' , title=title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, contentTitle = contentTitle))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist:
        actual_page_url = item.url
        next_page = scrapertools.find_single_match(data, 'class=current>.*?<\/span><a href=(.*?)>.*?<\/a>')
        if next_page != '' and item.extra != 'masvistas':
            itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=host + next_page, thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
    return itemlist
Code example #8
def login(check_login=True):
    logger.info()

    try:
        user = config.get_setting("verseriesynovelasuser", "verseriesynovelas")
        password = config.get_setting("verseriesynovelaspassword", "verseriesynovelas")
        if user == "" and password == "":
            return False, "Para ver los enlaces de este canal es necesario registrarse en www.verseriesynovelas.tv"
        elif user == "" or password == "":
            return False, "Usuario o contraseña en blanco. Revisa tus credenciales"
        if check_login:
            data = httptools.downloadpage("http://www.verseriesynovelas.tv/").data
            if user in data:
                return True, ""

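        # WordPress-style login: post the credentials to the login endpoint and inspect the response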
        post = "log=%s&pwd=%s&redirect_to=http://www.verseriesynovelas.tv/wp-admin/&action=login" % (user, password)
        data = httptools.downloadpage("http://www.verseriesynovelas.tv/iniciar-sesion", post=post).data
        if "La contraseña que has introducido" in data:
            logger.info("pelisalacarta.channels.verseriesynovelas Error en el login")
            return False, "Contraseña errónea. Comprueba tus credenciales"
        elif "Nombre de usuario no válido" in data:
            logger.info("pelisalacarta.channels.verseriesynovelas Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"            
        else:
            logger.info("pelisalacarta.channels.verseriesynovelas Login correcto")
            return True, ""
    except:
        import traceback
        logger.info(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"
Code example #9
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % (page_url))

    video_urls = []
    ## Load the page to pick up the cookies
    data = httptools.downloadpage(page_url).data

    ## New url
    url = page_url.replace("embed/", "").replace(".html", ".json")
    ## Fetch the data and the headers
    response = httptools.downloadpage(url)
    data = jsontools.load_json(response.data)

    ## The video_key cookie is required to be able to watch the video
    cookie_video_key = scrapertools.find_single_match(response.headers["set-cookie"], '(video_key=[a-f0-9]+)')

    ## Build the video url + the video_key cookie
    for videos in data['videos']:
        media_url = videos['url'] + "|Referer=https://my1.imgsmail.ru/r/video2/uvpv3.swf?75&Cookie=" + cookie_video_key
        if not media_url.startswith("http"):
            media_url = "http:" + media_url
        quality = " %s" % videos['key']
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + quality + " [mail.ru]", media_url])
    try:
        video_urls.sort(key=lambda video_urls:int(video_urls[0].rsplit(" ",2)[1][:-1]))
    except:
        pass

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #10
File: copiapop.py Project: CYBERxNUKE/xbmc-addon
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    host = "http://copiapop.com"
    host_string = "copiapop"
    if "diskokosmiko.mx" in page_url:
        host = "http://diskokosmiko.mx"
        host_string = "diskokosmiko"

    url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
    if url:
        url = host + url
        fileid = url.rsplit("f=", 1)[1]
        token = scrapertools.find_single_match(data, '<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage(url, post, headers).data
        data = jsontools.load_json(data)
        mediaurl = data.get("DownloadUrl")
        extension = data.get("Extension")
    
        video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])

    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #11
def play(item):
    logger.info()
    itemlist = []
    
    location = ""
    i = 0
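    # Retry up to 6 times; the response sometimes arrives gzip-compressed and must be inflated manually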
    while not location:
        try:
            data = httptools.downloadpage(item.url).data
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            if not url_redirect:
                import StringIO
                compressedstream = StringIO.StringIO(data)
                import gzip
                gzipper = gzip.GzipFile(fileobj=compressedstream)
                data = gzipper.read()
                gzipper.close()
                url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            location = httptools.downloadpage(url_redirect, follow_redirects=False).headers["location"]
        except:
            pass
        i += 1
        if i == 6:
            return itemlist

    enlaces = servertools.findvideosbyserver(location, item.server)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Code example #12
File: seriesblanco.py Project: CYBERxNUKE/xbmc-addon
def play(item):
    logger.info("{0} - {1} = {2}".format(item.show, item.title, item.url))

    if item.url.startswith(HOST):
        data = httptools.downloadpage(item.url).data

        ajaxLink = re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data)
        ajaxData = ""
        for serie, temp, cap, linkID in ajaxLink:
            logger.debug("Ajax link request: Serie = {0} - Temp = {1} - Cap = {2} - Link = {3}".format(serie, temp, cap, linkID))
            ajaxData += httptools.downloadpage(HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp + '&cap=' + cap + '&id=' + linkID).data

        if ajaxData:
            data = ajaxData

        patron = "onclick='window.open\(\"([^\"]+)\"\);'/>"
        url = scrapertools.find_single_match(data, patron)
    else:
        url = item.url

    itemlist = servertools.find_video_items(data=url)

    titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
    if titulo:
        titulo += " [{language}]".format(language=item.language)

    for videoitem in itemlist:
        if titulo:
            videoitem.title = titulo
        else:
            videoitem.title = item.title
        videoitem.channel = item.channel

    return itemlist
Code example #13
File: animeflv_ru.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info()

    itemlist = []

    _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
    post = "embed_id=%s" % _id
    data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
    dict_data = jsontools.load_json(data)

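    # The API returns a protocol-relative URL; fetch it with the episode page as Referer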
    headers = dict()
    headers["Referer"] = item.url
    data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
    dict_data = jsontools.load_json(data)

    list_videos = dict_data["playlist"][0]["sources"]

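    # "sources" may be either a JSON list or a dict, depending on the episode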
    if isinstance(list_videos, list):
        for video in list_videos:
            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    else:
        for video in list_videos.values():
            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    return itemlist
Code example #14
File: estrenosgo.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info()
    itemlist = []
    list_opciones = []
    IDIOMAS = {"banderita1": "Español", "banderita2": "VOSE", "banderita3": "Latino"}

    url = "http://estrenosli.org/ver-online-" + item.url

    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<div class="content"><a href="([^"]+).*?'
    patron += '<div class="content_mini"><span class="([^"]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, banderita in matches:
        idioma = ""
        if banderita in IDIOMAS:
            idioma = " [%s]" % IDIOMAS[banderita]

        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

        if item.extra == 'multi-episodie':
            patron = '<div class="linksDescarga"><span class="titulo">Video Online:([^<]+).*?<a href="([^"]+)'
            matches = re.compile(patron, re.DOTALL).findall(data)
            for capitulo, url in matches:
                s = servertools.findvideos(url, skip=True)
                if s:
                    itemlist.append(item.clone(url=s[0][1], action="play", folder=False, server=s[0][2],
                                           title="Ver %s en %s%s" % (capitulo.strip(), s[0][2].capitalize(), idioma),
                                           thumbnail2=item.thumbnail,
                                           thumbnail="http://media.tvalacarta.info/servers/server_" + s[0][2] + ".png"))
        else:
            for s in servertools.findvideos(data):
                itemlist.append(item.clone(url=s[1], action="play", folder=False, server=s[2],
                                       title="Ver en %s%s" % (s[2].capitalize(), idioma),
                                       thumbnail2=item.thumbnail,
                                       thumbnail="http://media.tvalacarta.info/servers/server_" + s[2] + ".png"))


    # Insert the "Buscar trailer" and "Añadir a la biblioteca" items
    if itemlist and item.extra == "movie":
        if item.contentQuality:
            title = "%s [%s]" % (item.contentTitle, item.contentQuality)
        else:
            title = item.contentTitle

        itemlist.insert(0, item.clone(channel = "trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))

        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))


    return itemlist
Code example #15
File: allvid.py Project: Hernanarce/pelisalacarta
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data
    redirect_url = scrapertools.find_single_match(data, '<iframe src="([^"]+)')
    data = httptools.downloadpage(redirect_url).data    
    if "File was deleted" in data or "Not Found" in data or "video is no longer available" in data:
        return False, "[Allvid] El archivo no existe o ha sido borrado"

    return True, ""
Code example #16
def get_video_url(page_url, premium = False, user="", password="", video_password=""):
    logger.info("url="+page_url)

    video_urls = []
    
    if "crunchyroll.com" in page_url:
        media_id = page_url.rsplit("-", 1)[1]
    else:
        media_id = scrapertools.find_single_match(page_url, 'media_id=(\d+)')

    url = "http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s" \
          "&video_format=0&video_quality=0&auto_play=0&aff=af-12299-plwa" % media_id
    post = "current_page=%s" % page_url
    data = httptools.downloadpage(url, post, headers=GLOBAL_HEADER, replace_headers=True).data
    

    if "<msg>Media not available</msg>" in data:
        data = httptools.downloadpage(proxy+url, post, headers=GLOBAL_HEADER, replace_headers=True).data

    media_url = scrapertools.find_single_match(data, '<file>(.*?)</file>').replace("&amp;", "&")
    if not media_url:
        return video_urls
    quality = scrapertools.find_single_match(data, '<height>(.*?)</height>')
    filename = scrapertools.get_filename_from_url(media_url)[-4:]
    
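    # Subtitles come AES-encrypted: pick the configured language, then decrypt iv/data with the subtitle id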
    try:
        from Crypto.Cipher import AES
        idiomas = ['Español \(España\)', 'Español\]', 'English', 'Italiano', 'Français', 'Português', 'Deutsch']
        index_sub = int(config.get_setting("crunchyrollsub"))
        idioma_sub = idiomas[index_sub]
        link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[%s" % idioma_sub)
        if not link_sub and index_sub == 0:
            link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español\]")
        elif not link_sub and index_sub == 1:
            link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[Español \(España\)")

        if not link_sub:
            link_sub = scrapertools.find_single_match(data, "link='([^']+)' title='\[English")
        data_sub = httptools.downloadpage(link_sub.replace("&amp;", "&"), headers=GLOBAL_HEADER, replace_headers=True).data

        id_sub = scrapertools.find_single_match(data_sub, "subtitle id='([^']+)'")
        iv = scrapertools.find_single_match(data_sub, '<iv>(.*?)</iv>')
        data_sub = scrapertools.find_single_match(data_sub, '<data>(.*?)</data>')
        file_sub = decrypt_subs(iv, data_sub, id_sub)
    except:
        import traceback
        logger.info(traceback.format_exc())
        file_sub = ""

    video_urls.append(["%s  %sp [crunchyroll]" % (filename, quality), media_url, 0, file_sub])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0],video_url[1]))

    return video_urls
Code example #17
File: cinefox.py Project: CYBERxNUKE/xbmc-addon
def episodios(item):
    logger.info()
    itemlist = []

    if item.extra == "ultimos":
        data = httptools.downloadpage(item.url).data
        item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
        item.url += "/episodios"

    data = httptools.downloadpage(item.url).data
    data_season = data[:]

    if "episodios" in item.extra or not __menu_info__ or item.path:
        action = "findvideos"
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
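    # The first season's episodes are already in the page; the remaining seasons need an extra request each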
    for i, url in enumerate(seasons):
        if i != 0:
            data_season = httptools.downloadpage(url, add_referer=True).data
        patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
        matches = scrapertools.find_multiple_matches(data_season, patron)
        for scrapedurl, episode, scrapedtitle in matches:
            new_item = item.clone(action=action, url=scrapedurl, text_color=color2, contentType="episode")
            new_item.contentSeason = episode.split("x")[0]
            new_item.contentEpisodeNumber = episode.split("x")[1]
            
            new_item.title = episode + " - " + scrapedtitle
            new_item.extra = "episode"
            if "episodios" in item.extra or item.path:
                new_item.extra = "episode|"
            itemlist.append(new_item)

    if "episodios" not in item.extra and not item.path:
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    itemlist.reverse()
    if "episodios" not in item.extra and not item.path:
        id = scrapertools.find_single_match(item.url, '/(\d+)/')
        data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id).data
        item.infoLabels["trailer"] = jsontools.load_json(data_trailer)["video"]["url"]
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta"))
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5,
                                 title="Añadir serie a la biblioteca", show=item.show, thumbnail=item.thumbnail,
                                 url=item.url, fulltitle=item.fulltitle, fanart=item.fanart, extra="episodios###episodios",
                                 contentTitle=item.fulltitle))

    return itemlist
Code example #18
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = httptools.downloadpage(item.url).data
    if "valida el captcha" in data:
        logueado, error = login(check_login=False)
        data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
    for match in bloque:
        patron = 'data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
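            # Normalize site-specific server ids to the names of the server modules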
            if server == "streamin":
                server = "streaminto"
            if server== "waaw":
                server = "netutv"
            if server == "ul":
                server = "uploadedto"
            try:
                servers_module = __import__("servers."+server)
                title = "Ver vídeo en "+server+"  ["+quality+"]"
                if "Español.png" in match:
                    title += " [CAST]"
                if "VOS.png" in match:
                    title += " [VOSE]"
                if "Latino.png" in match:
                    title += " [LAT]"
                if "VO.png" in match:
                    title += " [V.O]"
                itemlist.append(item.clone(action="play", title=title, url=url, server=server))
            except:
                pass

    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.extra != "episodios":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
Code example #19
File: pelismagnet.py Project: CYBERxNUKE/xbmc-addon
def episodios(item):
    logger.info()
    itemlist = []

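    # Route the request through kproxy.com: post the target url, then follow the meta-refresh redirect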
    response = httptools.downloadpage("https://kproxy.com/")
    url = "https://kproxy.com/doproxy.jsp"
    post = "page=%s&x=34&y=14" % urllib.quote(item.url)
    response = httptools.downloadpage(url, post, follow_redirects=False).data
    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
    data = httptools.downloadpage(url).data

    data = jsontools.load_json(data)
    for i in data.get("temporadas", []):

        titulo = "{temporada} ({total} Episodios)".format(temporada=i.get("nomtemporada", ""),
                                                          total=len(i.get("capituls", "0")))
        itemlist.append(Item(channel=item.channel, action="episodios", title=titulo, url=item.url,
                             server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, plot=data.get("info", ""),
                             folder=False))

        for j in i.get("capituls", []):

            numero = j.get("infocapitul", "")
            if not numero:
                numero = "{temp}x{cap}".format(temp=i.get("numerotemporada", ""), cap=j.get("numerocapitul", ""))

            titulo = j.get("nomcapitul", "")
            if not titulo:
                titulo = "Capítulo {num}".format(num=j.get("numerocapitul", ""))

            calidad = ""
            if j.get("links", {}).get("calitat", ""):
                calidad = " [{calidad}]".format(calidad=j.get("links", {}).get("calitat", ""))

            title = "     {numero} {titulo}{calidad}".format(numero=numero, titulo=titulo, calidad=calidad)

            if j.get("links", {}).get("magnet", ""):
                url = j.get("links", {}).get("magnet", "")
            else:
                return [Item(channel=item.channel, title='No hay enlace magnet disponible para este capitulo')]

            plot = i.get("overviewcapitul", "")
            if plot is None:
                plot = ""

            infoLabels = item.infoLabels
            if plot:
                infoLabels["plot"] = plot
            infoLabels["season"] = i.get("numerotemporada")
            infoLabels["episode"] = j.get("numerocapitul")
            itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, server="torrent", infoLabels=infoLabels,
                                 thumbnail=item.thumbnail, fanart=item.fanart, show=item.show, contentTitle=item.contentTitle,
                                 contentSeason=i.get("numerotemporada"), contentEpisodeNumber=j.get("numerocapitul")))

    return itemlist
Code example #20
File: vixto.py Project: CYBERxNUKE/xbmc-addon
def episodios(item):
    logger.info()
    itemlist = list()

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data, '<strong>Temporada:(.*?)</div>')
    matches = scrapertools.find_multiple_matches(bloque, 'href="([^"]+)">(.*?)</a>')

    for scrapedurl, scrapedtitle in matches:
        title = "Temporada %s" % scrapedtitle

        new_item = item.clone(action="", title=title, text_color=color2)
        new_item.infoLabels["season"] = scrapedtitle
        new_item.infoLabels["mediatype"] = "season"
        data_season = httptools.downloadpage(scrapedurl).data
        data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
        patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
                 '(.*?)</a>'
        matches = scrapertools.find_multiple_matches(data_season, patron)

        elementos = []
        for url, status, title in matches:
            if not "Enlaces Disponibles" in status:
                continue
            elementos.append(title)
            item_epi = item.clone(action="findvideos", url=url, text_color=color1)
            item_epi.infoLabels["season"] = scrapedtitle
            episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
            titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$')
            item_epi.infoLabels["episode"] = episode
            item_epi.infoLabels["mediatype"] = "episode"
            item_epi.title = "%sx%s  %s" % (scrapedtitle, episode.zfill(2), titulo)

            itemlist.insert(0, item_epi)
        if elementos:
            itemlist.insert(0, new_item)

    if item.infoLabels["tmdb_id"] and itemlist:
        try:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    if itemlist:
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", text_color="green",
                                 filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle,
                                 extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show))
    else:
        itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3))
    return itemlist
Code example #21
File: pelismagnet.py Project: CYBERxNUKE/xbmc-addon
def series(item):
    logger.info()
    itemlist = []

    response = httptools.downloadpage("https://kproxy.com/")
    url = "https://kproxy.com/doproxy.jsp"
    post = "page=%s&x=34&y=14" % urllib.quote(item.url)
    response = httptools.downloadpage(url, post, follow_redirects=False).data
    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
    data = httptools.downloadpage(url).data

    lista = jsontools.load_json(data)
    if item.extra == "next":
        lista_ = lista[25:]
    else:
        lista_ = lista[:25]

    for i in lista_:

        punt = i.get("puntuacio", "")
        valoracion = ""
        # Only show the score when a non-zero rating exists
        if punt and punt != 0:
            valoracion = "  (Val: {punt})".format(punt=punt)

        title = "{nombre}{val}".format(nombre=i.get("nom", ""), val=valoracion)
        url = "{url}?id={id}".format(url=api_temp, id=i.get("id", ""))

        thumbnail = ""
        fanart = ""
        if i.get("posterurl", ""):
            thumbnail = "http://image.tmdb.org/t/p/w342{file}".format(file=i.get("posterurl", ""))
        if i.get("backurl", ""):
            fanart = "http://image.tmdb.org/t/p/w1280{file}".format(file=i.get("backurl", ""))

        plot = i.get("info", "")
        if plot is None:
            plot = ""

        infoLabels = {'plot': plot, 'year': i.get("year"), 'tmdb_id': i.get("id"), 'mediatype': 'tvshow'}

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, server="torrent",
                             thumbnail=thumbnail, fanart=fanart, infoLabels=infoLabels, contentTitle=i.get("nom"),
                             show=i.get("nom")))

    from core import tmdb
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if len(lista_) == 25 and item.extra == "next":
        url = re.sub(r'page=(\d+)', r'page=' + str(int(re.search('\d+', item.url).group()) + 1), item.url)
        itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente", url=url))
    elif len(lista_) == 25:
        itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente", url=item.url, extra="next"))

    return itemlist
Code example #22
File: hdfull.py Project: CYBERxNUKE/xbmc-addon
def login():
    logger.info()

    data = agrupa_datos( httptools.downloadpage(host).data )

    patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
    sid = scrapertools.find_single_match(data, patron)

    # The credential literals are masked ("******") in this listing; presumably they
    # come from the channel settings (the setting keys below are hypothetical)
    user = config.get_setting("hdfulluser", "hdfull")
    password = config.get_setting("hdfullpassword", "hdfull")
    post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + user + "&password=" + password + "&action=login"

    httptools.downloadpage(host, post=post)
Code example #23
File: ver-pelis.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info("pelisalacarta.ver-pelis findvideos")
    itemlist = []
    # Pass the callable and its argument separately; get_art(item) would run synchronously here
    th = Thread(target=get_art, args=(item,))
    th.setDaemon(True)
    th.start()
    data = httptools.downloadpage(item.url).data
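    # The list of players is loaded via an AJAX POST with the movie id and slug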
    data_post = scrapertools.find_single_match(data,"type: 'POST'.*?id: (.*?),slug: '(.*?)'")
    if data_post:
     post='id='+data_post[0]+'&slug='+data_post[1]
     data_info=httptools.downloadpage('http://ver-pelis.me/ajax/cargar_video.php', post=post).data
     enlaces = scrapertools.find_multiple_matches(data_info,"</i> (\w+ \w+).*?<a onclick=\"load_player\('([^']+)','([^']+)', ([^']+),.*?REPRODUCIR\">([^']+)</a>")
     for server,id_enlace,name,number, idioma_calidad in enlaces:
         
        if "SUBTITULOS" in idioma_calidad and not "P" in idioma_calidad:
           idioma_calidad =idioma_calidad.replace("SUBTITULOS","VO")
           idioma_calidad=idioma_calidad.replace("VO","[COLOR orangered] VO[/COLOR]") 
        elif "SUBTITULOS" in idioma_calidad and "P" in idioma_calidad:
             idioma_calidad= "[COLOR indianred] "+idioma_calidad+"[/COLOR]"
       
        elif "LATINO" in idioma_calidad:
               idioma_calidad=idioma_calidad.replace("LATINO","[COLOR red]LATINO[/COLOR]")
        elif "Español" in idioma_calidad:
               idioma_calidad=idioma_calidad.replace("Español","[COLOR crimson]ESPAÑOL[/COLOR]")
        if "HD" in idioma_calidad:
            idioma_calidad=idioma_calidad.replace("HD","[COLOR crimson] HD[/COLOR]")
        elif "720" in idioma_calidad:
            idioma_calidad=idioma_calidad.replace("720","[COLOR firebrick] 720[/COLOR]")
        elif "TS" in idioma_calidad:
            idioma_calidad=idioma_calidad.replace("TS","[COLOR brown] TS[/COLOR]")
            
        elif "CAM" in idioma_calidad:
            idioma_calidad=idioma_calidad.replace("CAM","[COLOR darkkakhi] CAM[/COLOR]")
        
        url = "http://ver-pelis.me/ajax/video.php?id="+id_enlace+"&slug="+name+"&quality="+number
        
        if not "Ultra" in server:
           server =  "[COLOR cyan][B]"+server+"[/B][/COLOR]"
           extra= "yes"
        else:
           server =  "[COLOR yellow][B]"+server+"[/B][/COLOR]" 
           extra=""
        title =server.strip()+"  "+idioma_calidad   
        itemlist.append( Item(channel=item.channel,action="play", title = title, url=url,fanart=item.fanart,thumbnail= item.thumbnail,fulltitle=item.title, extra=extra,folder=True) )
     if item.library  and config.get_library_support() and len(itemlist) > 0 :
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                      'title': item.infoLabels['title']}
        itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca",
            action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, text_color="0xFFf7f7f7",
            thumbnail='http://imgur.com/gPyN1Tf.png'))
    else:
          itemlist.append( Item(channel=item.channel,action="", title ="[COLOR red][B]Upps!..Archivo no encontrado...[/B][/COLOR]" ,thumbnail=item.thumbnail) )
    return itemlist
Code example #24
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File Not Found" in data:
        return False, "[Youwatch] File cancellato"

    url_redirect = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = httptools.downloadpage(url_redirect).data
    if "We're sorry, this video is no longer available" in data:
        return False, "[Youwatch] File cancellato"

    return True, ""
Code example #25
File: pymovie.py Project: Hernanarce/pelisalacarta
def findvideos(item):
	logger.info()
	itemlist =[]
	audio = {'Latino':'[COLOR limegreen]LATINO[/COLOR]','Español':'[COLOR yellow]ESPAÑOL[/COLOR]','Ingles':'[COLOR red]ORIGINAL SUBTITULADO[/COLOR]', 'Latino-Ingles':'DUAL'}
	data = httptools.downloadpage(item.url).data
			
	if item.extra != 'series':
	   patron ='data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
	   tipotitle = item.contentTitle
	elif item.extra == 'series':
	   tipotitle = str(item.contentSeasonNumber)+'x'+str(item.contentEpisodeNumber)+' '+item.contentSerieName
	   patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
	
	matches = re.compile(patron,re.DOTALL).findall(data)

	if item.extra != 'documental':
		n=0
		
		for scrapedurl, scrapedcalidad, scrapedaudio in matches:
		   if 'series' in item.extra:  
		      datab = httptools.downloadpage(host+scrapedurl).data
		      url = scrapertools.find_single_match(datab,'class="reproductor"><iframe src="([^"]+)"')
		      print url + ' this is the address'
		   else:
		      url = scrapedurl
		   
		   title = tipotitle
		   idioma = audio[scrapedaudio]
		   itemlist.extend(servertools.find_video_items(data=url))
		   if n < len(itemlist):
		      itemlist[n].title = tipotitle+ ' ('+idioma+' ) '+'('+itemlist[n].server+' )'
		   n = n+1
	else:
		url = scrapertools.find_single_match(data,'class="reproductor"><iframe src="([^"]+)"')
		itemlist.extend(servertools.find_video_items(data=url))




	for videoitem in itemlist:
	   if item.extra == 'documental':
	    videoitem.title = item.title+' ('+videoitem.server+')'
	   videoitem.channel=item.channel
	   videoitem.action="play"
	   videoitem.folder=False


	if config.get_library_support() and len(itemlist) > 0 and item.extra !='series':
		itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]', url=item.url, action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
	
	return itemlist
Code example #26
File: cinefox.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info()
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = httptools.downloadpage(item.url, add_referer=True).data
        year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass
    
        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id).data
            trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta", context=""))

            if config.get_library_support() and not "|" in item.extra:
                itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                                     title="Añadir película a la biblioteca", url=item.url, thumbnail=item.thumbnail,
                                     fanart=item.fanart, fulltitle=item.fulltitle,
                                     extra="media|"))
    else:
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
        type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
Code example #27
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    if 'We’re Sorry!' in data:
        data = httptools.downloadpage(page_url.replace("/embed/", "/f/"), headers=header, cookies=False).data
        if 'We’re Sorry!' in data:
            return False, "[Openload] File eliminato" 

    return True, ""
Code example #28
File: youpeliculas.py Project: CYBERxNUKE/xbmc-addon
def lista (item):
    logger.info ()
    itemlist = []

    if item.extra['page_current']==0:
        data = httptools.downloadpage(host+'/you/load_data', item.extra['post']).data
    else:
        data = httptools.downloadpage(host+'/you/load_data', item.extra['post']+'&page_current='+str(item.extra['page_current'])).data
		
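    # Each <li> card is matched against several patterns: movie, collection, series, season and a fallback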
    matches = re.compile('<li(.*?)</li>',re.DOTALL).findall(data)
    for li in matches:
        # movie
        patron1 = 'data-id="([^"]+)" data-type="movie" data-imdb=[^<]+<div class="movie_image[^"]+"[^<]+<div class="[^"]+"[^<]+</div[^<]+<a href="([^"]+)" title="Ver (.*?) Película OnLine".*?<img src="([^"]+)" width=".*?<div class="movie_container">'
        match = re.compile(patron1,re.DOTALL).findall(li)
        for vidId, scrapedurl, scrapedtitle, scrapedthumbnail in match:
            itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, thumbnail=scrapedthumbnail, contentTitle = scrapedtitle, extra='vid='+vidId+'&ep='+vidId+'&type=movie' ))
        # Movie compilations
        patron2 = 'data-type="movie" data-imdb=[^<]+<div class="movie_image[^"]+"[^<]+<div class="[^"]+"[^<]+</div[^<]+<a href="'+host+'/([^"]+)" title="Ver (.*?) Películas OnLine".*?<img src="([^"]+)".*?<div class="movie_container">'
        match = re.compile(patron2,re.DOTALL).findall(li)
        for scrapedurl, scrapedtitle, scrapedthumbnail in match:
            itemlist.append(Item(channel=item.channel, action='lista' , title=scrapedtitle+' (LISTADO)', thumbnail=scrapedthumbnail, contentTitle = scrapedtitle+' (LISTADO)', extra={'post':'type=seasons&category='+scrapedurl, 'page_current':0, 'max_pages':1} ))
        # series
        patron3 = 'data-id="([^"]+)" data-type="[^"]+" data-imdb=[^<]+<div class="movie_image[^"]+"[^<]+<div class="[^"]+"[^<]+</div[^<]+<a href="([^"]+)" title="Ver (.*?) Serie OnLine".*?<img src="([^"]+)".*?Temporada: <span>([^<]+)</span><br/>Episodio: <span>([^<]+)</span>'    
        match = re.compile(patron3,re.DOTALL).findall(li)
        for serieId, scrapedurl, scrapedtitle, scrapedthumbnail, tempo, cap in match:
            itemlist.append(Item(channel=item.channel, action='episodios', title=scrapedtitle+'[Temp. '+tempo+'] (Cap. '+cap+')', url=scrapedurl, contentTitle = scrapedtitle+'[Temp. '+tempo+'] (Cap. '+cap+')', thumbnail=scrapedthumbnail, extra=serieId, temporada=tempo, moreTemp=True ))
        # seasons
        patron4 = 'data-id="([^"]+)" data-type="serie" data-imdb=[^<]+<div class="movie_image[^"]+"[^<]+<div class="[^"]+"[^<]+</div[^<]+<a href="([^"]+)" title="Ver (.*?) Serie OnLine".*?<img src="([^"]+)".*?<div class="lasted_te">Episodio: <span>([^<]+)</span>'    
        match = re.compile(patron4,re.DOTALL).findall(li)
        for serieId, scrapedurl, scrapedtitle, scrapedthumbnail, cap in match:
            itemlist.append(Item(channel=item.channel, action='episodios', title=scrapedtitle+' (Cap. '+cap+')', url=scrapedurl, contentTitle=scrapedtitle+' (Cap. '+cap+')', thumbnail=scrapedthumbnail, extra=serieId, temporada=scrapertools.find_single_match(scrapedtitle,'Temp. (.*?)]'), moreTemp=False ))	
        # other cases
        patron5 = 'data-id="([^"]+)" data-type="movie" data-imdb=[^<]+<div class="movie_image[^"]+"[^<]+<div class="[^"]+"[^<]+</div[^<]+<a href="([^"]+)" title="Ver (.*?) Serie OnLine".*?<img src="([^"]+)".*?<div class="lasted_te">Episodio: <span>([^<]+)</span>'    
        match = re.compile(patron5,re.DOTALL).findall(li)
        for serieId, scrapedurl, scrapedtitle, scrapedthumbnail, cap in match:
            itemlist.append(Item(channel=item.channel, action='episodios', title=scrapedtitle+' (Cap. '+cap+')', url=scrapedurl, contentTitle=scrapedtitle+' (Cap. '+cap+')', thumbnail=scrapedthumbnail, extra=serieId, temporada='1', moreTemp=False ))	
        #logger.info ('peliculasalacarta.channel.verpeliculasnuevas data-total'+li)
		
    # Pagination
    if itemlist !=[]:
        patron = '<buttom class=".*?load_more" data-total="([^"]+)" data-pages="([^"]+)"><i class="[^"]+"></i> Ver más</buttom>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if matches:
            itemlist.append(item.clone(title="Pagina Siguiente", action='lista', extra={'post':item.extra['post']+'&total='+matches[0][0], 'page_current':1, 'max_pages': int(matches[0][1]) }))
        else:
            if item.extra['page_current']+1<item.extra['max_pages']:
                itemlist.append(item.clone(title="Pagina Siguiente", action='lista', extra={'post':item.extra['post'], 'page_current':item.extra['page_current']+1, 'max_pages': item.extra['max_pages'] }))
			
    return itemlist
Code example #29
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data
    url_redirect = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = httptools.downloadpage(url_redirect).data

    url = scrapertools.get_match(data, '{file:"([^"]+)"')
    video_url = "%s|Referer=%s" % (url, url_redirect)
    video_urls = [[scrapertools.get_filename_from_url(url)[-4:] + " [youwatch]", video_url]]

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Code example #30
File: animeflv.py Project: CYBERxNUKE/xbmc-addon
def findvideos(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
    list_videos.extend(scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"'))
    # logger.info("data=%s " % list_videos)

    aux_url = []
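    # Embedded s3.animeflv.com players are resolved through their "check" API; any other url goes to servertools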
    for e in list_videos:
        if e.startswith("https://s3.animeflv.com/embed.php?"):
            server = scrapertools.find_single_match(e, 'server=(.*?)&')
            e = e.replace("embed", "check").replace("https", "http")
            data = httptools.downloadpage(e).data.replace("\\", "")
            if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
                import time
                time.sleep(3)
                data = httptools.downloadpage(e).data.replace("\\", "")

            video_urls = []
            if server == "gdrive":
                data = jsontools.load_json(data)
                for s in data.get("sources", []):
                    video_urls.append([s["label"], s["type"], s["file"]])

                if video_urls:
                    video_urls.sort(key=lambda v: int(v[0]))
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
                                               video_urls=video_urls))
            else:
                url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                if url:
                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))

        else:
            aux_url.append(e)

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail

    return itemlist
Code example #31
def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    selector_url = scrapertools.find_multiple_matches(
        data, 'class="metaframe rptss" src="([^"]+)"')

    for lang in selector_url:
        data = get_source('https:' + lang)
        urls = scrapertools.find_multiple_matches(data,
                                                  'data-playerid="([^"]+)">')
        subs = ''
        lang = scrapertools.find_single_match(lang, 'lang=(.*)?')
        language = IDIOMAS[lang]

        if item.contentType == 'episode':
            quality = 'SD'
        else:
            quality = item.quality

        for url in urls:
            final_url = httptools.downloadpage('https:' + url).data
            if language == 'VOSE':
                sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
                subs = 'https:%s' % sub
            if 'index' in url:
                try:
                    file_id = scrapertools.find_single_match(
                        url, 'file=(.*?)&')
                    post = {'link': file_id}
                    post = urllib.urlencode(post)
                    hidden_url = 'https://streamango.poseidonhd.co/repro/plugins/gkpluginsphp.php'
                    dict_vip_url = httptools.downloadpage(hidden_url,
                                                          post=post).json
                    url = dict_vip_url['link']
                except:
                    pass
            else:
                try:

                    if 'openload' in url:
                        file_id = scrapertools.find_single_match(
                            url, 'h=(\w+)')
                        post = {'h': file_id}
                        post = urllib.urlencode(post)
                        hidden_url = 'https://streamango.poseidonhd.co/repro/openload/api.php'
                        resp = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False)
                        json_data = resp.json
                        # The original read from an undefined data_url here;
                        # assume the raw response body was intended
                        url = scrapertools.find_single_match(
                            resp.data, "VALUES \('[^']+','([^']+)'")
                        if not url:
                            url = json_data['url']
                        if not url:
                            continue
                    else:
                        new_data = httptools.downloadpage('https:' + url).data
                        file_id = scrapertools.find_single_match(
                            new_data, 'value="([^"]+)"')
                        post = {'url': file_id}
                        post = urllib.urlencode(post)
                        hidden_url = 'https://streamango.poseidonhd.co/repro/r.php'
                        data_url = httptools.downloadpage(
                            hidden_url, post=post, follow_redirects=False)
                        url = data_url.headers['location']
                except:
                    pass
            url = url.replace(" ", "%20")
            itemlist.append(
                item.clone(title='[%s] [%s]',
                           url=url,
                           action='play',
                           subtitle=subs,
                           language=language,
                           quality=quality,
                           infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Requerido para Filtrar enlaces

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist,
                                                __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist
Code Example #32
def findvid_film(item):
    def load_links(itemlist, re_txt, color, desc_txt):
        streaming = scrapertools.find_single_match(data, re_txt)
        patron = '<td><a[^h]href="([^"]+)"[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(streaming)
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" %
                         (desc_txt, scrapedurl, scrapedtitle))
            title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

    logger.info("[thegroove360.cineblog01] findvid_film")

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.decodeHtmlentities(data)

    # Extract the quality format
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    # STREAMANGO
    # matches = []
    # u = scrapertools.find_single_match(data, '(?://|\.)streamango\.com/(?:f/|embed/)?[0-9a-zA-Z]+')
    # if u: matches.append((u, 'Streamango'))

    # Extract the content - Streaming
    load_links(itemlist, '<strong>Streaming:</strong>(.*?)<table height="30">',
               "orange", "Streaming")

    # Extract the content - Streaming HD
    load_links(itemlist,
               '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">',
               "yellow", "Streaming HD")

    # Extract the content - Streaming 3D
    load_links(itemlist,
               '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">',
               "pink", "Streaming 3D")

    # Extract the content - Download
    load_links(itemlist, '<strong>Download:</strong>(.*?)<table height="30">',
               "aqua", "Download")

    # Extract the content - Download HD
    load_links(
        itemlist,
        '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">',
        "azure", "Download HD")

    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
Code Example #33
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
Code Example #34
def menu_info(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
        data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(
        data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(
        data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(
        scrapertools.find_multiple_matches(
            data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support():
        title = "Añadir %s a la videoteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(
                                filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu videoteca. [%s] ¿Añadir?" % (
                                tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action=action,
                       title=title,
                       text_color=color5,
                       extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(
            item.clone(channel="tvmoviedb",
                       title="[Trakt] Gestionar con tu cuenta",
                       action="menu_trakt",
                       extra=extra))
    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta",
                   context=""))

    itemlist.append(item.clone(action="", title=""))
    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')

    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(
        item.clone(action="acciones_cuenta",
                   title="Añadir a una lista",
                   text_color=color3,
                   ficha=ficha))

    return itemlist
Code Example #35
def fichas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    fichas_marca = {
        '1': 'Siguiendo',
        '2': 'Pendiente',
        '3': 'Favorita',
        '4': 'Vista',
        '5': 'Abandonada'
    }
    patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src-data="([^"]+)".*?' \
             '<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
             '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches:
        tipo = "movie"
        scrapedurl = host + scrapedurl.rsplit("-dc=")[0]
        if not "-dc=" in scrapedurl:
            scrapedurl += "-dc="
        action = "findvideos"
        if __menu_info__:
            action = "menu_info"
        if serie:
            tipo = "tvshow"
        if episodio:
            title = "%s - %s" % (episodio.replace("X", "x"), scrapedtitle)
        else:
            title = scrapedtitle

        if marca:
            title += "  [COLOR %s][%s][/COLOR]" % (color4, fichas_marca[marca])

        new_item = Item(channel=item.channel,
                        action=action,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle,
                        contentType=tipo,
                        text_color=color2)
        if new_item.contentType == "tvshow":
            new_item.show = scrapedtitle
            if not __menu_info__:
                new_item.action = "episodios"

        itemlist.append(new_item)

    if itemlist and (item.extra == "listas_plus" or item.extra == "sigo"):
        follow = scrapertools.find_single_match(
            data, '<div onclick="seguir_lista.*?>(.*?)<')
        title = "Seguir Lista"
        if follow == "Siguiendo":
            title = "Dejar de seguir lista"
        item.extra = ""
        url = host + "/data.php?mode=seguir_lista&apikey=%s&sid=%s&lista=%s" % (
            apikey, sid, item.url.rsplit("/l", 1)[1])
        itemlist.insert(
            0,
            item.clone(action="acciones_cuenta",
                       title=title,
                       url=url,
                       text_color=color4,
                       lista=item.title,
                       folder=False))

    next_page = scrapertools.find_single_match(data,
                                               'href="([^"]+)" class="next"')
    if next_page:
        next_page = host + next_page.replace("&amp;", "&")
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página Siguiente",
                 url=next_page))

        try:
            total = int(
                scrapertools.find_single_match(
                    data, '<span class="page-dots">.*href.*?>(\d+)'))
        except:
            total = 0
        if not config.get_setting("last_page", item.channel) and config.is_xbmc() and total > 2 \
                and item.extra != "newest":
            itemlist.append(
                item.clone(action="select_page",
                           title="Ir a página... (Total:%s)" % total,
                           url=next_page,
                           text_color=color5))

    return itemlist
Code Example #36
File: trakt_tools.py Project: gacj22/WizardGacj22
def token_trakt(item):
    from platformcode import platformtools

    headers = {
        'Content-Type': 'application/json',
        'trakt-api-key': client_id,
        'trakt-api-version': '2'
    }
    try:
        if item.extra == "renew":
            refresh = config.get_setting("refresh_token_trakt", "trakt")
            url = "http://api-v2launch.trakt.tv/oauth/device/token"
            post = {
                'refresh_token': refresh,
                'client_id': client_id,
                'client_secret': client_secret,
                'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
                'grant_type': 'refresh_token'
            }
            post = jsontools.dump(post)
            data = httptools.downloadpage(url, post=post, headers=headers).data
            data = jsontools.load(data)
        elif item.action == "token_trakt":
            url = "http://api-v2launch.trakt.tv/oauth/device/token"
            post = "code=%s&client_id=%s&client_secret=%s" % (
                item.device_code, client_id, client_secret)
            data = httptools.downloadpage(url, post=post, headers=headers).data
            data = jsontools.load(data)
        else:
            import time
            dialog_auth = platformtools.dialog_progress(
                config.get_localized_string(60251),
                config.get_localized_string(60252) % item.verify_url,
                config.get_localized_string(60253) % item.user_code,
                config.get_localized_string(60254))

            # Roughly every 5 seconds, check whether the user has entered the code
            while True:
                time.sleep(item.intervalo)
                try:
                    if dialog_auth.iscanceled():
                        config.set_setting("trakt_sync", False)
                        return

                    url = "http://api-v2launch.trakt.tv/oauth/device/token"
                    post = {
                        'code': item.device_code,
                        'client_id': client_id,
                        'client_secret': client_secret
                    }
                    post = jsontools.dump(post)
                    data = httptools.downloadpage(url,
                                                  post=post,
                                                  headers=headers).data
                    data = jsontools.load(data)
                    if "access_token" in data:
                        # Code entered; exit the loop
                        break
                except:
                    pass

            try:
                dialog_auth.close()
            except:
                pass

        token = data["access_token"]
        refresh = data["refresh_token"]

        config.set_setting("token_trakt", token, "trakt")
        config.set_setting("refresh_token_trakt", refresh, "trakt")
        if not item.folder:
            platformtools.dialog_notification(
                config.get_localized_string(60255),
                config.get_localized_string(60256))
            if config.is_xbmc():
                import xbmc
                xbmc.executebuiltin("Container.Refresh")
            return

    except:
        import traceback
        logger.error(traceback.format_exc())
        if not item.folder:
            return platformtools.dialog_notification(
                config.get_localized_string(60527),
                config.get_localized_string(60258))
        token = ""

    itemlist = []
    # clone() takes keyword arguments; the original passed the title positionally
    if token:
        itemlist.append(
            item.clone(title=config.get_localized_string(60256), action=""))
    else:
        itemlist.append(
            item.clone(title=config.get_localized_string(60260), action=""))

    return itemlist
Code Example #37
def acciones_cuenta(item):
    logger.info()
    itemlist = []

    if "Tus fichas" in item.title:
        itemlist.append(
            item.clone(title="Capítulos",
                       url="tf_block_c a",
                       contentType="tvshow"))
        itemlist.append(
            item.clone(title="Series", url="tf_block_s", contentType="tvshow"))
        itemlist.append(item.clone(title="Películas", url="tf_block_p"))
        itemlist.append(item.clone(title="Documentales", url="tf_block_d"))
        return itemlist
    elif "Añadir a una lista" in item.title:
        data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" %
                                      (apikey, sid)).data
        data = xml2dict(data)
        itemlist.append(item.clone(title="Crear nueva lista", folder=False))
        if data["Data"]["TusListas"] != "\t":
            import random
            data = data["Data"]["TusListas"]["Item"]
            if type(data) is not list:
                data = [data]
            for child in data:
                image = ""
                title = "%s (%s fichas)" % (child["Title"],
                                            child["FichasInList"])
                images = []
                for i in range(1, 5):
                    if "sinimagen.png" not in child["Poster%s" % i]:
                        images.append(child["Poster%s" % i].replace(
                            "/100/", "/400/"))
                if images:
                    image = images[random.randint(0, len(images) - 1)]
                url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (
                    apikey, sid, item.ficha)
                post = "lista_id[]=%s" % child["Id"]
                itemlist.append(
                    item.clone(title=title,
                               url=url,
                               post=post,
                               thumbnail=image,
                               folder=False))

        return itemlist
    elif "Crear nueva lista" in item.title:
        from platformcode import platformtools
        nombre = platformtools.dialog_input(
            "", "Introduce un nombre para la lista")
        if nombre:
            dict_priv = {0: 'Pública', 1: 'Privada'}
            priv = platformtools.dialog_select("Privacidad de la lista",
                                               ['Pública', 'Privada'])
            if priv != -1:
                url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (
                    apikey, sid)
                post = "name=%s&private=%s" % (nombre, priv)
                data = httptools.downloadpage(url, post)
                platformtools.dialog_notification(
                    "Lista creada correctamente",
                    "Nombre: %s - %s" % (nombre, dict_priv[priv]))
                platformtools.itemlist_refresh()
        return
    elif re.search(r"(?i)Seguir Lista", item.title):
        from platformcode import platformtools
        data = httptools.downloadpage(item.url)
        platformtools.dialog_notification("Operación realizada con éxito",
                                          "Lista: %s" % item.lista)
        return
    elif item.post:
        from platformcode import platformtools
        data = httptools.downloadpage(item.url, item.post).data
        platformtools.dialog_notification("Ficha añadida a la lista",
                                          "Lista: %s" % item.title)
        platformtools.itemlist_refresh()
        return

    data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    bloque = scrapertools.find_single_match(
        data,
        item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)')
    matches = scrapertools.find_multiple_matches(
        bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>')
    for category, contenido in matches:
        itemlist.append(
            item.clone(action="", title=category, text_color=color3))

        patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
                 '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
        entradas = scrapertools.find_multiple_matches(contenido, patron)
        for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
            tipo = "movie"
            scrapedurl = host + scrapedurl
            scrapedthumbnail = host + scrapedthumbnail
            action = "findvideos"
            if __menu_info__:
                action = "menu_info"
            if serie:
                tipo = "tvshow"
            if episodio:
                title = "      %s - %s" % (episodio.replace("X",
                                                            "x"), scrapedtitle)
            else:
                title = "      " + scrapedtitle

            new_item = Item(channel=item.channel,
                            action=action,
                            title=title,
                            url=scrapedurl,
                            thumbnail=scrapedthumbnail,
                            contentTitle=scrapedtitle,
                            contentType=tipo,
                            text_color=color2)
            if new_item.contentType == "tvshow":
                new_item.show = scrapedtitle
                if not __menu_info__:
                    new_item.action = "episodios"

            itemlist.append(new_item)

    return itemlist
Code Example #38
File: pelispedia.py Project: mrgaturus/addon
def play(item):
    logger.info("url=%s" % item.url)
    itemlist = []

    if item.url.startswith("https://pelispedia.video/v.php"):

        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
        
        for h in resp.headers:
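            # Scan the response headers for the Cloudflare __cfduid cookie value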
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, 'b0a8c83650f18ccc7c87b16e3c460474'+'yt'+'b0a8c83650f18ccc7c87b16e3c460474'+'2653')
                playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
                if playparms:
                    link = playparms[0]
                    subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
                else:
                    link = scrapertools.find_single_match(item.url, 'id=([^;]*)')
                    subtitle = ''
                # ~ logger.info("gsv: %s token: %s ck: %s link: %s" % (gsv, token, ck, link))

                post = "link=%s&token=%s" % (link, token)
                headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage("https://pelispedia.video/plugins/gkpedia.php", post=post, headers=headers, cookies=False).data
                
                mp4 = scrapertools.find_single_match(data, '"link":"([^"]*)')
                if mp4:
                    mp4 = mp4.replace('\/', '/')
                    if 'chomikuj.pl/' in mp4: mp4 += "|Referer=%s" % item.referer
                    itemlist.append(['.mp4', mp4, 0, subtitle])
                
                break


    elif item.url.startswith("https://load.pelispedia.vip/embed/"):
        
        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)

        for h in resp.headers:
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, '4fe554b59d760c9986c903b07af8b7a4'+'yt'+'4fe554b59d760c9986c903b07af8b7a4'+'785446346')
                url = item.url.replace('/embed/', '/stream/') + '/' + token
                # ~ logger.info("gsv: %s token: %s ck: %s" % (gsv, token, ck))

                headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage(url, headers=headers, cookies=False).data
                
                url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
                srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
                if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'

                if url != '' and srv != '':
                    itemlist.append(item.clone(url=url, server=srv.lower()))

                elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
                    if '|fastplay|' in data:
                        packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
                        from lib import jsunpack
                        data = jsunpack.unpack(packed)
                        data = data.replace("\\'", "'")

                    matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
                    subtitle = ''
                    for fil, lbl in matches:
                        if fil.endswith('.srt') and not fil.endswith('empty.srt'):
                            subtitle = fil
                            if not subtitle.startswith('http'):
                                domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
                                subtitle = domi + subtitle
                            break

                    for fil, lbl in matches:
                        if not fil.endswith('.srt'):
                            itemlist.append([lbl, fil, 0, subtitle])

                break


    else:
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__

    logger.info("retorna itemlist: %s" % itemlist)
    return itemlist
Code Example #39
File: pepecine.py Project: koko200/pelisalacarta
def get_only_episodio(item):
    logger.info()
    itemlist = []
    plot={}
    
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)","",httptools.downloadpage(item.url).data)
    patron ='vars.title =(.*?)};'
    try:
        logger.debug(scrapertools.get_match(data,patron) +'}')
        data_dict= jsontools.load_json(scrapertools.get_match(data,patron) +'}')
    except:
        return itemlist  # Return an empty list

    try:
        from core.tmdb import Tmdb
        oTmdb= Tmdb(id_Tmdb= data_dict['tmdb_id'],tipo="tv")
    except:
        pass

    infoLabels = item.infoLabels
    if data_dict.has_key("actor"):
        cast=[]
        rol=[]
        for actor in data_dict["actor"]:
            cast.append(actor['name'])
            rol.append(actor['pivot']['char_name'])
        infoLabels['cast'] = cast
        infoLabels['castandrole'] = zip(cast, rol)

    if data_dict.has_key("writer"):
        writers_list=[]
        for writer in data_dict["writer"]:
            writers_list.append(writer['name'])
        infoLabels['writer'] = ", ".join(writers_list)

    if data_dict.has_key("director"):
        director_list=[]
        for director in data_dict["director"]:
            director_list.append(director['name'])
        infoLabels['director'] = ", ".join(director_list)


    infoLabels['season'], infoLabels['episode']= item.extra.split('x')
    try:
        # add the episode synopsis and images
        datos_tmdb=oTmdb.get_episodio(temporada= infoLabels['season'],capitulo= infoLabels['episode'])
        if datos_tmdb["episodio_sinopsis"] !="": infoLabels['plot']= datos_tmdb["episodio_sinopsis"]
        if datos_tmdb["episodio_imagen"] !="": item.thumbnail= datos_tmdb["episodio_imagen"]
        #if datos_tmdb["episodio_titulo"] !="": title = title + " [COLOR 0xFFFFE6CC]" + datos_tmdb["episodio_titulo"].replace('\t','') + "[/COLOR]"
    except:
        pass
    
    def cap(l):
        try:
            temporada_link = int(l["season"])
            capitulo_link = int(l['episode'])
        except:
            return False
        return temporada_link == int(infoLabels['season']) and capitulo_link == int(infoLabels['episode'])

    item.url = str(filter(cap, data_dict["link"]))  # filter links by episode

    item.infoLabels = infoLabels
    item.extra=str(data_dict['tmdb_id'])
    
    return findvideos(item)
Code Example #40
def episodios(item):  # This function must always be named episodios
    logger.info('[filmsenzalimiticc.py] episodios')
    itemlist = []

    # Find the seasons

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the content
    patron = r'<iframe src="([^"]+)".*?>'
    url = scrapertools.find_single_match(data, patron)

    # Load the page
    data = httptools.downloadpage(url).data.replace('\t', '').replace('\n', '')

    # Extract the content
    section_stagione = scrapertools.find_single_match(
        data, r'Stagioni<\/a>(.*?)<\/ul>')
    patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
    seasons = re.compile(patron, re.DOTALL).findall(section_stagione)

    for scrapedseason_url, scrapedseason in seasons:

        # Find the episodes

        season_url = urlparse.urljoin(url, scrapedseason_url)

        # Load the page
        data = httptools.downloadpage(season_url).data.replace('\t',
                                                               '').replace(
                                                                   '\n', '')

        # Extract the content
        section_episodio = scrapertools.find_single_match(
            data, r'Episodio<\/a>(.*?)<\/ul>')
        patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
        episodes = re.compile(patron, re.DOTALL).findall(section_episodio)

        for scrapedepisode_url, scrapedepisode in episodes:
            episode_url = urlparse.urljoin(url, scrapedepisode_url)

            title = scrapedseason + 'x' + scrapedepisode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType='episode',
                     title=title,
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    # "Add to library" link
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR lightblue][B]Aggiungi Serie alla videoteca[/B][/COLOR]',
                url=item.url,
                action='add_serie_to_library',
                extra='episodios' + '###' + item.extra,
                show=item.show))

    return itemlist
Code Example #41
File: livesportws.py Project: gacj22/WizardGacj22
def mainlist(item):
    logger.info("deportesalacarta.livesportsws lista")
    itemlist = []
    import xbmc
    check=xbmc.getInfoLabel('ListItem.Title')
    
    if item.channel != __channel__:
        item.channel = __channel__
    else:
       if not xbmc.Player().isPlaying():
          xbmc.executebuiltin('xbmc.PlayMedia('+song+')')
    
    
    
    """
        Lo que ocurre con
        url = http://translate.googleusercontent.com/translate_c?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhgzJfI1TDn3BxGgPbjgAHHS7J0i9g
        Redirecciones:
        1. http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/
        2. http://translate.googleusercontent.com/translate_p?nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&depth=2&usg=ALkJrhgAAAAAVupk4tLINTbmU7JrcQdl0G4V3LtnRM1n
        3. http://translate.googleusercontent.com/translate_c?depth=2&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhhhRDwHSDRDN4t27cX5CYZLFFQtmA
        Lo que significa que necesitamos una key nueva cada vez en el argumento "usg" y para llegar a la url 3 debemos hacer la petición 1 y 2 con 'follow_redirects=False' o con la convinación de 'follow_redirects=False' y 'header_to_get="location"'
        """
    
    #### Option 1: 'follow_redirects=False'
    ## Request 1
    url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://livesport.ws/football"
    data = dhe(httptools.downloadpage(url, follow_redirects=False).data)#.decode('cp1251').encode('utf8')
    ## Request 2
    url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
    data = dhe(httptools.downloadpage(url, follow_redirects=False).data)#.decode('cp1251').encode('utf8')
    ## Request 3
    url = scrapertools.get_match(data, 'URL=([^"]+)"')
    data = dhe(httptools.downloadpage(url).data)#.decode('cp1251').encode('utf8')
    """
        #### Opción 2: 'follow_redirects=False' y 'header_to_get="location"'
        ## Petición 1
        url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/"
        data = dhe( scrapertools.downloadpage(url,follow_redirects=False) )#.decode('cp1251').encode('utf8')
        ## Petición 2
        url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
        url = scrapertools.get_header_from_response(url, header_to_get="location")
        ## Petición 3
        data = dhe( scrapertools.cachePage(url ) )#.decode('cp1251').encode('utf8')
        """
    
    
    
    patrondata = '</h1></div>(.*?)</h2>'
    matchesdata = re.compile(patrondata,re.DOTALL).findall(data)
    for bloque_data in matchesdata:

        # The original iterated matchesdata a second time at this indent, processing
        # every block len(matchesdata) times; a no-op guard preserves the indentation.
        if True:
            patrondaygame = '<span class=text>.*?<span class=text>(.*?)</span></a>(.*?)</span> --></li></ul></div>'
            matchesdaygame = re.compile(patrondaygame,re.DOTALL).findall(bloque_data)

            for day , bloque_games in matchesdaygame:
                day = re.sub(r"</span>|<i class=ico><span>de</span></i>|<span class=text>|de","",day)
                day = day.replace("actuales","Hoy")
                day = scrapertools.htmlclean(day)
                dia = scrapertools.get_match(day, '(\d+)')
                mes = re.sub(r"(?i)de |hoy |ayer |mañana |el |día ", "", day)
                mes_ = scrapertools.find_single_match(mes, '\d+\s*([A-z]+)')
                if not mes_:
                    mes_ = scrapertools.find_single_match(mes, '([A-z]+)\s*\d+,')
                mes = mes_.title()
                mes = month_convert(mes)
                mes = str(mes).zfill(2)
                
                if "hoy" in day or "Hoy" in day:
                    day = day.replace(day,"[COLOR yellow][B]"+day+"[/B][/COLOR]")
                elif "Ayer" in day or "ayer" in day:
                      day = day.replace(day,"[COLOR darkgoldenrod][B]"+day+"[/B][/COLOR]")
                else:
                     day = day.replace(day,"[COLOR greenyellow][B]"+day+"[/B][/COLOR]")
                itemlist.append( Item(channel=__channel__, title=day,action="mainlist",url="",fanart="http://www.easywallprints.com/upload/designs/background-with-soccer-balls-zoom-1.jpg",thumbnail="http://s6.postimg.org/3yl2y4adt/livesportagenda.png",folder=False) )
                
                patron = 'es&u=(.*?)&usg.*?id=event-(.*?)>(.*?)</i>.*?<span class=competition>.*?<span class=competition>(.*?)</span></a>.*?<i class="separator">.*?</span>(.*?)</span>.*?src=(.*?)>.*?src=(.*?)>.*?text-align: left">.*?</span>(.*?)</span>.*?<i class="live-broadcasting-status-(\d)"'#'<a class="link" href="([^"]+)" title="(.*?)".*?<span class="liga"><span>(.*?)</span></span>.*?<span class="date"><span>(.*?)</span></span>'
                matches = re.compile(patron,re.DOTALL).findall(bloque_games)
                for url_info,id_event, hora,competition,team1,thumbnail,fanart,team2 , status in matches:
                    team1 = re.sub(r"-"," ",team1)
                    team2=  re.sub(r"-"," ",team2)
                    competition = re.sub(r"\.","",competition)
                    
                    
                    if status == "4":
                        continue
                    
                    if "00:" in hora:
                        hora = hora.replace("00:","24:")
                    
                    if not "LIVE" in hora:
                       time= re.compile('(\d+):(\d+)',re.DOTALL).findall(hora)
                       for horas, minutos in time:
                           wrong_time =int(horas)
                           value = 1
                           correct_time = wrong_time - value
                           correct_time = str(correct_time)
                           hora = correct_time +":"+ minutos
                           
                           
                    
                
                    if "OFFLINE" in hora:
                        extra = hora
                        title = team1+"-"+team2+"____"
                        title = title.title()
                        fulltitle =title.replace(title,"[COLOR burlywood][B]"+title+"[/B][/COLOR]")
                        title= title.replace(title,"[COLOR burlywood]"+title+"[/COLOR]")
                        action = "mainlist"
                        folder = False
                        evento = ""
                        time = ""
                        fecha = ""
                    else:
                        if "hoy" in day or "Hoy" in day:
                            title = team1+" - "+team2
                            title = title.title()
                            fulltitle =title.replace(title,"[COLOR deepskyblue][B]"+title+"[/B][/COLOR]")
                            if "LIVE" in hora:
                               import time
                               
                               time = "live"
                               fecha = dia+"/"+str(mes)
                               fecha = fecha.strip()
                               evento = team1+" vs "+team2
                               extra= hora
                               hora = u'\u006C\u0456\u0475\u04BC!!'.encode('utf-8')
                               hora = hora.replace(hora,"[COLOR crimson][B]"+hora+"[/B][/COLOR]")
                               
                            else:
                                evento = team1+" vs "+team2
                                time = hora.strip()
                                fecha = dia+"/"+str(mes)
                                fecha = fecha.strip()
                                extra = hora
                                hora = hora.replace(hora,"[COLOR aquamarine][B]"+hora+"[/B][/COLOR]")
                          
                            title = hora+ "  " + title.replace(title,"[COLOR deepskyblue]"+title+"[/COLOR]")+ "[COLOR floralwhite]"+" "+"("+competition+")"+"[/COLOR]"
                            action = "enlaces"
                            folder = True
                        else:
                            title = team1+" - "+team2
                            evento = team1+" vs "+team2
                            time = hora
                            fecha = dia+"/"+mes
                            title = title.title()
                            fulltitle =title.replace(title,"[COLOR mediumaquamarine][B]"+title+"[/B][/COLOR]")
                            title = "[COLOR aquamarine][B]"+hora+"[/B][/COLOR]"+ "  " + title.replace(title,"[COLOR mediumaquamarine]"+title+"[/COLOR]")+ "[COLOR paleturquoise]"+" "+"("+competition+")"+"[/COLOR]"
                            action = "enlaces"
                            folder = True
                            extra = hora
                            
                    post_id = scrapertools.get_match(url_info,'http.*?livesport.ws\/(.*?)-')
                    url = "http://livesport.ws/engine/modules/sports/sport_refresh.php?from=event&event_id="+id_event+"&tab_id=0&post_id="+post_id
                    
                    itemlist.append( Item(channel=__channel__, title="     "+title,action=action,url=url,thumbnail =urlparse.urljoin(host,thumbnail),fanart =urlparse.urljoin(host,fanart),fulltitle = fulltitle,extra =extra,date=fecha, time=time, evento=evento, context="info_partido",deporte="futbol",folder=folder) )

    return itemlist
Code Example #42
def video(item):
    logger.info('[filmsenzalimiticc.py] video')
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data.replace('\n',
                                                         '').replace('\t', '')

    # Extract the content
    patron = r'<div class="mediaWrap mediaWrapAlt">.*?<a href="([^"]+)".*?src="([^"]+)".*?<p>([^"]+) (\(.*?)streaming<\/p>.*?<p>\s*(\S+).*?<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedyear = scrapertools.decodeHtmlentities(scrapedyear)
        scrapedquality = scrapertools.decodeHtmlentities(scrapedquality)

        year = scrapedyear.replace('(', '').replace(')', '')
        infolabels = {}
        if year:
            infolabels['year'] = year

        title = scrapedtitle + ' ' + scrapedyear + ' [' + scrapedquality + ']'

        # Choose between TV series and films
        if item.contentType == 'movie':
            azione = 'findvideos'
            tipologia = 'movie'
        elif item.contentType == 'tvshow':
            azione = 'episodios'
            tipologia = 'tv'

        itemlist.append(
            Item(channel=item.channel,
                 action=azione,
                 contentType=item.contentType,
                 title=title,
                 fulltitle=scrapedtitle,
                 text_color='azure',
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infolabels,
                 show=scrapedtitle))

    # Next page
    next_page = scrapertools.find_single_match(
        data, '<a class="nextpostslink".*?href="([^"]+)">')

    if next_page != '':
        itemlist.append(
            Item(
                channel=item.channel,
                action='film',
                title='[COLOR lightgreen]' +
                config.get_localized_string(30992) + '[/COLOR]',
                url=next_page,
                contentType=item.contentType,
                thumbnail=
                'http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'
            ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
Code Example #43
def lista(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    action = "play"
    if config.get_setting("menu_info", "freecambay"):
        action = "menu_info"

    # Extract the entries
    patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
        if duration:
            scrapedtitle = "%s - %s" % (duration, scrapedtitle)
        if '>HD<' in quality:
            scrapedtitle += "  [COLOR red][HD][/COLOR]"

        itemlist.append(
            item.clone(action=action,
                       title=scrapedtitle,
                       url=scrapedurl,
                       thumbnail=scrapedthumbnail,
                       fanart=scrapedthumbnail))

    # Extract the next-page marker
    if item.extra:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)',
                                   '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
    else:
        next_page = scrapertools.find_single_match(
            data, '<li class="next">.*?href="([^"]*)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(host, next_page)
            itemlist.append(
                item.clone(action="lista",
                           title=">> Página Siguiente",
                           url=next_page))
        else:
            next_page = scrapertools.find_single_match(
                data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from=" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page,
                                       item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(
                    item.clone(action="lista",
                               title=">> Página Siguiente",
                               url=next_page))

    return itemlist
Code Example #44
File: noodlemagazine.py Project: SistemaRayoXP/addon
def test_video_exists(page_url):
    logger.info()
    data = httptools.downloadpage(page_url).data
    if "not found" in data:
        return False, "[noodlemagazine] El video ha sido borrado o no existe"
    return True, ""
Code Example #45
def findvideos(item):
    logger.info()

    itemlist = []
    itemlist2 = []
    headers = {'Referer': item.url}

    server_url = {
        'gamovideo': 'http://gamovideo.com/embed-%s.html',
        'gounlimited': 'https://gounlimited.to/embed-%s.html',
        'streamplay': 'https://streamp1ay.me/player-%s.html',
        'powvideo': 'https://powvldeo.net/iframe-%s-1536x701.html',
        'vidcloud': 'https://vidcloud.co/player?fid=%s&page=embed',
        'vidlox': 'https://vidlox.me/embed-%s.html',
        'clipwatching': 'https://clipwatching.com/embed-%s.html',
        'jetload': 'https://jetload.net/e/%s',
        'mixdrop': 'https://mixdrop.co/e/%s'
    }

    data = get_source(item.url)
    s_id = scrapertools.find_single_match(
        data, r'id="loadVideos".*?secid="(\w\d+)"')

    if s_id:
        import requests
        url = host + 'json/loadVIDEOS'
        header = {
            'User-Agent':
            'Mozilla/5.0 (Android 10; Mobile; rv:70.0) Gecko/70.0 Firefox/70.0'
        }
        session = requests.Session()
        page = session.post(url, data={'id': s_id}, headers=header).json()

        if page.get('status', '') == 200:
            data2 = page['result']
            patron = r"C_One\(this, (\d+), '([^']+)'.*?"
            patron += r'src=".*?/img/(\w+)'
            matches = re.compile(patron, re.DOTALL).findall(data2)
            for language, url, server in matches:

                req = httptools.downloadpage(url,
                                             headers=headers,
                                             follow_redirects=False)
                location = req.headers.get('location', None)

                if location:
                    url = location
                else:
                    new_data = req.data.replace("'", '"')
                    url = scrapertools.find_single_match(
                        new_data, 'file": "([^"]+)"')
                if not url:
                    continue
                try:
                    server = server.split(".")[0]
                except:
                    server = ""

                if 'betaserver' in server:
                    server = 'directo'

                lang = IDIOMAS.get(language, 'VO')

                quality = 'Oficial'

                title = '%s [%s] [%s]' % (server.capitalize(), lang, quality)

                itemlist.append(
                    Item(channel=item.channel,
                         title=title,
                         url=url,
                         action='play',
                         language=lang,
                         quality=quality,
                         server=server,
                         headers=headers,
                         infoLabels=item.infoLabels,
                         p_lang=language))

    patron = '<li><a href="([^"]+)".*?<img.*?>([^<]+)<b>([^<]+)<.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, server, quality, language in matches:
        if '/sc_' in url:
            continue
        if url != '':

            try:
                server = server.split(".")[0].replace('1', 'l')
            except:
                continue

            _id = scrapertools.find_single_match(url, r'link/\w+_(.*)')

            url = server_url.get(server, url)

            if not url.startswith(host):
                url = url % _id

            language = scrapertools.find_single_match(language, r'/(\d+)\.png')
            lang = IDIOMAS.get(language, 'VO')

            title = '%s [%s] [%s]' % (server.capitalize(), lang, quality)

            itemlist2.append(
                Item(channel=item.channel,
                     title=title,
                     url=url,
                     action='play',
                     language=lang,
                     quality=quality,
                     server=server,
                     headers=headers,
                     infoLabels=item.infoLabels,
                     p_lang=language))

    itemlist2.sort(key=lambda i: (i.p_lang, i.server))

    itemlist.extend(itemlist2)

    if not itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 folder=False,
                 text_color='tomato',
                 title='[I] Aún no hay enlaces disponibles [/I]'))
        return itemlist

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
Code Example #46
File: trakt_tools.py Project: gacj22/WizardGacj22
def get_trakt_watched(id_type, mediatype, update=False):
    logger.info()

    id_list = []
    id_dict = dict()

    token_auth = config.get_setting("token_trakt", "trakt")

    if token_auth:
        sync_path = os.path.join(config.get_data_path(), 'settings_channels',
                                 'trakt')

        if os.path.exists(sync_path) and not update:
            trakt_node = jsontools.get_node_from_file('trakt', "TRAKT")
            if mediatype == 'shows':
                return trakt_node['shows']
            if mediatype == 'movies':
                return trakt_node['movies']

        else:
            token_auth = config.get_setting("token_trakt", "trakt")
            if token_auth:
                try:
                    token_auth = config.get_setting("token_trakt", "trakt")
                    headers = [['Content-Type', 'application/json'],
                               ['trakt-api-key', client_id],
                               ['trakt-api-version', '2']]
                    if token_auth:
                        headers.append(
                            ['Authorization',
                             "Bearer %s" % token_auth])
                        url = "https://api.trakt.tv/sync/watched/%s" % mediatype
                        #data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
                        data = httptools.downloadpage(url,
                                                      headers=headers).data
                        watched_dict = jsontools.load(data)
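                        # movies: list of {movie: {ids}}; shows: nested seasons -> episodes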

                        if mediatype == 'shows':

                            dict_show = dict()
                            for item in watched_dict:
                                temp = []
                                id_ = str(item['show']['ids']['tmdb'])
                                season_dict = dict()
                                for season in item['seasons']:
                                    ep = []
                                    number = str(season['number'])
                                    # season_dict = dict()
                                    for episode in season['episodes']:
                                        ep.append(str(episode['number']))
                                    season_dict[number] = ep
                                    temp.append(season_dict)
                                dict_show[id_] = season_dict
                                id_dict = dict_show
                            return id_dict

                        elif mediatype == 'movies':
                            for item in watched_dict:
                                id_list.append(
                                    str(item['movie']['ids'][id_type]))
                except:
                    pass

    return id_list
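
For reference, a small usage sketch of the structure returned above, assuming a valid Trakt token is configured (the show id '1396' is hypothetical); for shows it is {tmdb_id: {season_number: [episode_numbers]}}:

watched_shows = get_trakt_watched('tmdb', 'shows')
season_map = watched_shows.get('1396', {})  # '1396' is a hypothetical tmdb show id
for season, episodes in sorted(season_map.items()):
    print("Season %s: %s episodes watched" % (season, len(episodes)))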
Code Example #47
File: gigasize.py Project: proyeus1972/addon
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if '<h2 class="error">Download error</h2>' in data:
        return False, "El enlace no es válido<br/>o ha sido borrado de gigasize"
    return True, ""
Code Example #48
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)
    pfxfx = ""
    data = flashx_data
    data = data.replace("\n", "")
    cgi_counter = scrapertools.find_single_match(
        data,
        """(?is)src=.(https://www.flashx.../counter.cgi.*?[^(?:'|")]+)""")
    cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
    playnow = scrapertools.find_single_match(data,
                                             'https://www.flashx.../dl[^"]+')
    # Obtain the f and fxfx parameters
    js_fxfx = "https://www." + scrapertools.find_single_match(
        data.replace("//", "/"),
        """(?is)(flashx.../js\w+/c\w+.*?[^(?:'|")]+)""")
    if len(js_fxfx) > 15:
        data_fxfx = httptools.downloadpage(js_fxfx).data
        mfxfx = scrapertools.find_single_match(data_fxfx,
                                               'get.*?({.*?})').replace(
                                                   "'", "").replace(" ", "")
        matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
        for f, v in matches:
            pfxfx += f + "=" + v + "&"
    logger.info("mfxfxfx1= %s" % js_fxfx)
    logger.info("mfxfxfx2= %s" % pfxfx)
    if pfxfx == "":
        pfxfx = "f=fail&fxfx=6"
    coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx

    # These two files must be downloaded first, otherwise the site returns an error
    httptools.downloadpage(coding_url, cookies=False)
    httptools.downloadpage(cgi_counter, cookies=False)

    ts = int(time.time())
    flash_ts = scrapertools.find_single_match(flashx_hash_f, '-(\d{10})-')
    # find_single_match returns '' on no match; clamp negative waits to zero
    wait_time = max(0, int(flash_ts) - ts) if flash_ts else 6
    platformtools.dialog_notification(
        'Cargando flashx', 'Espera de %s segundos requerida' % wait_time)

    try:
        time.sleep(wait_time)
    except:
        time.sleep(6)

    data = httptools.downloadpage(playnow, post=flashx_post).data
    # If a warning page appears, load the verification page and then retry the initial one
    # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE KEPT
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(
            data, 'try to reload the page.*?href="([^"]+)"')
        try:
            data = httptools.downloadpage(url_reload).data
            data = httptools.downloadpage(playnow, post=flashx_post).data
        # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE KEPT
        except:
            pass

    matches = scrapertools.find_multiple_matches(
        data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
    video_urls = []
    for match in matches:
        try:
            match = jsunpack.unpack(match)
            match = match.replace("\\'", "'")
            media_urls = scrapertools.find_multiple_matches(
                match, "{src:'([^']+)'.*?,label:'([^']+)'")
            subtitle = ""
            for media_url, label in media_urls:
                if media_url.endswith(".srt") and label == "Spanish":
                    try:
                        from core import filetools
                        # downloadpage returns a response object; write its
                        # .data payload, not the object itself
                        sub_data = httptools.downloadpage(media_url).data
                        subtitle = os.path.join(config.get_data_path(),
                                                'sub_flashx.srt')
                        filetools.write(subtitle, sub_data)
                    except:
                        import traceback
                        logger.info("Error al descargar el subtítulo: " +
                                    traceback.format_exc())

            for media_url, label in media_urls:
                if not media_url.endswith("png") and not media_url.endswith(
                        ".srt"):
                    video_urls.append([
                        "." + media_url.rsplit('.', 1)[1] + " [flashx]",
                        media_url, 0, subtitle
                    ])

            for video_url in video_urls:
                logger.info("%s - %s" % (video_url[0], video_url[1]))
        except:
            pass

    return video_urls
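Once jsunpack.unpack() has reversed the eval(function(p,a,c,k,e,d)...) packing, the player setup is plain text and the {src:'...',label:'...'} pairs fall out of a single regex, exactly as above. A self-contained sketch with made-up sample input:

import re

# Illustrative sample of unpacked player JS (not real site output)
unpacked = ("jwplayer('vplayer').setup({sources:["
            "{src:'http://example.com/v.mp4',label:'480p'},"
            "{src:'http://example.com/v.srt',label:'Spanish'}]});")

pairs = re.findall(r"{src:'([^']+)'.*?,label:'([^']+)'", unpacked)
videos = [(url, label) for url, label in pairs
          if not url.endswith(('.srt', '.png'))]
print(videos)  # [('http://example.com/v.mp4', '480p')]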
Code example #49
def findvideos(item):
    servidor = {
        "http://uptobox.com/": "uptobox",
        "http://userscloud.com/": "userscloud",
        "https://my.pcloud.com/publink/show?code=": "pcloud",
        "http://thevideos.tv/": "thevideos",
        "http://ul.to/": "uploadedto",
        "http://turbobit.net/": "turbobit",
        "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
        "http://www.mediafire.com/download/": "mediafire",
        "https://www.youtube.com/watch?v=": "youtube",
        "http://thevideos.tv/embed-": "thevideos",
        "//www.youtube.com/embed/": "youtube",
        "http://ok.ru/video/": "okru",
        "http://ok.ru/videoembed/": "okru",
        "http://www.cinemaqualidade.com/protect/v.html?i=":
        "cinemaqualidade.com",
        "http://usersfiles.com/": "usersfiles",
        "https://depositfiles.com/files/": "depositfiles",
        "http://www.nowvideo.sx/video/": "nowvideo",
        "http://vidbull.com/": "vidbull",
        "http://filescdn.com/": "filescdn",
        "https://www.yourupload.com/watch/": "yourupload",
        "http://www.cinecalidad.to/protect/gdredirect.php?l=": "directo",
        "https://openload.co/embed/": "openload",
        "https://streamango.com/embed/f/": "streamango",
        "https://www.rapidvideo.com/embed/": "rapidvideo",
    }

    logger.info()
    itemlist = []
    duplicados = []

    if 'cinemaqualidade' in item.url:
        lang = 'portugues'
    elif 'espana' in item.url:
        lang = 'castellano'
    else:
        # 'cinecalidad' URLs and anything unrecognized default to latino,
        # so `lang` is always defined below
        lang = 'latino'

    data = httptools.downloadpage(item.url).data
    patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    server_url = {
        'YourUpload': 'https://www.yourupload.com/embed/',
        'Openload': 'https://openload.co/embed/',
        'TVM': 'https://thevideo.me/embed-',
        'Streamango': 'https://streamango.com/embed/',
        'RapidVideo': 'https://www.rapidvideo.com/embed/',
        'Trailer': '',
        'BitTorrent': '',
        'Mega': '',
        'MediaFire': ''
    }
    dec_value = scrapertools.find_single_match(
        data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')

    torrent_link = scrapertools.find_single_match(
        data, '<a href="/protect/v\.php\?i=([^"]+)"')
    if torrent_link != '':
        import urllib
        base_url = '%s/protect/v.php' % host
        post = {'i': torrent_link, 'title': item.title}
        post = urllib.urlencode(post)
        headers = {'Referer': item.url}
        protect = httptools.downloadpage(base_url + '?' + post,
                                         headers=headers).data
        url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
        server = 'torrent'

        title = item.contentTitle + ' (%s)' % server
        quality = 'default'
        language = IDIOMAS[lang]

        new_item = Item(channel=item.channel,
                        action='play',
                        title=title,
                        fulltitle=item.contentTitle,
                        url=url,
                        language=language,
                        thumbnail=item.thumbnail,
                        quality=quality,
                        server=server)
        itemlist.append(new_item)

    for video_cod, server_id in matches:
        # Skip servers without a known base URL, so `url`, `server` and
        # `thumbnail` can never leak over from a previous iteration
        if server_id not in server_url:
            continue
        if server_id not in ['MediaFire', 'Trailer', '']:
            video_id = dec(video_cod, dec_value)

        server = server_id.lower()
        thumbnail = item.thumbnail
        if server_id == 'TVM':
            server = 'thevideome'
            url = server_url[server_id] + video_id + '.html'
        else:
            url = server_url[server_id] + video_id
        title = item.contentTitle + ' (%s)' % server
        quality = 'default'

        if server_id not in ['Mega', 'MediaFire', 'Trailer']:

            language = [IDIOMAS[lang], 'vose']
            if url not in duplicados:
                new_item = Item(channel=item.channel,
                                action='play',
                                title=title,
                                fulltitle=item.contentTitle,
                                url=url,
                                language=language,
                                thumbnail=thumbnail,
                                quality=quality,
                                server=server)
                itemlist.append(new_item)
                duplicados.append(url)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support(
    ) and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library",
                extra="findvideos",
                contentTitle=item.contentTitle,
            ))

    return itemlist
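dec() is defined elsewhere in this channel; the page itself hints at its job with String.fromCharCode(parseInt(str[i]) - N), i.e. each number minus a page-supplied offset is one character code of the real video id. A hedged reconstruction, assuming the encoded value is a comma-separated list of integers (the separator is an assumption):

def dec(coded, offset):
    # Hypothetical mirror of the site's JS de-obfuscation
    return ''.join(chr(int(n) - int(offset))
                   for n in coded.split(',') if n.strip())

# dec('101,102,103', 1) -> 'def'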
Code example #50
def listas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = xml2dict(data)
    if item.extra == "listas":
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas más seguidas",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=1",
                 extra="listas_plus"))
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas con más fichas",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=2",
                 extra="listas_plus"))
        itemlist.append(
            Item(channel=item.channel,
                 title="Listas aleatorias",
                 action="listas",
                 text_color=color1,
                 url=item.url + "&orden=3",
                 extra="listas_plus"))
        if data["Data"]["ListasSiguiendo"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Listas que sigo",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="sigo"))
        if data["Data"]["TusListas"] != "\t":
            itemlist.append(
                Item(channel=item.channel,
                     title="Mis listas",
                     action="listas",
                     text_color=color1,
                     url=item.url,
                     extra="mislistas"))

        return itemlist

    elif item.extra == "sigo":
        data = data["Data"]["ListasSiguiendo"]["Item"]
    elif item.extra == "mislistas":
        data = data["Data"]["TusListas"]["Item"]
    else:
        data = data["Data"]["Listas"]["Item"]

    if type(data) is not list:
        data = [data]
    import random
    for child in data:
        image = ""
        title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
        images = []
        for i in range(1, 5):
            if "sinimagen.png" not in child["Poster%s" % i]:
                images.append(child["Poster%s" % i].replace("/100/", "/400/"))
        if images:
            image = random.choice(images)
        url = host + "/l%s" % child["Id"]
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 url=url,
                 text_color=color3,
                 thumbnail=image,
                 title=title,
                 extra=item.extra))

    if len(itemlist) == 20:
        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
        end = int(start) + 20
        url = re.sub(r'start=%s' % start, 'start=%s' % end, item.url)
        itemlist.append(item.clone(title=">> Página Siguiente", url=url))

    return itemlist
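One quirk worth noting: xml2dict yields a dict when an XML node has a single <Item> child and a list when it has several, which is why the code above normalizes with `if type(data) is not list: data = [data]`. The same guard reappears in later examples; a tiny reusable helper could look like this (the name is illustrative):

def as_list(node):
    # Normalize an xml2dict node to a list of dicts
    if node is None:
        return []
    return node if isinstance(node, list) else [node]

# for child in as_list(data["Data"]["Listas"]["Item"]): ...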
Code example #51
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    if not item.infoLabels["tmdb_id"]:
        item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
            data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
        item.infoLabels["year"] = scrapertools.find_single_match(
            data, 'class="e_new">(\d{4})')
    if not item.infoLabels["genre"]:
        item.infoLabels["genre"] = ", ".join(
            scrapertools.find_multiple_matches(
                data, '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, 'itemprop="description">([^<]+)</div>')

    dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
    patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
             '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
    matches = scrapertools.find_multiple_matches(data, patron)
    lista_epis = []
    for c_id, episodio, title, ficha, status in matches:
        episodio = episodio.replace("X", "x")
        if episodio in lista_epis:
            continue
        lista_epis.append(episodio)
        url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (
            ficha, c_id, dc)
        title = "%s - %s" % (episodio, title)
        if "_mc a" in status:
            title = "[COLOR %s]%s[/COLOR] %s" % (
                color5, u"\u0474".encode('utf-8'), title)

        new_item = Item(channel=item.channel,
                        action="findvideos",
                        title=title,
                        url=url,
                        thumbnail=item.thumbnail,
                        fanart=item.fanart,
                        show=item.show,
                        infoLabels=item.infoLabels,
                        text_color=color2,
                        referer=item.url,
                        contentType="episode")
        try:
            # Store numeric values so the sort below is numeric,
            # not lexicographic ('10' would otherwise sort before '2')
            new_item.infoLabels["season"], new_item.infoLabels[
                "episode"] = [int(x) for x in episodio.split('x', 1)]
        except:
            pass
        itemlist.append(new_item)

    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support() and not item.extra:
        title = "Añadir serie a la videoteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith(
                "smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, "SERIES")
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        for f in filename:
                            if f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            # dirpath already points at the show folder;
                            # joining dirname (a list) here was a bug
                            head_nfo, it = videolibrarytools.read_nfo(
                                filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "Serie ya en tu videoteca. [%s] ¿Añadir?" % ",".join(
                                canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(
            item.clone(action="add_serie_to_library",
                       title=title,
                       text_color=color5,
                       extra="episodios###library"))
    if itemlist and not __menu_info__:
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        itemlist.extend(acciones_fichas(item, sid, ficha))

    return itemlist
Code example #52
def findvideos(item):
    ## Kodi 17+
    ## Openload as default server

    import base64

    itemlist = []

    ## Urls
    urlServer = "https://openload.co/embed/%s/"
    urlApiGetKey = "https://serieslan.com/idv.php?i=%s"

    ## JS (the site's player obfuscation is a plain RC4 stream cipher)
    def txc(key, text):
        # Key-scheduling algorithm (KSA)
        s = list(range(256))
        j = 0
        res = ''
        for i in range(256):
            j = (j + s[i] + ord(key[i % len(key)])) % 256
            s[i], s[j] = s[j], s[i]
        # Keystream generation, XOR-ed against the input bytes
        i = 0
        j = 0
        for y in range(len(text)):
            i = (i + 1) % 256
            j = (j + s[i]) % 256
            s[i], s[j] = s[j], s[i]
            res += chr(ord(text[y]) ^ s[(s[i] + s[j]) % 256])
        return res

    data = httptools.downloadpage(item.url).data
    pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
    idv, ide = scrapertools.find_single_match(data, pattern)
    thumbnail = scrapertools.find_single_match(
        data,
        '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
    show = scrapertools.find_single_match(
        data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
    thumbnail = host + thumbnail
    data = httptools.downloadpage(urlApiGetKey % idv,
                                  headers={
                                      'Referer': item.url
                                  }).data
    video_url = urlServer % (txc(ide, base64.decodestring(data)))
    server = "openload"
    if " SUB" in item.title:
        lang = "VOS"
    elif " Sub" in item:
        lang = "VOS"
    else:
        lang = "Latino"
    title = "Enlace encontrado en " + server + " [" + lang + "]"
    itemlist.append(
        Item(channel=item.channel,
             action="play",
             title=title,
             show=show,
             url=video_url,
             plot=item.plot,
             thumbnail=thumbnail,
             server=server,
             folder=False))

    return itemlist
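Since txc() above is standard RC4, it is its own inverse: applying it twice with the same key returns the original input. A quick sanity check (values are illustrative):

key = 'ide-attribute-value'
secret = txc(key, 'openload-video-id')
assert txc(key, secret) == 'openload-video-id'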
Code example #53
File: mega.py Project: xvacat/addon
def test_video_exists(page_url):
    types = "Archivo"
    gen = "o"
    msg = "El link tiene algún problema."
    id_video = None
    get = ""
    seqno = random.randint(0, 0xFFFFFFFF)
    url = page_url.split("#")[1]
    f_id = url.split("!")[1]
    id_video = None
    if "|" in url:
        url, id_video = url.split("|")
    post = {'a': 'g', 'g': 1, 'p': f_id}
    isfolder = False
    if "/#F!" in page_url:
        get = "&n=" + f_id
        post = {"a": "f", "c": 1, "r": 0}
        isfolder = True
        types = "Carpeta"
        gen = "a"
        if id_video:
            # Checking a single video inside a folder gets complicated: there
            # is no direct way even knowing its id, so assume it exists
            return True, ""

    codes = {
        -1: 'Se ha producido un error interno en Mega.nz',
        -2: 'Error en la petición realizada, Cod -2',
        -3:
        'Un atasco temporal o malfuncionamiento en el servidor de Mega impide que se procese su link',
        -4:
        'Ha excedido la cuota de transferencia permitida. Vuelva a intentarlo más tarde',
        -6: types + ' no encontrad' + gen + ', cuenta eliminada',
        -9: types + ' no encontrad' + gen,
        -11: 'Acceso restringido',
        -13: 'Está intentando acceder a un archivo incompleto',
        -14: 'Una operación de desencriptado ha fallado',
        -15: 'Sesión de usuario expirada o invalida, logueese de nuevo',
        -16: types + ' no disponible, la cuenta del uploader fue baneada',
        -17: 'La petición sobrepasa su cuota de transferiencia permitida',
        -18:
        types + ' temporalmente no disponible, intentelo de nuevo más tarde'
    }
    api = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get)
    req_api = httptools.downloadpage(api, post=json.dumps([post])).data
    if isfolder:
        req_api = json.loads(req_api)
    else:
        try:
            req_api = json.loads(req_api)[0]
        except:
            req_api = json.loads(req_api)
    logger.error(req_api)
    if isinstance(req_api, (int, long)):
        if req_api in codes:
            msg = codes[req_api]
        return False, msg
    else:
        # Check the remaining transfer quota
        from megaserver import Client
        c = Client(url=page_url, is_playing_fnc=platformtools.is_playing)
        global files
        files = c.get_files()
        if files == 509:
            msg1 = "[B][COLOR tomato]El video excede el limite de visionado diario que Mega impone a los usuarios Free."
            msg1 += " Prueba en otro servidor o canal.[/B][/COLOR]"
            return False, msg1
        elif isinstance(files, (int, long)):
            return False, "Error codigo %s" % str(files)

        return True, ""
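The snippet drives Mega's public JSON API: a batched POST to g.api.mega.co.nz/cs, where a bare negative integer in the reply is an error code (see the codes table above). A minimal standalone probe for a single file id, assuming the requests library instead of httptools:

import json
import random
import requests

def mega_file_info(file_id):
    # One-element batch; reply[0] is either file metadata or a
    # negative error code from the table above
    api = 'https://g.api.mega.co.nz/cs?id=%d' % random.randint(0, 0xFFFFFFFF)
    reply = requests.post(api, data=json.dumps([{'a': 'g', 'g': 1, 'p': file_id}]))
    return reply.json()[0]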
Code example #54
def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":
        # Download the page
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        if not item.infoLabels["tmdb_id"]:
            item.infoLabels["tmdb_id"] = scrapertools.find_single_match(
                data, '<a href="https://www.themoviedb.org/'
                '[^/]+/(\d+)')
            item.infoLabels["year"] = scrapertools.find_single_match(
                data, 'class="e_new">(\d{4})')

        if __modo_grafico__:
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        if not item.infoLabels["plot"]:
            item.infoLabels["plot"] = scrapertools.find_single_match(
                data, 'itemprop="description">([^<]+)</div>')
        if not item.infoLabels["genre"]:
            item.infoLabels["genre"] = ", ".join(
                scrapertools.find_multiple_matches(
                    data, '<a itemprop="genre"[^>]+>'
                    '([^<]+)</a>'))

        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        if not ficha:
            ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
        cid = "0"
    else:
        ficha, cid = scrapertools.find_single_match(item.url,
                                                    'ficha=(\d+)&c_id=(\d+)')

    url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (
        apikey, sid, ficha, cid)
    data = httptools.downloadpage(url).data
    data = xml2dict(data)

    for k, v in data["Data"].items():
        try:
            if type(v) is dict:
                if k == "Online":
                    order = 1
                elif k == "Download":
                    order = 0
                else:
                    order = 2

                itemlist.append(
                    item.clone(action="",
                               title=k,
                               text_color=color3,
                               order=order))
                if type(v["Item"]) is str:
                    continue
                elif type(v["Item"]) is dict:
                    v["Item"] = [v["Item"]]
                for it in v["Item"]:
                    try:
                        thumbnail = "%s/styles/prosilver/imageset/%s.png" % (
                            host, it['Host'])
                        title = "   %s - %s/%s" % (it['Host'].capitalize(),
                                                   it['Quality'], it['Lang'])
                        calidad = int(
                            scrapertools.find_single_match(
                                it['Quality'], '(\d+)p'))
                        calidadaudio = it['QualityA'].replace("...", "")
                        subtitulos = it['Subtitles'].replace(
                            "Sin subtítulos", "")
                        if subtitulos:
                            title += " (%s)" % subtitulos
                        if calidadaudio:
                            title += "  [Audio:%s]" % calidadaudio

                        likes = 0
                        if it["Likes"] != "0" or it["Dislikes"] != "0":
                            likes = int(it["Likes"]) - int(it["Dislikes"])
                            title += "  (%s ok, %s ko)" % (it["Likes"],
                                                           it["Dislikes"])
                        if type(it["Url"]) is dict:
                            for i, enlace in enumerate(it["Url"]["Item"]):
                                titulo = title + "  (Parte %s)" % (i + 1)
                                itemlist.append(
                                    item.clone(title=titulo,
                                               url=enlace,
                                               action="play",
                                               calidad=calidad,
                                               thumbnail=thumbnail,
                                               order=order,
                                               like=likes,
                                               ficha=ficha,
                                               cid=cid,
                                               folder=False))
                        else:
                            url = it["Url"]
                            itemlist.append(
                                item.clone(title=title,
                                           url=url,
                                           action="play",
                                           calidad=calidad,
                                           thumbnail=thumbnail,
                                           order=order,
                                           like=likes,
                                           ficha=ficha,
                                           cid=cid,
                                           folder=False))
                    except:
                        pass
        except:
            pass

    if not config.get_setting("order_web", "playmax"):
        itemlist.sort(key=lambda it: (it.order, it.calidad, it.like),
                      reverse=True)
    else:
        itemlist.sort(key=lambda it: it.order, reverse=True)
    if itemlist:
        itemlist.extend(acciones_fichas(item, sid, ficha))

    if not itemlist and item.contentType != "movie":
        url = url.replace("apikey=%s&" % apikey, "")
        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
        estrenos = scrapertools.find_multiple_matches(data, patron)
        for info in estrenos:
            info = "Estreno en " + scrapertools.htmlclean(info)
            itemlist.append(item.clone(action="", title=info))

    if not itemlist:
        itemlist.append(
            item.clone(action="", title="No hay enlaces disponibles"))

    return itemlist
Code example #55
def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    # running total of episodes across the whole show
    total_episode = 0

    patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(
        data, patron_info)
    scrapedthumbnail = host + scrapedthumbnail

    for cap, link, name in matches:

        title = ""
        pat = "as/sd"
        # several episodes behind a single link
        if len(name.split(pat)) > 1:
            i = 0
            for pos in name.split(pat):
                i = i + 1
                total_episode += 1
                season, episode = renumbertools.numbered_for_tratk(
                    item.channel, item.show, 1, total_episode)
                if len(name.split(pat)) == i:
                    title += "{0}x{1:02d} ".format(season, episode)
                else:
                    title += "{0}x{1:02d}_".format(season, episode)
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, 1, total_episode)

            title += "{0}x{1:02d} ".format(season, episode)

        url = host + "/" + link
        if "disponible" in link:
            title += "No Disponible aún"
        else:
            title += name
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     url=url,
                     show=show,
                     plot=scrapedplot,
                     thumbnail=scrapedthumbnail))

    if config.get_library_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de Kodi",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=show))

    return itemlist
Code example #56
File: pepecine.py Project: koko200/pelisalacarta
def get_temporadas(item):
    logger.info()

    itemlist = []
    infoLabels = {}


    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    patron = 'vars.title =(.*?)};'
    try:
        data_dict = jsontools.load_json(scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # Return an empty list
    
    if item.extra == "serie_add":
        item.extra=str(data_dict['tmdb_id'])
        item.url=str(data_dict["link"])
        infoLabels['titleraw'] = data_dict["title"]
        infoLabels['tvshowtitle'] = data_dict["title"]
        infoLabels['title_id'] = data_dict['id']
        item.infoLabels = infoLabels
        itemlist= get_episodios(item)
    else:
        infoLabels = item.infoLabels
        if data_dict.has_key("actor"):
            cast = []
            rol = []
            for actor in data_dict["actor"]:
                cast.append(actor['name'])
                rol.append(actor['pivot']['char_name'])
            infoLabels['cast'] = cast
            infoLabels['castandrole'] = zip(cast, rol)

        if data_dict.has_key("writer"):
            writers_list = []
            for writer in data_dict["writer"]:
                writers_list.append(writer['name'])
            infoLabels['writer'] = ", ".join(writers_list)

        if data_dict.has_key("director"):
            director_list = []
            for director in data_dict["director"]:
                director_list.append(director['name'])
            infoLabels['director'] = ", ".join(director_list)
    
        if len(data_dict["season"]) == 1: 
            # Si solo hay una temporada ...
            item.extra=str(data_dict['tmdb_id'])
            item.url=str(data_dict["link"])
            item.infoLabels = infoLabels
            itemlist= get_episodios(item)
        else: #... o si hay mas de una temporada y queremos el listado por temporada...
            item.extra=str(data_dict['tmdb_id'])
            item.viewcontent = "seasons"
            data_dict["season"].sort(key=lambda x:(x['number'])) # ordenamos por numero de temporada
            for season in data_dict["season"]:
                url= filter(lambda l: l["season"]== season['number'],data_dict["link"]) #filtramos enlaces por temporada
                if url:
                    if season['overview']: infoLabels['plot']=season['overview']
                    if season['number']: infoLabels['season']=season['number']
                    if season["poster"]: item.thumbnail=re.compile("/w\d{3}/").sub("/w500/",season["poster"])
                    if season["release_date"]: infoLabels['premiered']= season['release_date']

                    item.infoLabels = infoLabels
                    title=item.title + ' ' + season["title"].lower().replace('season','temporada').capitalize()
                    
                    itemlist.append( Item( channel=item.channel, action="get_episodios", title=title, url=str(url),
                                           extra=item.extra, fanart=item.fanart, text_color="0xFFFFCE9C",
                                           thumbnail=item.thumbnail, viewmode="movie_with_plot",
                                           infoLabels=item.infoLabels) )
            
            if config.get_library_support() and itemlist:
                url= urlparse.urljoin(__url_base__,"episodio-online/" + str(data_dict['id']))
                itemlist.append( Item(channel=item.channel,
                                      title="Añadir esta serie a la biblioteca", url=url,
                                      action="add_serie_to_library", extra='episodios###serie_add',
                                      show= data_dict["title"], text_color="0xFFe5ffcc",
                                      thumbnail = 'https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'))

    return itemlist      
Code example #57
File: pepecine.py Project: koko200/pelisalacarta
def listado(item):
    logger.info()
    itemlist = []

    try:
        data_dict = jsontools.load_json(httptools.downloadpage(item.url).data)
    except:
        return itemlist  # Return an empty list

    # Filtering and search
    if item.filtro:
        for i in data_dict["result"][:]:
            if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
                (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
                    data_dict["result"].remove(i)


    if not item.page:
        item.page = 0

    offset = int(item.page) * 60
    limit = offset + 60
       
    for i in data_dict["result"][offset:limit]:
        infoLabels = InfoLabels()
        idioma = ''

        if item.extra == "movie":
            action= "get_movie"
            #viewcontent = 'movies'
            infoLabels["title"]= i["title"]
            title= '%s (%s)' % (i["title"], i['year'] )
            url= urlparse.urljoin(__url_base__,"ver-pelicula-online/" + str(i["id"]))

        elif item.extra=="series": 
            action="get_temporadas"
            #viewcontent = 'seasons'
            title= i["title"]
            infoLabels['tvshowtitle']= i["title"]
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))

        else: #item.extra=="series_novedades": 
            action="get_only_episodio"
            #viewcontent = 'episodes'
            infoLabels['season']=i['season']
            infoLabels['episode']=i['episode'].zfill(2)
            item.extra= "%sx%s" %(infoLabels["season"], infoLabels["episode"])
            infoLabels['tvshowtitle']= i["title"]
            flag= scrapertools.find_single_match(i["label"],'(\s*\<img src=.*\>)')
            idioma=i["label"].replace(flag,"")
            title = '%s %s (%s)' %(i["title"], item.extra, idioma)
            url= urlparse.urljoin(__url_base__,"episodio-online/" + str(i["id"]))
        
        if i.has_key("poster") and i["poster"]: 
            thumbnail=re.compile("/w\d{3}/").sub("/w500/",i["poster"])
        else:
            thumbnail= item.thumbnail
        if i.has_key("background") and i["background"]: 
            fanart= i["background"]
        else:
            fanart= item.fanart
        
        # Fill in the infoLabels dictionary
        infoLabels['title_id'] = i['id']  # title_id: id of the movie/show on pepecine.com
        infoLabels['titleraw'] = i["title"]  # titleraw: unformatted title of the movie/show
        if i['genre']: infoLabels['genre']=i['genre']
        if i['year']: infoLabels['year']=i['year']
        if i['tagline']: infoLabels['plotoutline']=i['tagline']
        if i['plot']: 
            infoLabels['plot']=i['plot']
        else:
            infoLabels['plot']=""
        if i['runtime']: infoLabels['duration']=int(i['runtime'])*60
        if i['imdb_rating']:
            infoLabels['rating']=i['imdb_rating']
        elif i['tmdb_rating']:
            infoLabels['rating']=i['tmdb_rating']
        if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
        if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']

        newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
                         fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", #viewcontent=viewcontent,
                         language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
        newItem.year=i['year']
        newItem.contentTitle=i['title']
        if 'season' in infoLabels and infoLabels['season']:
            newItem.contentSeason = infoLabels['season']
        if 'episode' in infoLabels and infoLabels['episode']:
            newItem.contentEpisodeNumber = infoLabels['episode']
        itemlist.append(newItem)
    
    # Pagination
    if len(data_dict["result"]) > limit:
        itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1) )
    
    return itemlist      
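The paging above is plain offset/limit slicing over the already-downloaded result list: page N shows results [N*60, N*60+60) and a "next page" item is appended while more remain. The same logic, isolated (names are illustrative):

PAGE_SIZE = 60

def page_slice(results, page):
    # Returns the visible window plus whether another page exists
    offset = int(page) * PAGE_SIZE
    window = results[offset:offset + PAGE_SIZE]
    has_next = len(results) > offset + PAGE_SIZE
    return window, has_next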
Code example #58
def play(item):
    logger.info("[thegroove360.cineblog01] play")
    itemlist = []

    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
        iurl = httptools.downloadpage(item.url,
                                      only_headers=True,
                                      follow_redirects=False).headers.get(
                                          "location", "")
        logger.info("/film/ wrapper: %s" % iurl)
        if iurl:
            item.url = iurl

    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')

    logger.debug(
        "##############################################################")
    if "go.php" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        try:
            data = scrapertools.get_match(data,
                                          'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since the page sometimes shows "Clicca qui per proseguire":
                data = scrapertools.get_match(
                    data,
                    r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                data = httptools.downloadpage(
                    item.url, only_headers=True,
                    follow_redirects=False).headers.get("location", "")
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        from lib import jsunpack

        try:
            data = scrapertools.get_match(
                data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)

        data = scrapertools.find_single_match(
            data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')

        if data.startswith('/'):
            data = urlparse.urljoin("http://swzz.xyz", data)
            data = httptools.downloadpage(data, headers=headers).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug(
        "##############################################################")

    try:
        itemlist = servertools.find_video_items(data=data)

        for videoitem in itemlist:
            videoitem.title = item.show
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__
    except AttributeError:
        logger.error("vcrypt data doesn't contain expected URL")

    return itemlist
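Note that item.url.split('/goto/')[-1].decode('base64') relies on Python 2's base64 string codec, which no longer exists in Python 3. A version-portable equivalent, should the channel ever be migrated:

import base64

def decode_goto(url):
    # Same effect as url.split('/goto/')[-1].decode('base64') on Python 2
    payload = url.split('/goto/')[-1]
    return base64.b64decode(payload).decode('utf-8')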
Code example #59
def acciones_fichas(item, sid, ficha, season=False):
    marcarlist = []
    new_item = item.clone()
    new_item.infoLabels.pop("duration", None)
    estados = [{
        'following': 'seguir'
    }, {
        'favorite': 'favorita'
    }, {
        'view': 'vista'
    }, {
        'slope': 'pendiente'
    }]
    url = "https://playmax.mx/ficha.php?apikey=%s&sid=%s&f=%s" % (apikey, sid,
                                                                  ficha)
    data = httptools.downloadpage(url).data
    data = xml2dict(data)

    try:
        marked = data["Data"]["User"]["Marked"]
        if new_item.contentType == "episode":
            for epi in data["Data"]["Episodes"][
                    "Season_%s" % new_item.infoLabels["season"]]["Item"]:
                if int(epi["Episode"]) == new_item.infoLabels["episode"]:
                    epi_marked = epi["EpisodeViewed"].replace("yes", "ya")
                    epi_id = epi["Id"]
                    marcarlist.append(
                        new_item.clone(action="marcar",
                                       title="Capítulo %s visto. ¿Cambiar?" %
                                       epi_marked,
                                       text_color=color3,
                                       epi_id=epi_id))
                    break
    except:
        pass

    try:
        tipo = new_item.contentType.replace("movie", "Película").replace(
            "episode", "Serie").replace("tvshow", "Serie")
        for status in estados:
            for k, v in status.items():
                if k != marked:
                    title = "Marcar %s como %s" % (tipo.lower(), v)
                    action = "marcar"
                else:
                    title = "%s marcada como %s" % (tipo, v)
                    action = ""
                if k == "following" and tipo == "Película":
                    continue
                elif k == "following" and tipo == "Serie":
                    title = title.replace("seguir", "seguida")
                    if k != marked:
                        title = "Seguir serie"
                        action = "marcar"
                    marcarlist.insert(
                        1,
                        new_item.clone(action=action,
                                       title=title,
                                       text_color=color4,
                                       ficha=ficha,
                                       folder=False))
                    continue

                marcarlist.append(
                    new_item.clone(action="marcar",
                                   title=title,
                                   text_color=color3,
                                   ficha=ficha,
                                   folder=False))
    except:
        pass

    try:
        if season and item.contentType == "tvshow":
            seasonlist = []
            for k, v in data["Data"]["Episodes"].items():
                vistos = False
                season = k.rsplit("_", 1)[1]
                if type(v) is str:
                    continue
                elif type(v["Item"]) is not list:
                    v["Item"] = [v["Item"]]

                for epi in v["Item"]:
                    if epi["EpisodeViewed"] == "no":
                        vistos = True
                        seasonlist.append(
                            new_item.clone(
                                action="marcar",
                                title="Marcar temporada %s como vista" %
                                season,
                                text_color=color1,
                                season=int(season),
                                ficha=ficha,
                                folder=False))
                        break

                if not vistos:
                    seasonlist.append(
                        new_item.clone(
                            action="marcar",
                            title="Temporada %s ya vista. ¿Revertir?" % season,
                            text_color=color1,
                            season=int(season),
                            ficha=ficha,
                            folder=False))

            seasonlist.sort(key=lambda it: it.season, reverse=True)
            marcarlist.extend(seasonlist)
    except:
        pass
    return marcarlist
Code example #60
def indices(item):
    logger.info()
    itemlist = []

    tipo = "2"
    if item.contentType == "tvshow":
        tipo = "1"
    if "Índices" in item.title:
        if item.contentType == "tvshow":
            itemlist.append(
                item.clone(title="Populares",
                           action="fichas",
                           url=host + "/catalogo.php?tipo[]=1&ad=2&"
                           "ordenar=pop&con_dis=on"))
        itemlist.append(
            item.clone(title="Más vistas",
                       action="fichas",
                       url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                       "ordenar=siempre&con_dis=on" % tipo))
        itemlist.append(
            item.clone(title="Mejor valoradas",
                       action="fichas",
                       url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                       "ordenar=valoracion&con_dis=on" % tipo))
        itemlist.append(item.clone(title="Géneros",
                                   url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Idiomas",
                                   url=host + "/catalogo.php"))
        if item.contentType == "movie":
            itemlist.append(
                item.clone(title="Por calidad", url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Por año"))
        itemlist.append(
            item.clone(title="Por país", url=host + "/catalogo.php"))

        return itemlist

    if "Géneros" in item.title:
        data = httptools.downloadpage(item.url).data
        patron = '<div class="sel gen" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for value, genero in matches:
            url = item.url + "?tipo[]=%s&generos[]=%s&ad=2&ordenar=novedades&con_dis=on" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=genero, url=url))
    elif "Idiomas" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="Idioma">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, idioma in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_idioma=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=idioma, url=url))
    elif "calidad" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="Calidad">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, calidad in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_calidad=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=calidad,
                                       url=url))
    elif "país" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(
            data, 'oname="País">Todos(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, pais in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&pais=%s" % (
                tipo, value)
            itemlist.append(item.clone(action="fichas", title=pais, url=url))
    else:
        from datetime import datetime
        year = datetime.now().year
        for i in range(year, 1899, -1):
            url = "%s/catalogo.php?tipo[]=%s&del=%s&al=%s&año=personal&ad=2&ordenar=novedades&con_dis=on" \
                  % (host, tipo, i, i)
            itemlist.append(item.clone(action="fichas", title=str(i), url=url))

    return itemlist