コード例 #1
0
ファイル: biblioteca.py プロジェクト: jurrKodi/pelisalacarta
def mark_content_as_watched(item):
    """Mark a movie or an episode as watched.

    Updates the playcount stored in the Item serialized inside item.nfo,
    propagates the flag to the season/show where appropriate and, when
    running under XBMC/Kodi, mirrors the change in Kodi's own database.
    """
    logger.info("pelisalacarta.channels.biblioteca mark_content_as_watched")
    # logger.debug("item:\n" + item.tostring('\n'))

    if filetools.exists(item.nfo):
        # First line of the nfo is the scraper url, the rest is the Item json
        url_scraper = filetools.read(item.nfo, 0, 1)
        it = Item().fromjson(filetools.read(item.nfo, 1))

        # Key under which the playcount is stored: file name for movies,
        # "SxE" for episodes, plain title otherwise.
        if item.contentType == "movie":
            name_file = os.path.splitext(os.path.basename(item.nfo))[0]
        elif item.contentType == "episode":
            # str() coercion: contentSeason/contentEpisodeNumber may arrive as
            # ints, which would make the original "+" concatenation raise
            # TypeError.
            name_file = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber)
        else:
            name_file = item.contentTitle

        if not hasattr(it, "library_playcounts"):
            it.library_playcounts = {}
        it.library_playcounts.update({name_file: item.playcount})

        # If every episode of a season is marked, mark the season as well
        if item.contentType != "movie":
            it = check_season_playcount(it, item.contentSeason)

        # Save the changes back into item.nfo
        if filetools.write(item.nfo, url_scraper + it.tojson()):
            item.infoLabels["playcount"] = item.playcount

            if item.contentType == "tvshow":
                # Update the whole show
                new_item = item.clone(contentSeason=-1)
                mark_season_as_watched(new_item)

            if config.is_xbmc():
                library.mark_content_as_watched_on_kodi(item, item.playcount)
                platformtools.itemlist_refresh()
コード例 #2
0
def mark_content_as_watched(item):
    """Mark a movie or an episode as watched.

    Updates the playcount stored in the Item serialized inside item.nfo,
    propagates the flag to the season/show where appropriate and, when
    running under XBMC/Kodi, mirrors the change in Kodi's own database.
    """
    logger.info()
    # logger.debug("item:\n" + item.tostring('\n'))

    if filetools.exists(item.nfo):
        # First line of the nfo is the scraper head url, the rest is Item json
        head_nfo = filetools.read(item.nfo, 0, 1)
        it = Item().fromjson(filetools.read(item.nfo, 1))

        # Key under which the playcount is stored: file name for movies,
        # "SxEE" (zero-padded episode) for episodes, plain title otherwise.
        if item.contentType == 'movie':
            name_file = os.path.splitext(os.path.basename(item.nfo))[0]
        elif item.contentType == 'episode':
            name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
        else:
            name_file = item.contentTitle

        if not hasattr(it, 'library_playcounts'):
            it.library_playcounts = {}
        it.library_playcounts.update({name_file: item.playcount})

        # If every episode of a season is marked, mark the season as well
        if item.contentType != 'movie':
            it = check_season_playcount(it, item.contentSeason)

        # Save the changes back into item.nfo
        if filetools.write(item.nfo, head_nfo + it.tojson()):
            item.infoLabels['playcount'] = item.playcount

            if item.contentType == 'tvshow':
                # Update the whole show
                new_item = item.clone(contentSeason=-1)
                mark_season_as_watched(new_item)

            if config.is_xbmc():
                library.mark_content_as_watched_on_kodi(item, item.playcount)
                platformtools.itemlist_refresh()
コード例 #3
0
ファイル: favoritos.py プロジェクト: neno1978/pelisalacarta
def mainlist(item):
    """List every Kodi favourite that points back at this plugin.

    Favourites from other plugins are skipped. Each resulting Item carries
    a context menu with remove ("Quitar de favoritos") and rename actions.
    """
    logger.info("pelisalacarta.core.favoritos mainlist")
    plugin_prefix = "plugin://plugin.video.%s/?" % config.PLUGIN_NAME
    url_pattern = 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME
    itemlist = []

    for name, thumb, data in read_favourites():
        if plugin_prefix not in data:
            continue

        url = scrapertools.find_single_match(data, url_pattern).replace("&quot", "")

        fav = Item().fromurl(url)
        fav.title = name
        fav.thumbnail = thumb
        fav.isFavourite = True
        fav.context = [
            {"title": config.get_localized_string(30154),  # "Quitar de favoritos"
             "action": "delFavourite",
             "channel": "favoritos",
             "from_title": fav.title},
            {"title": "Renombrar",
             "action": "renameFavourite",
             "channel": "favoritos",
             "from_title": fav.title},
        ]
        itemlist.append(fav)

    return itemlist
コード例 #4
0
def newest(categoria):
    """Return the newest items for the "novedades" aggregator channel.

    categoria: 'peliculas' or 'infantiles'; anything else yields [].
    Errors are logged and swallowed (returning []) so one broken channel
    cannot take the whole novedades listing down.
    """
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = "http://www.zpeliculas.com"

        elif categoria == 'infantiles':
            item.url = "http://www.zpeliculas.com/peliculas/p-animacion/"

        else:
            return []

        itemlist = peliculas(item)
        # Drop the trailing "next page" pseudo-item
        if itemlist and itemlist[-1].extra == "next_page":
            itemlist.pop()

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #5
0
ファイル: animeflv.py プロジェクト: CYBERxNUKE/xbmc-addon
def novedades_anime(item):
    """Scrape the latest-anime listing at item.url and build one Item per
    entry.

    Entries tagged "Película" get movie content metadata; everything else
    is treated as a show with the renumbering context menu attached.
    """
    logger.info()

    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t|\s{2}|-\s", "", html)
    html = scrapertools.find_single_match(html, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    pattern = ('<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<p>(.*?)</p>.+?'
               '<a href="([^"]+)">(.*?)</a>')
    entries = re.findall(pattern, html, re.DOTALL)

    itemlist = []
    for thumbnail, _type, plot, url, title in entries:
        entry = Item(channel=item.channel, action="episodios", title=title,
                     url=urlparse.urljoin(HOST, url),
                     thumbnail=urlparse.urljoin(HOST, thumbnail),
                     fulltitle=title, plot=plot)
        if _type == "Película":
            entry.contentType = "movie"
            entry.contentTitle = title
        else:
            entry.show = title
            entry.context = renumbertools.context
        itemlist.append(entry)

    return itemlist
コード例 #6
0
ファイル: cinefox.py プロジェクト: kampanita/pelisalacarta
def newest(categoria):
    """Return the newest cinefox entries for the "novedades" aggregator.

    categoria: "peliculas" or "series". Errors are logged and swallowed
    (returning []) so one broken channel cannot break the whole listing.
    """
    logger.info("pelisalacarta.channels.cinefox newest")
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = "http://www.cinefox.cc/catalogue?type=peliculas"
            itemlist = peliculas(item)

            # Drop the trailing pagination pseudo-item
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == "series":
            item.url = "http://www.cinefox.cc/ultimos-capitulos"
            itemlist = ultimos(item)

            if itemlist and itemlist[-1].action == "ultimos":
                itemlist.pop()

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #7
0
def create_tvshows_from_json(_actualizado):
    """One-off migration: convert the legacy series.json library file into
    individual shows via library.save_library_tvshow, then rename the json
    so the migration does not run again.

    _actualizado: when False, an informative "updating format" dialog is
    shown before the (potentially long) conversion starts.
    """
    logger.info("pelisalacarta.platformcode.library_service create_tvshows_from_json")
    fname = filetools.join(config.get_data_path(), library.TVSHOW_FILE)

    if filetools.exists(fname):
        if not _actualizado:
            platformtools.dialog_ok("Biblioteca: Actualizando formato",
                                    "Espere por favor mientras se completa el proceso")

        try:
            # data maps a tvshow key -> {"name": ..., "channels": {channel: {"tvshow": ..., "url": ...}}}
            data = jsontools.loads(filetools.read(fname))
            for tvshow in data:
                for channel in data[tvshow]["channels"]:

                    serie = Item(contentSerieName=data[tvshow]["channels"][channel]["tvshow"],
                                 url=data[tvshow]["channels"][channel]["url"], channel=channel, action="episodios",
                                 title=data[tvshow]["name"], active=True)
                    # Keys not starting with "t_" are presumably real tmdb ids
                    # -- TODO confirm against the writer of this file
                    if not tvshow.startswith("t_"):
                        serie.infoLabels["tmdb_id"] = tvshow
                    library.save_library_tvshow(serie, list())

            # Keep the old file around (renamed) instead of deleting it
            filetools.rename(fname, "series.json.old")

        except EnvironmentError:
            logger.info("ERROR al leer el archivo: {0}".format(fname))
コード例 #8
0
def download_url(url,titulo,server):
    # Download a single video link to disk (Python 2 command-line helper).
    # If server is empty the hosting server is auto-detected from the url;
    # then the direct video urls are resolved and the best quality is
    # downloaded, the same way pelisalacarta itself does.

    url = url.replace("\\","")

    print "Analizando enlace "+url

    # Find out which server hosts the link
    if server=="":
        itemlist = servertools.find_video_items(data=url)
        if len(itemlist)==0:
            print "No se puede identificar el enlace"
            return

        item = itemlist[0]
        print "Es un enlace en "+item.server
    else:
        item = Item()
        item.server = server

    # Resolve the downloadable urls for this server
    video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server,url)
    if len(video_urls)==0:
        print "No se ha encontrado nada para descargar"
        return

    # Download the best quality available, like pelisalacarta does
    print "Descargando..."
    print video_urls
    devuelve = downloadtools.downloadbest(video_urls,titulo,continuar=True)
コード例 #9
0
def mainlist(item):
    """List every Kodi favourite that points back at this plugin.

    Favourites from other plugins are skipped. Each resulting Item carries
    a context menu with remove ("Quitar de favoritos") and rename actions.
    """
    logger.info()
    plugin_prefix = "plugin://plugin.video.%s/?" % config.PLUGIN_NAME
    url_pattern = 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME
    itemlist = []

    for name, thumb, data in read_favourites():
        if plugin_prefix not in data:
            continue

        url = scrapertools.find_single_match(data, url_pattern).replace("&quot", "")

        fav = Item().fromurl(url)
        fav.title = name
        fav.thumbnail = thumb
        fav.isFavourite = True
        fav.context = [
            {"title": config.get_localized_string(30154),  # "Quitar de favoritos"
             "action": "delFavourite",
             "channel": "favoritos",
             "from_title": fav.title},
            {"title": "Rinomina",
             "action": "renameFavourite",
             "channel": "favoritos",
             "from_title": fav.title},
        ]
        itemlist.append(fav)

    return itemlist
コード例 #10
0
ファイル: html.py プロジェクト: ChopChopKodi/pelisalacarta
 def extract_item(self, path):
     """Rebuild an Item from a serialized url path; default to the
     channelselector main list when path is empty."""
     if not path:
         return Item(channel="channelselector", action="mainlist")
     item = Item()
     item.fromurl(path)
     return item
コード例 #11
0
ファイル: animeflv_ru.py プロジェクト: CYBERxNUKE/xbmc-addon
def novedades_anime(item):
    """Scrape the latest-anime listing at item.url and return one show Item
    per entry, each with the renumbering context menu attached."""
    logger.info()

    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t|\s{2}|-\s", "", html)
    html = scrapertools.find_single_match(html, '<ul class="ListAnimes[^>]+>(.*?)</ul>')

    entries = re.findall('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', html, re.DOTALL)

    itemlist = []
    for thumbnail, url, title in entries:
        show_title = clean_title(title)

        entry = Item(channel=item.channel, action="episodios", title=show_title,
                     url=urlparse.urljoin(HOST, url),
                     thumbnail=urlparse.urljoin(HOST, thumbnail),
                     fulltitle=show_title)
        entry.show = show_title
        entry.context = renumbertools.context

        itemlist.append(entry)

    return itemlist
コード例 #12
0
ファイル: cinetux.py プロジェクト: ChopChopKodi/pelisalacarta
def newest(categoria):
    """Return the newest cinetux entries for the "novedades" aggregator.

    categoria: "peliculas" or "documentales". Errors are logged and
    swallowed (returning []) so one broken channel cannot break the whole
    listing.
    """
    logger.info("pelisalacarta.channels.cinetux newest")
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = CHANNEL_HOST
            itemlist = peliculas(item)

            # Drop the trailing pagination pseudo-item
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == "documentales":
            item.url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
            itemlist = peliculas(item)

            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys

        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #13
0
ファイル: divxatope.py プロジェクト: kampanita/pelisalacarta
def newest(categoria):
    """Return the newest divxatope entries for the "novedades" aggregator.

    categoria: 'peliculas' or 'series'; anything else yields []. Duplicate
    urls are filtered out. Errors are logged and swallowed (returning [])
    so one broken channel cannot break the whole listing.
    """
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = "http://www.divxatope.com/categoria/peliculas"

        elif categoria == 'series':
            item.url = "http://www.divxatope.com/categoria/series"

        else:
            return []

        itemlist = lista(item)
        # Drop the trailing "next page" pseudo-item
        if itemlist and itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

        # The site sometimes lists duplicated content: keep only the first
        # item seen for each url. (The original removed elements from
        # itemlist while iterating it, which skips items and leaves some
        # duplicates in place.)
        seen_urls = set()
        deduped = []
        for i in itemlist:
            if i.url not in seen_urls:
                seen_urls.add(i.url)
                deduped.append(i)
        itemlist = deduped

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #14
0
def play_from_library(item):
    """Resolve a library item to a playable server link and play it.

    Uses the channel's own findvideos()/play() when the channel module
    defines them, falling back to the core implementations. When several
    links are found a server-selection dialog is shown; returns without
    playing if the user cancels it.
    """
    logger.info("[launcher.py] play_from_library")
    elegido = Item(url="")
    channelmodule = ImportarCanal(item.channel)
    if hasattr(channelmodule, 'findvideos'):
        itemlist = channelmodule.findvideos(item)
    else:
        itemlist = findvideos(item)

    if len(itemlist) > 0:
        # Server-selection dialog. (Original code shadowed the `item`
        # parameter in this loop and indexed itemlist before checking for
        # the dialog's -1 "cancelled" sentinel.)
        opciones = [video_item.title for video_item in itemlist]

        import xbmcgui
        dia = xbmcgui.Dialog()
        seleccion = dia.select(config.get_localized_string(30163), opciones)
        if seleccion == -1:  # user cancelled the dialog
            return
        elegido = itemlist[seleccion]
    else:
        elegido = item

    if hasattr(channelmodule, 'play'):
        logger.info("[launcher.py] executing channel 'play' method")
        logger.info(channelmodule.__file__)
        itemlist = channelmodule.play(elegido)
        elegido = itemlist[0]
    else:
        logger.info("[launcher.py] no channel 'play' method, executing core method")

    elegido.channel = 'library'
    play(elegido)
コード例 #15
0
ファイル: oranline.py プロジェクト: jurrKodi/pelisalacarta
def newest(categoria):
    """Return the newest oranline entries for the "novedades" aggregator.

    categoria: 'peliculas' or 'documentales'. Errors are logged and
    swallowed (returning []) so one broken channel cannot break the whole
    listing.
    """
    logger.info("pelisalacarta.channels.oranline newest")
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = urlparse.urljoin(host, "ultimas-peliculas-online/")
            itemlist = peliculas(item)

            # Drop the trailing pagination pseudo-item
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == 'documentales':
            item.url = urlparse.urljoin(host, "category/documental/")
            itemlist = peliculas(item)

            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #16
0
ファイル: biblioteca.py プロジェクト: jurrKodi/pelisalacarta
def mark_tvshow_as_updatable(item):
    """Persist the show's 'active' flag (auto-update on/off) into its nfo
    file and refresh the listing."""
    logger.info("pelisalacarta.channels.biblioteca mark_tvshow_as_updatable")
    # First line of the nfo is the scraper url, the rest is the Item json
    head = filetools.read(item.nfo, 0, 1)
    show_item = Item().fromjson(filetools.read(item.nfo, 1))
    show_item.active = item.active
    filetools.write(item.nfo, head + show_item.tojson())

    platformtools.itemlist_refresh()
コード例 #17
0
ファイル: __init__.py プロジェクト: hmemar/xbmc-tvalacarta
def playvideo(sender, itemtext):
    """Plex entry point (Python 2): deserialize the Item, run the channel's
    play() method when required, resolve the direct video urls through the
    matching server connector, and return a MediaContainer with one
    playable entry per resolved url."""
    Log("[__init__.py] playvideo")
    item = Item()
    item.deserialize(itemtext)

    dir = MediaContainer(viewGroup="InfoList")

    if item.action == "play":
        try:
            Log("[__init__.py] playvideo ejecutando metodo play del canal #" + item.channel + "#")
            # Dynamically import the channel module and run its play(item).
            # NOTE(review): exec on item.channel assumes the serialized item
            # is trusted input -- confirm nothing user-controlled reaches it.
            exec "from tvalacarta.channels import " + item.channel
            exec "itemlist = " + item.channel + "." + item.action + "(item)"
            item = itemlist[0]
            item.title = encodingsafe(item.title)
            item.plot = encodingsafe(item.plot)
        except:
            Log("[__init__.py] playvideo error al ejecutar metodo play del canal")
            import sys

            for line in sys.exc_info():
                Log("%s" % line)

    from core import config

    Log("[__init__.py] playvideo url=" + item.url + ", server=" + item.server)

    video_urls = []
    video_password = ""
    url = item.url
    server = item.server.lower()
    try:
        # Import the server connector module and extract every possible link
        exec "from servers import " + server + " as server_connector"
        video_urls = server_connector.get_video_url(page_url=url, video_password=video_password)
    except:
        import sys

        for line in sys.exc_info():
            Log("%s" % line)

    # video_url is (label, media_url): http urls are appended as plain video
    # items, anything else goes through the RTMP item type.
    for video_url in video_urls:
        wait_time = 0
        if video_url[1].startswith("http"):
            dir.Append(
                Function(
                    VideoItem(playvideonormal, title="Ver " + video_url[0], subtitle="", summary="", thumb=""),
                    mediaurl=video_url[1],
                )
            )
        else:
            dir.Append(
                Function(
                    RTMPVideoItem(playvideonormal, title="Ver " + video_url[0], subtitle="", summary="", thumb=""),
                    mediaurl=video_url[1],
                )
            )

    return dir
コード例 #18
0
def ExtraerItem():
  """Deserialize the Item encoded in sys.argv[2]; default to the
  channelselector main list when nothing was passed."""
  serialized = sys.argv[2].replace("?","")
  if not serialized:
    return Item(channel="channelselector", action="mainlist")
  item = Item()
  item.deserialize(serialized)
  return item
コード例 #19
0
ファイル: views.py プロジェクト: hx002/sanguo-server
def merge(request):
    """Merge gems for the requesting character and return the packed
    MergeGemResponse carrying the resulting gem id."""
    proto = request._proto
    new_id = Item(request._char_id).gem_merge(proto.id)

    response = MergeGemResponse()
    response.ret = 0
    response.new_id = new_id
    return pack_msg(response)
コード例 #20
0
ファイル: resource.py プロジェクト: hx002/sanguo-server
def _check_equipment(char_id, ids, func_name=""):
    """Check/commit generator: verify the character owns every equipment id
    (raising SanguoException on the first missing one), yield control back
    to the caller, then remove those equipments."""
    from core.item import Item
    item = Item(char_id)
    for equip_id in ids:
        if not item.has_equip(equip_id):
            raise SanguoException(EQUIPMENT_NOT_EXIST, char_id, func_name,
                                  'Equipment {0} Not Exist'.format(equip_id))

    yield

    item.equip_remove(ids)
コード例 #21
0
def ExtraerItem():
    """Deserialize the Item encoded in sys.argv[2]; default to the
    channelselector main list when nothing was passed."""
    logger.info("[launcher.py] - ExtraerItem")
    serialized = sys.argv[2].replace("?","")
    if serialized:
        item = Item()
        item.deserialize(serialized)
        #logger.info("[launcher.py] - ExtraerItem: " + item.tostring() )
    else:
        item = Item(channel="channelselector", action="mainlist")
    return item
コード例 #22
0
def ProcessRequest(Socket):
  """Handle one client request on its own thread (custom HTML frontend).

  Per-thread state is stashed in sys.argv keyed by the thread name -- an
  unusual project-wide convention in this codebase (sys.argv is used as a
  dict here, not a list). Deserializes the requested Item, asks navigation
  for the next itemlist, renders it through guitools, and shows an error
  dialog (naming the failing channel when it can be extracted from the
  traceback) on any failure.
  """
  sys.argv[threading.current_thread().name]={"Socket":str(Socket), "Thread": threading.current_thread()}
  sys.argv[str(Socket)]["Host"] = "http://"+ myip + ":" + str(PORT)
  MostrarInfo()
  ReloadModules()
          
  try:
    # The serialized Item travels in the per-socket "Request" slot
    itemserializado = sys.argv[sys.argv[threading.current_thread().name]["Socket"]]["Request"]
    if itemserializado:
      item = Item()
      item.deserialize(itemserializado)
    else:
      item = Item(channel="channelselector", action="mainlist")
    logger.info("-----------------------------------------------------------------------")
    logger.info("Item Recibido: " + item.tostring())
    logger.info("-----------------------------------------------------------------------")
    
    # WindowMode picks the rendering layout: 0 = root menus, 1 = channel
    # list, 2 = everything else
    if (item.channel=="channelselector" and item.action=="mainlist") or (item.channel=="novedades" and item.action=="mainlist") or (item.channel=="buscador" and item.action=="mainlist") or (item.channel=="channelselector" and item.action=="channeltypes"):
      WindowMode = 0
    elif item.channel=="channelselector" and item.action=="listchannels":
      WindowMode = 1
    else:
      WindowMode = 2
      
    itemlist = navigation.NextItem(item)
    if type(itemlist)==list: 
      # Prepend a "go back" entry everywhere except the root menu
      if not (item.channel=="channelselector" and item.action=="mainlist") and not itemlist[0].action=="go_back":
        itemlist.insert(0,Item(title="Atrás", action="go_back",thumbnail="%sthumb_atras.png"))


      for x in range(len(itemlist)):
        nitem, title, thumbnail = navigation.ItemInfo(item, itemlist[x], WindowMode)
        guitools.AddItem(nitem, title, thumbnail, WindowMode)        
      guitools.CloseDirectory(item)
      
    # Drop this thread's scratch state before finishing
    del sys.argv[threading.current_thread().name]
    MostrarInfo()
    
  except Exception as e:
    import traceback
    from platformcode import cliente
    logger.error(traceback.format_exc())
    from core import scrapertools
    # Try to extract the failing channel's name from the traceback path
    patron = 'File "'+os.path.join(config.get_runtime_path(),"pelisalacarta","channels","").replace("\\","\\\\")+'([^.]+)\.py"'
    canal = scrapertools.find_single_match(traceback.format_exc(),patron)
    if canal:
      cliente.Dialogo().MostrarOK(
        "Se ha producido un error en el canal " + canal,
        "Esto puede ser devido a varias razones: \n - El servidor no está disponible, o no esta respondiendo.\n - Cambios en el diseño de la web.\n - Etc...\nComprueba el log para ver mas detalles del error.")
    else:
      cliente.Dialogo().MostrarOK(
        "Se ha producido un error en pelisalacarta",
        "Comprueba el log para ver mas detalles del error." )
    del sys.argv[threading.current_thread().name]
    MostrarInfo()
コード例 #23
0
ファイル: pepecine.py プロジェクト: CYBERxNUKE/xbmc-addon
def newest(categoria):
    """Return the newest pepecine entries for the "novedades" aggregator.

    categoria: 'peliculas', 'infantiles' or 'series'; anything else yields
    []. Errors are logged and swallowed (returning []) so one broken
    channel cannot break the whole listing.
    """
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = urlparse.urljoin(__url_base__,"plugins/ultimas-peliculas-updated.php")
            item.extra = "movie"

        elif categoria == 'infantiles':
            item.url = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php")
            item.filtro=("genero","animación")
            item.extra = "movie"

        elif categoria == 'series':
            item.url = urlparse.urljoin(__url_base__,"plugins/ultimos-capitulos-updated.php")
            item.extra="series_novedades"

        else:
            return []

        item.action = "listado"
        itemlist = listado(item)
        # Drop the trailing pagination pseudo-item
        if itemlist and itemlist[-1].action == "listado":
            itemlist.pop()

    # Best-effort: log and return []. (Was a bare "except:", which also
    # swallowed SystemExit/KeyboardInterrupt.)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
コード例 #24
0
ファイル: resource.py プロジェクト: hx002/sanguo-server
def _check_stuffs(char_id, stuffs, func_name=""):
    """Check/commit generator: verify the character owns every
    (stuff_id, amount) pair (raising SanguoException on the first shortage),
    yield control back to the caller, then remove the stuffs."""
    from core.item import Item
    item = Item(char_id)
    for stuff_id, amount in stuffs:
        if not item.has_stuff(stuff_id, amount):
            raise SanguoException(STUFF_NOT_ENOUGH, char_id, func_name,
                                  'Stuff {0} Not Enough/Exist'.format(stuff_id))

    yield

    for stuff_id, amount in stuffs:
        item.stuff_remove(stuff_id, amount)
コード例 #25
0
ファイル: views.py プロジェクト: hx002/sanguo-server
def stuff_use(request):
    """Consume stuff items for the requesting character; attach the reward
    (if any) to the packed StuffUseResponse."""
    proto = request._proto
    result = Item(request._char_id).stuff_use(proto.id, proto.amount)

    response = StuffUseResponse()
    response.ret = 0
    if result:
        response.attachment.MergeFrom(result)

    return pack_msg(response)
コード例 #26
0
ファイル: tests.py プロジェクト: hx002/sanguo-server
    def setUp(self):
        """Create a fresh character holding one equipment and a couple of
        gem stacks for the tests to operate on."""
        char_initialize(1, 1, 1, 'a')
        self.char_id = 1
        self.session = crypto.encrypt('1:1:{0}'.format(self.char_id))

        item = Item(self.char_id)
        self.equip_id = item.equip_add(1)
        item.gem_add([(1, 10), (2, 1)])
コード例 #27
0
ファイル: resource.py プロジェクト: hx002/sanguo-server
def _check_gems(char_id, gems, func_name=""):
    """Check/commit generator: verify the character owns every
    (gem_id, amount) pair (raising SanguoException on the first shortage),
    yield control back to the caller, then remove the gems."""
    from core.item import Item
    item = Item(char_id)
    for gem_id, amount in gems:
        if not item.has_gem(gem_id, amount):
            raise SanguoException(GEM_NOT_ENOUGH, char_id, func_name,
                                  'Gem {0} Not Enough/Exist'.format(gem_id))

    yield

    for gem_id, amount in gems:
        item.gem_remove(gem_id, amount)
コード例 #28
0
def create_tvshows_from_xml():
    """One-off migration: convert the legacy series.xml library file into
    the new per-show layout.

    Renames the old SERIES folder, recreates it, re-saves every show through
    library.save_library_tvshow (splitting a trailing "(YYYY)" into
    title/year), renames the xml so the migration does not run twice, and
    finally cleans the library of the now-dangling old paths.

    Returns True when a migration was attempted, False when there was
    nothing to migrate.
    """
    logger.info("fusionse.platformcode.library_service create_tvshows_from_xml")

    fname = filetools.join(config.get_data_path(), library.TVSHOW_FILE_OLD)
    if filetools.exists(fname):
        platformtools.dialog_ok("Libreria: Si aggiornerà al nuovo formato",
                                 "Selezionare il nome corretto di ogni serie, se non siete sicuri potete 'Annulla'.",
                                 "Ci sono nuove opzioni per la 'Libreria' in 'configurazione'.")

        filetools.rename(library.TVSHOWS_PATH, "SERIES_OLD")

        if not filetools.exists(library.TVSHOWS_PATH):
            filetools.mkdir(library.TVSHOWS_PATH)

            if filetools.exists(library.TVSHOWS_PATH):
                try:
                    data = filetools.read(fname)
                    # Each line holds "tvshow, url, channel"
                    for line in data.splitlines():
                        aux = line.rstrip('\n').split(",")
                        tvshow = aux[0].strip()
                        url = aux[1].strip()
                        channel = aux[2].strip()

                        serie = Item(contentSerieName=tvshow, url=url, channel=channel, action="episodios",
                                     title=tvshow, active=True)

                        # Split a trailing "(YYYY)" into title + year
                        patron = "^(.+)[\s]\((\d{4})\)$"
                        matches = re.compile(patron, re.DOTALL).findall(serie.contentSerieName)

                        if matches:
                            serie.infoLabels['title'] = matches[0][0]
                            serie.infoLabels['year'] = matches[0][1]
                        else:
                            serie.infoLabels['title'] = tvshow

                        library.save_library_tvshow(serie, list())

                    filetools.rename(fname, "series.xml.old")

                    # Finally clean the library: the old paths no longer exist
                    library.clean()

                except EnvironmentError:
                    logger.info("ERROR al leer el archivo: {0}".format(fname))

            else:
                logger.info("ERROR, no se ha podido crear la nueva carpeta de SERIES")
        else:
            logger.info("ERROR, no se ha podido renombrar la antigua carpeta de SERIES")

        return True

    return False
コード例 #29
0
ファイル: views.py プロジェクト: hx002/sanguo-server
def strengthen_equip(request):
    """Level up an equipment (optionally in 'quick' mode) and return the
    updated equipment messages in the packed StrengthEquipResponse."""
    proto = request._proto

    equip_msgs = Item(request._char_id).equip_level_up(proto.id, proto.quick)

    response = StrengthEquipResponse()
    response.ret = 0
    for msg in equip_msgs:
        response.equips.add().MergeFrom(msg)
    return pack_msg(response)
コード例 #30
0
def create_tvshows_from_xml():
    """One-off migration: convert the legacy series.xml library file into
    the new per-show layout.

    Renames the old SERIES folder, recreates it, re-saves every show through
    library.save_library_tvshow (splitting a trailing "(YYYY)" into
    title/year), renames the xml so the migration does not run twice, and
    finally cleans the library of the now-dangling old paths.

    Returns True when a migration was attempted, False when there was
    nothing to migrate.
    """
    logger.info("pelisalacarta.platformcode.library_service create_tvshows_from_xml")

    fname = filetools.join(config.get_data_path(), library.TVSHOW_FILE_OLD)
    if filetools.exists(fname):
        platformtools.dialog_ok("Biblioteca: Se va a actualizar al nuevo formato",
                                "Seleccione el nombre correcto de cada serie, si no está seguro pulse 'Cancelar'.",
                                "Hay nuevas opciones en 'Biblioteca' y en la 'configuración' del addon.")

        filetools.rename(library.TVSHOWS_PATH,  "SERIES_OLD")

        if not filetools.exists(library.TVSHOWS_PATH):
            filetools.mkdir(library.TVSHOWS_PATH)

            if filetools.exists(library.TVSHOWS_PATH):
                try:
                    data = filetools.read(fname)
                    # Each line holds "tvshow, url, channel"
                    for line in data.splitlines():
                        aux = line.rstrip('\n').split(",")
                        tvshow = aux[0].strip()
                        url = aux[1].strip()
                        channel = aux[2].strip()

                        serie = Item(contentSerieName=tvshow, url=url, channel=channel, action="episodios",
                                     title=tvshow, active=True)

                        # Split a trailing "(YYYY)" into title + year
                        patron = "^(.+)[\s]\((\d{4})\)$"
                        matches = re.compile(patron, re.DOTALL).findall(serie.contentSerieName)

                        if matches:
                            serie.infoLabels['title'] = matches[0][0]
                            serie.infoLabels['year'] = matches[0][1]
                        else:
                            serie.infoLabels['title'] = tvshow

                        library.save_library_tvshow(serie, list())

                    filetools.rename(fname, "series.xml.old")

                    # Finally clean the library: the old paths no longer exist
                    library.clean()

                except EnvironmentError:
                    logger.info("ERROR al leer el archivo: {0}".format(fname))

            else:
                logger.info("ERROR, no se ha podido crear la nueva carpeta de SERIES")
        else:
            logger.info("ERROR, no se ha podido renombrar la antigua carpeta de SERIES")

        return True

    return False
コード例 #31
0
def episodios(item):
    """Episode-listing entry point required by the library auto-updater:
    delegates to completo() with a fresh Item carrying only what it needs."""
    serie = Item(url=item.url, show=item.show, extra="serie_add")
    return completo(serie)
コード例 #32
0
def entradas(item):
    """List the movies/entries returned by the site's json API at item.url.

    Builds one findvideos Item per entry, carrying infoLabels, fanart,
    thumbnail, the precomputed direct video_urls and a quality tag derived
    from the video height. (Python 2 code: uses `unicode`.)
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    if "Destacados" in item.title:
        itemlist.append(
            item.clone(
                title=
                "Aviso: Si una película no tiene (imagen/carátula) NO va a funcionar",
                action="",
                text_color=color4))

    # data["a"] holds one dict per entry
    for child in data["a"]:
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = child['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                # "NN min." -> seconds; ignore unparseable runtimes
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_480.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        # Quality tag derived from the vertical resolution
        if child['height'] < 720:
            quality = "SD"
        elif child['height'] < 1080:
            quality = "720p"
        elif child['height'] >= 1080:
            quality = "1080p"
        # Strip accents/diacritics to get a plain-ASCII full title
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        #if child['year']:
        #    title += " (" + child['year'] + ")"
        #title += quality
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 server="",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 video_urls=video_urls,
                 text_color=color3,
                 quality=quality))

    return itemlist
コード例 #33
0
def episodios(item):
    """Build the list of episode Items for a series from the site's JSON feed.

    The feed comes in two shapes: seasons grouped under key "b" (each entry
    carries "season", "id" and its episode list "a"), or a flat episode list
    under key "a" with no season information.  Episodes are returned newest
    first, followed by an "add to video library" entry when the video
    library is enabled.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    # Normalize both feed shapes into [season, episode_dict, season_id]
    # triples so one loop below handles either layout.
    capitulos = []
    if data.get("b"):
        for child in data["b"]:
            for child2 in child["a"]:
                capitulos.append([child["season"], child2, child["id"]])
    else:
        for child in data.get("a", []):
            capitulos.append(['', child, ''])

    for season, child, id_season in capitulos:
        infoLabels = item.infoLabels.copy()

        if child.get('runtime'):
            try:
                # Runtime arrives as e.g. "45 min."; Kodi expects seconds.
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        # Fall back to the "NxM" pattern in the episode name when the feed
        # gives no usable season number.
        if not season or not season.isdigit():
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        # Episode number: explicit field if present, else the "NxM" pattern,
        # else 0.
        if not child['episode']:
            episode = scrapertools.find_single_match(child['name'],
                                                     '\d+x(\d+)')
            if not episode:
                episode = "0"
            infoLabels['episode'] = int(episode)
        else:
            infoLabels['episode'] = int(child['episode'])
        infoLabels['mediatype'] = "episode"

        # host is a printf-style URL template with one %s slot.
        url = host % "movie/%s/movie.js" % child["id"]
        thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        if id_season:
            fanart = host % "list/%s/background_1080.jpg" % id_season
        else:
            fanart = item.fanart

        # Collect [url_template, height] pairs for every listed server;
        # the trailing "?%s" slot is filled later with an auth token.
        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        # Title: re-join the name around its last space; if that fails
        # (e.g. single-word name), derive it from the slug-style id.
        try:
            title = fulltitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        except:
            title = fulltitle = child['id'].replace("-", " ")
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 contentTitle=fulltitle,
                 viewmode="movie",
                 show=item.show,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="episodios",
                 text_color=color3))

    # Newest episodes first.
    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 infoLabels=item.infoLabels,
                 show=item.show,
                 extra="episodios"))

    return itemlist
コード例 #34
0
def findvideos(item):
    """Build playable mirror Items for a movie/episode.

    Uses item.video_urls when episodios()/entradas() already collected them;
    otherwise downloads and parses the item's movie.js feed.  Mirrors are
    grouped by quality (highest first, random order within a quality) and
    numbered per quality group.
    """
    logger.info()
    itemlist = []

    if not item.video_urls:
        data = httptools.downloadpage(item.url)
        # NOTE: "sucess" is the attribute name exposed by the project's
        # downloadpage response object (its own spelling) — do not "fix".
        if not data.sucess:
            itemlist.append(
                item.clone(title="Película no disponible", action=""))
            return itemlist
        data = jsontools.load(data.data)

        # Same [url_template, height] pairs as built in episodios().
        item.video_urls = []
        for k, v in data.get("video", {}).items():
            for vid in v:
                item.video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

    if item.video_urls:
        import random
        import base64

        # Highest quality first; random.random() shuffles mirrors that
        # share the same height.
        item.video_urls.sort(key=lambda it: (it[1], random.random()),
                             reverse=True)
        i = 0
        actual_quality = ""
        for vid, quality in item.video_urls:
            title = "Ver vídeo en %sp" % quality
            # Restart the mirror counter whenever the quality changes.
            if quality != actual_quality:
                i = 0
                actual_quality = quality

            # Alternate purple/green coloring for successive mirrors.
            if i % 2 == 0:
                title += " [COLOR purple]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            else:
                title += " [COLOR green]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            # Fill the template's trailing "?%s" slot with a hard-coded,
            # base64-obfuscated query string (presumably an access token
            # the site expects — TODO confirm it is still valid).
            url = vid % "%s" % base64.b64decode(
                "dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx"
                "WTBCQTdhWENpeU9paUE=")
            url += '|User-Agent=%s' % httptools.get_user_agent
            itemlist.append(
                item.clone(title=title, action="play", url=url, video_urls=""))
            i += 1

        # Only offer "add to library" from the plain movie flow
        # (item.extra is set when coming from episodios()).
        if itemlist and item.extra == "" and config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir enlaces a la videoteca",
                     text_color=color5,
                     contentTitle=item.fulltitle,
                     url=item.url,
                     action="add_pelicula_to_library",
                     infoLabels={'title': item.fulltitle},
                     extra="findvideos",
                     fulltitle=item.fulltitle))
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist
コード例 #35
0
def submenu(item):
    """Second-level menu: series sections when item.title mentions
    "Series", movie sections otherwise."""
    logger.info()
    itemlist = []

    if "Series" in item.title:
        caps_url = host % "list/nuevos-capitulos" + ext
        caps_thumb = host % "list/nuevos-capitulos/thumbnail_167x250.jpg"
        caps_fanart = host % "list/nuevos-capitulos/background_1080.jpg"
        # Both entries point at the same "nuevos-capitulos" listing.
        for entry_title in ("Nuevos Capítulos", "Nuevas Temporadas"):
            itemlist.append(
                Item(channel=item.channel,
                     title=entry_title,
                     action="nuevos_cap",
                     url=caps_url,
                     text_color=color2,
                     thumbnail=caps_thumb,
                     fanart=caps_fanart,
                     viewmode="movie"))

        series_url = host % "list/series" + ext
        # Both views browse the same series list with the "series" action.
        for entry_title in ("Series más vistas", "Lista de Series A-Z"):
            itemlist.append(
                Item(channel=item.channel,
                     title=entry_title,
                     action="series",
                     text_color=color2,
                     url=series_url,
                     viewmode="movie_with_plot",
                     thumbnail=item.thumbnail,
                     fanart=item.fanart,
                     contentTitle="Series"))
    else:
        # Movie listings share structure; only title and list slug differ.
        for entry_title, slug in (("Novedades", "ultimas-peliculas"),
                                  ("Más vistas", "peliculas-mas-vistas")):
            itemlist.append(
                Item(channel=item.channel,
                     title=entry_title,
                     action="entradas",
                     url=host % ("list/%s" % slug) + ext,
                     text_color=color2,
                     thumbnail=host % ("list/%s/thumbnail_167x250.jpg" % slug),
                     fanart=host % ("list/%s/background_1080.jpg" % slug),
                     viewmode="movie_with_plot"))
        itemlist.append(
            Item(channel=item.channel,
                 title="Categorías",
                 action="cat",
                 url=host % "list/peliculas" + ext,
                 text_color=color2,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    return itemlist
コード例 #36
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def menuseries(item):
    """Build the top-level menu for the series section.

    Account-only rows (orange) bracket the generic browsing entries and
    only appear when a user session exists.
    """
    logger.info()

    itemlist = []

    def _account_row(label, my_action):
        # Orange, account-bound shortcut into the user's personal lists.
        return Item(channel=item.channel,
                    action="items_usuario",
                    title=bbcode_kodi2html(
                        "[COLOR orange][B]%s[/B][/COLOR]" % label),
                    url=host +
                    "/a/my?target=shows&action=%s&start=-28&limit=28" %
                    my_action,
                    folder=True)

    if account:
        itemlist.append(_account_row("Siguiendo", "following"))
        itemlist.append(_account_row("Para Ver", "watch"))

    itemlist.append(
        Item(channel=item.channel,
             action="series_abc",
             title="A-Z",
             folder=True))

    # Episode feeds share one URL template; only the feed action differs.
    for label, feed in (("Últimos Emitidos", "latest"),
                        ("Episodios Estreno", "premiere"),
                        ("Episodios Actualizados", "updated")):
        itemlist.append(
            Item(channel=item.channel,
                 action="novedades_episodios",
                 title=label,
                 url=host +
                 "/a/episodes?action=%s&start=-24&limit=24&elang=ALL" % feed,
                 folder=True))

    # Generic series browsing entries.
    for row_action, label, path in (
            ("fichas", "Últimas series", "/series"),
            ("fichas", "Rating IMDB", "/series/imdb_rating"),
            ("generos_series", "Series por Género", ""),
            ("listado_series", "Listado de todas las series", "/series/list")):
        itemlist.append(
            Item(channel=item.channel,
                 action=row_action,
                 title=label,
                 url=host + path,
                 folder=True))

    if account:
        itemlist.append(_account_row("Favoritas", "favorite"))
        itemlist.append(_account_row("Pendientes", "pending"))
        itemlist.append(_account_row("Vistas", "seen"))

    return itemlist
コード例 #37
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def fichas(item):
    """List show/movie cards ("fichas") from an HDFull listing page.

    Handles both plain listings and search results (item.title ==
    "Buscar..."), decorates each title with language flags, rating and the
    user's watch status, and appends a pagination Item when the page links
    a next page.

    Fix: the original bound locals named ``str`` and ``type``, shadowing
    the builtins; they are renamed ``status_str`` and ``section`` with no
    other behavior change.
    """
    logger.info()
    itemlist = []

    ## Load the user's watch status for every ficha in one request.
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    if item.title == "Buscar...":
        data = agrupa_datos(
            httptools.downloadpage(item.url, post=item.extra).data)

        # Search pages carry one or two <h3 class="section-title"> sections.
        s_p = scrapertools.get_match(
            data,
            '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
                '<h3 class="section-title">')

        if len(s_p) == 1:
            data = s_p[0]
            if 'Lo sentimos</h3>' in s_p[0]:
                # NOTE(review): `texto` is not defined in this function —
                # presumably a module-level global set by the search entry
                # point; confirm before modifying this branch.
                return [
                    Item(
                        channel=item.channel,
                        title=bbcode_kodi2html(
                            "[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" +
                            texto.replace('%20', ' ') +
                            "[/COLOR] sin resultados"))
                ]
        else:
            data = s_p[0] + s_p[1]
    else:
        data = agrupa_datos(httptools.downloadpage(item.url).data)

    # Flatten each card's HTML into a single "'key':'value';" record line so
    # a single regex below can capture every field at once.
    data = re.sub(
        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
        '<a href="([^"]+)"[^<]+' + \
        '<img.*?src="([^"]+)".*?' + \
        '<div class="left"(.*?)</div>' + \
        '<div class="right"(.*?)</div>.*?' + \
        'title="([^"]+)".*?' + \
        'onclick="setFavorite.\d, (\d+),',
        r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
        data
    )

    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:

        # Listing thumbs are tiny; switch to the full-size thumbnail path.
        thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")

        title = scrapedtitle.strip()
        show = title
        contentTitle = scrapedtitle.strip()

        # ">" is what the capture groups yield when the block was empty.
        if scrapedlangs != ">":
            textoidiomas = extrae_idiomas(scrapedlangs)
            title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas +
                                      "[/B][/COLOR])")

        if scrapedrating != ">":
            # Collapse "<b>7<b class="dec">5</b>" style markup into "7,5".
            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>',
                                r'\1,\2', scrapedrating)
            title += bbcode_kodi2html(" ([COLOR orange]" + valoracion +
                                      "[/COLOR])")

        url = urlparse.urljoin(item.url, scrapedurl)

        # Series and movies diverge in action, URL suffix and status section.
        if "/serie" in url or "/tags-tv" in url:
            action = "episodios"
            url += "###" + scrapedid + ";1"
            section = "shows"
            contentType = "tvshow"
        else:
            action = "findvideos"
            url += "###" + scrapedid + ";2"
            section = "movies"
            contentType = "movie"

        status_str = get_status(status, section, scrapedid)
        if status_str != "": title += status_str

        if item.title == "Buscar...":
            # Tag search results with their site section (Serie/Pelicula...).
            tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
            title += bbcode_kodi2html(" - [COLOR blue]" +
                                      tag_type.capitalize() + "[/COLOR]")

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 fulltitle=title,
                 thumbnail=thumbnail,
                 show=show,
                 folder=True,
                 contentType=contentType,
                 contentTitle=contentTitle))

    ## Pagination ("»" link).
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">.raquo;</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 folder=True))

    return itemlist
def fichas(item):
    """List movie cards from an altadefinizione listing/search page.

    Scrapes title, URL, thumbnail, quality tag and IMDB score; search
    pages ("/?s=" URLs) use a different field order, handled by swapping
    the first two captures.  Thumbnails get the Cloudflare cookies
    appended so Kodi can fetch them.
    """
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the page (Cloudflare-protected).
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality: inject a default "SD" tag where the markup omits it.
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    # fix - IMDB: inject a 0.0 score where the markup omits it.
    data = re.sub(
        r'<h5> </div>',
        '<fix>IMDB: 0.0</fix>',
        data
    )
    # ------------------------------------------------
    # Rebuild a Cookie header from the stored cookie jar so thumbnails
    # pass Cloudflare when Kodi downloads them.
    cookies = ""
    matches = re.compile('(.altadefinizione.site.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    # NOTE(review): this appends to the module-level `headers` list on every
    # call, so it grows across invocations — confirm this is intended.
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Search results and plain listings order the captured fields
    # differently; the swap below (scraped_1/scraped_2) compensates.
    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron += 'href="([^"]+)".*?'
        patron += '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'src="([^"]+)".*?'
        patron += 'class="titleFilm">([^<]+)<.*?'
        patron += 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'href="([^"]+)".*?'
        patron += 'src="([^"]+)".*?'
        patron += 'href[^>]+>([^<]+)</a>.*?'
        patron += 'IMDB: ([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        # On search pages the URL/quality captures arrive swapped.
        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"

        # ------------------------------------------------
        # Attach the auth headers so the thumbnail download succeeds.
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
コード例 #39
0
def porGenero(item):
    """Return one listing Item per movie genre.

    Fix: the original repeated the same seven-line ``Item(...)`` call 24
    times; the genres are now data.  Every genre maps to
    ``http://pelisadicto.com/genero/<genre>/1`` (the site accepts the
    accented, unencoded form, exactly as the previous hard-coded URLs
    did) and is browsed with the "agregadas" action.  "Adulto" is only
    offered when adult mode is enabled — same items, same order as before.
    """
    logger.info("[pelisadicto.py] porGenero")

    itemlist = []
    # Genre names double as the URL path segment.
    generos = [
        "Acción", "Adulto", "Animación", "Aventura", "Biográfico",
        "Ciencia Ficción", "Cine Negro", "Comedia", "Corto", "Crimen",
        "Deporte", "Documental", "Drama", "Familiar", "Fantasía",
        "Guerra", "Historia", "Misterio", "Música", "Musical",
        "Romance", "Terror", "Thriller", "Western",
    ]
    for genero in generos:
        # Adult content is opt-in via the channel settings.
        if genero == "Adulto" and config.get_setting("adult_mode") != "true":
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="agregadas",
                 title=genero,
                 url="http://pelisadicto.com/genero/%s/1" % genero,
                 viewmode="movie_with_plot"))

    return itemlist
コード例 #40
0
def get_episodios(item):
    """Scrape the episode list of a series from a newpct1 search page.

    Two card formats exist: a styled one where season/episode/language
    sit in <span> tags, and a plain one where they must be pulled out of
    the free-text title ("Cap.602", "Temporada 6", ...).  Follows the
    "Next" pagination link recursively via the get_episodios action.
    """
    logger.info("[newpct1.py] get_episodios")
    itemlist = []
    logger.info("[newpct1.py] get_episodios url=" + item.url)
    # Strip noise (newlines, tabs, HTML comments, icon tags) so the
    # regexes below can work on one continuous string.
    data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "",
                  scrapertools.cache_page(item.url))
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    #logger.info("[newpct1.py] data=" +data)

    patron = '<ul class="buscar-list">(.*?)</ul>'
    #logger.info("[newpct1.py] patron=" + patron)

    fichas = scrapertools.get_match(data, patron)
    #logger.info("[newpct1.py] matches=" + str(len(fichas)))

    #<li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> <a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><h2 style="padding:0;">Serie <strong style="color:red;background:none;">Forever - Temporada 1 </strong> - Temporada<span style="color:red;background:none;">[ 1 ]</span>Capitulo<span style="color:red;background:none;">[ 01 ]</span><span style="color:red;background:none;padding:0px;">Espa�ol Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2></a> <span>27-10-2014</span> <span>450 MB</span> <span class="color"><ahref="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"> Descargar</a> </div></li>
    #logger.info("[newpct1.py] get_episodios: " + fichas)
    patron = '<a href="([^"]+).*?'  # url
    patron += '<img src="([^"]+)".*?'  # thumbnail
    patron += '<h2 style="padding(.*?)/h2>.*?'  # title, language and quality

    matches = re.compile(patron, re.DOTALL).findall(fichas)
    #logger.info("[newpct1.py] get_episodios matches: " + str(len(matches)))
    for scrapedurl, scrapedthumbnail, scrapedinfo in matches:
        try:
            url = scrapedurl
            # Styled card format: the metadata lives in <span> tags.
            if '</span>' in scrapedinfo:
                #logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo)
                #<h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Big Bang Theory - Temporada 6 </strong> - Temporada<span style="color:red;background:none;">[ 6 ]</span>Capitulo<span style="color:red;background:none;">[ 03 ]</span><span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2>
                patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?'  # season
                patron += '<span style=".*?">\[\s*(.*?)\].*?'  # episode
                patron += ';([^/]+)'  # language

                info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
                (temporada, capitulo, idioma) = info_extra[0]

                #logger.info("[newpct1.py] get_episodios: temporada=" + temporada)
                #logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo)
                #logger.info("[newpct1.py] get_episodios: idioma=" + idioma)
                # The language fragment may be raw markup or &nbsp;-padded
                # text; otherwise drop it.
                if '">' in idioma:
                    idioma = " [" + scrapertools.find_single_match(
                        idioma, '">([^<]+)').strip() + "]"
                elif '&nbsp' in idioma:
                    idioma = " [" + scrapertools.find_single_match(
                        idioma, '&nbsp;([^<]+)').strip() + "]"
                else:
                    idioma = ""
                title = item.title + " (" + temporada.strip(
                ) + "x" + capitulo.strip() + ")" + idioma

            else:
                # Plain card format: parse season/episode out of free text.
                #<h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2>
                #<h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2
                #<h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2>
                try:
                    temp, cap = scrapertools.get_season_and_episode(
                        scrapedinfo).split('x')
                except:
                    # Build seasonXepisode by hand from "Cap.NNN":
                    # 3+ digits means season prefix + 2-digit episode,
                    # otherwise look for an explicit "Temp.N".
                    patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE)
                    info_extra = patron.search(scrapedinfo)

                    if len(str(info_extra.group(1))) >= 3:
                        cap = info_extra.group(1)[-2:]
                        temp = info_extra.group(1)[:-2]
                    else:
                        cap = info_extra.group(1)
                        patron = 'Temp.*?\s*([\d]+)'
                        temp = re.compile(
                            patron, re.IGNORECASE).search(scrapedinfo).group(1)

                title = item.title + " (" + temp + 'x' + cap + ")"

            #logger.info("[newpct1.py] get_episodios: fanart= " +item.fanart)
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=title,
                     url=url,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     fanart=item.fanart))
        except:
            # Skip any card that fails to parse rather than abort the list.
            logger.info("[newpct1.py] ERROR al añadir un episodio")
    # Follow the "Next" pagination link, if present.
    if "pagination" in data:
        patron = '<ul class="pagination">(.*?)</ul>'
        paginacion = scrapertools.get_match(data, patron)
        #logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
        if "Next" in paginacion:
            url_next_page = scrapertools.get_match(
                paginacion, '<a href="([^>]+)>Next</a>')[:-1]
            url_next_page = url_next_page.replace(" ", "%20")
            #logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
            itemlist.append(
                Item(channel=__channel__,
                     action="get_episodios",
                     title=">> Página siguiente",
                     url=url_next_page))

    return itemlist
コード例 #41
0
ファイル: cinecalidad.py プロジェクト: prosich/pelisalacarta
def findvideos(item):
    """Build the list of playable links for a cinecalidad/cinemaqualidade movie.

    The page obfuscates every link as dec("...")+dec("..."): the first
    fragment decodes to a server base URL, the second to the video id.
    Only servers present in the `servidor` map are offered; a few are
    tagged as recommended in the title.

    Returns a list of Item objects (one per server link, plus an optional
    "add to library" entry).
    """
    # Map from decoded base URL -> internal server id used by servertools.
    servidor = {
        "http://uptobox.com/": "uptobox",
        "http://userscloud.com/": "userscloud",
        "https://my.pcloud.com/publink/show?code=": "pcloud",
        "http://thevideos.tv/": "thevideos",
        "http://ul.to/": "uploadedto",
        "http://turbobit.net/": "turbobit",
        "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
        "http://www.mediafire.com/download/": "mediafire",
        "https://www.youtube.com/watch?v=": "youtube",
        "http://thevideos.tv/embed-": "thevideos",
        "//www.youtube.com/embed/": "youtube",
        "http://ok.ru/video/": "okru",
        "http://ok.ru/videoembed/": "okru",
        "http://www.cinemaqualidade.com/protect/v.html?i=":
        "cinemaqualidade.com",
        "http://usersfiles.com/": "usersfiles",
        "https://depositfiles.com/files/": "depositfiles",
        "http://www.nowvideo.sx/video/": "nowvideo",
        "http://vidbull.com/": "vidbull",
        "http://filescdn.com/": "filescdn",
        "http://www.yourupload.com/watch/": "yourupload"
    }
    logger.info("pelisalacarta.channels.cinecalidad links")
    itemlist = []
    data = scrapertools.cache_page(item.url)

    # Example of what the page contains:
    #   {h=dec("111 123 123 ...")+dec("114 114 110 ...");}
    patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud"]
    for scrapedurl, scrapedtitle in matches:
        # Decode each fragment once: the original code called dec() on the
        # same fragment up to eight times per match.
        base_url = dec(scrapedurl)
        if base_url not in servidor:
            continue
        server = servidor[base_url]

        if 'yourupload' in base_url:
            # yourupload only plays through its /embed/ endpoint.
            url = base_url.replace('watch', 'embed') + dec(scrapedtitle)
        else:
            url = base_url + dec(scrapedtitle)

        title = "Ver " + item.contentTitle + " en " + server.upper()
        if server in recomendados:
            title = title + "[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]"

        thumbnail = servertools.guess_server_thumbnail(server)
        plot = ""
        if DEBUG:
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "])")
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 fulltitle=item.title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 extra=item.thumbnail,
                 server=server))

    # Offer adding the movie to the library, unless we are already being
    # called from the library integration itself (extra == 'findvideos').
    if config.get_library_support() and len(itemlist) > 0 \
            and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title=
                 '[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))
    return itemlist
コード例 #42
0
ファイル: search.py プロジェクト: userColaborator/addon
def do_search(item, categories=None):
    """Run a global search for item.extra / item.wanted across all channels.

    categories: optional list of category names; a channel must declare
    every requested category to be searched.  The search runs either in
    parallel (one thread per channel) or sequentially depending on the
    "multithread" setting, showing a cancellable progress dialog.

    Returns the aggregated Item list (or the exact-match list when invoked
    from the contextual "search in other channels" menu).
    """
    logger.info("blaa categorias %s" % categories)

    if item.contextual==True:
        # Contextual search: movies only, and let the user pick which
        # channels to search first.
        categories = ["Películas"]
        setting_item = Item(channel=item.channel, title=config.get_localized_string(59994), folder=False,
                            thumbnail=get_thumb("search.png"))
        if not setting_channel(setting_item):
            return False

    if categories is None:
        categories = []

    multithread = config.get_setting("multithread", "search")
    result_mode = config.get_setting("result_mode", "search")

    if item.wanted!='':
        tecleado=item.wanted
    else:
        tecleado = item.extra

    itemlist = []

    channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
    logger.info("channels_path=%s" % channels_path)

    channel_language = config.get_setting("channel_language", default="all")
    logger.info("channel_language=%s" % channel_language)

    # On Kodi we must wait a moment before opening the progress dialog,
    # otherwise it ends up *behind* the "loading..." dialog and the user
    # cannot press cancel.
    time.sleep(0.5)
    progreso = platformtools.dialog_progress(config.get_localized_string(30993) % tecleado, "")
    channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x))
    import math

    threads = []
    search_results = {}
    start_time = time.time()
    list_channels_search = []

    # First pass: keep only the channels that should be searched.
    for index, infile in enumerate(channel_files):
        try:
            basename = os.path.basename(infile)
            basename_without_extension = basename[:-5]
            logger.info("%s..." % basename_without_extension)

            channel_parameters = channeltools.get_channel_parameters(basename_without_extension)

            # Skip inactive channels.
            if not channel_parameters["active"]:
                logger.info("%s -no activo-" % basename_without_extension)
                continue

            # Category filter: the channel must declare every requested
            # category.  FIX: the original `continue` only restarted the
            # inner `for cat` loop, so the filter never excluded any
            # channel; a flag is needed to skip the channel itself.
            if categories:
                descartado = False
                for cat in categories:
                    if cat not in channel_parameters["categories"]:
                        logger.info("%s -no en %s-" % (basename_without_extension, cat))
                        descartado = True
                        break
                if descartado:
                    continue

            # Skip adult channels when adult mode is disabled.
            if channel_parameters["adult"] and config.get_setting("adult_mode") == 0:
                logger.info("%s -adulto-" % basename_without_extension)
                continue

            # Skip channels whose language is filtered out.
            if channel_language != "all" and channel_language not in channel_parameters["language"] \
                    and "*" not in channel_parameters["language"]:
                logger.info("%s -idioma no válido-" % basename_without_extension)
                continue

            # Skip channels excluded from global search (channel default,
            # optionally overridden per-channel by the user's settings).
            include_in_global_search = channel_parameters["include_in_global_search"]
            if include_in_global_search:
                include_in_global_search = config.get_setting("include_in_global_search", basename_without_extension)

            if not include_in_global_search:
                logger.info("%s -no incluido en lista a buscar-" % basename_without_extension)
                continue
            list_channels_search.append(infile)
        except:
            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
            import traceback
            logger.error(traceback.format_exc())
            continue

    # Second pass: launch the search on every selected channel.
    for index, infile in enumerate(list_channels_search):
        try:
            # float() is required: plain integer division truncates on Python 2.x.
            percentage = int(float((index+1))/len(list_channels_search)*float(100))
            basename = os.path.basename(infile)
            basename_without_extension = basename[:-5]
            logger.info("%s..." % basename_without_extension)
            channel_parameters = channeltools.get_channel_parameters(basename_without_extension)
            # Update progress here so the dialog shows the exact channel
            # being searched.
            progreso.update(percentage,
                            config.get_localized_string(60520) % (channel_parameters["title"]))
            if progreso.iscanceled():
                progreso.close()
                logger.info("Búsqueda cancelada")
                return itemlist
            # Multi-threaded mode: one daemon thread per channel.
            if multithread:
                t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado],
                           name=channel_parameters["title"])
                t.setDaemon(True)
                t.start()
                threads.append(t)
            # Single-threaded mode: search inline.
            else:
                logger.info("Intentado búsqueda en %s de %s " % (basename_without_extension, tecleado))
                channel_search(search_results, channel_parameters, tecleado)
        except:
            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
            import traceback
            logger.error(traceback.format_exc())
            continue

    # Multi-threaded mode: wait for the workers, updating progress.
    # isAlive()/getName() (instead of is_alive()/name) are kept on purpose:
    # they work on both old and new Python versions without a try-except.
    if multithread:
        pendent = [a for a in threads if a.isAlive()]
        if len(pendent) > 0: t = float(100) / len(pendent)
        while len(pendent) > 0:
            index = (len(threads) - len(pendent)) + 1
            percentage = int(math.ceil(index * t))

            list_pendent_names = [a.getName() for a in pendent]
            mensaje = config.get_localized_string(70282) % (", ".join(list_pendent_names))
            progreso.update(percentage, config.get_localized_string(60521) % (len(threads) - len(pendent) + 1, len(threads)),
                            mensaje)
            if progreso.iscanceled():
                logger.info("Búsqueda cancelada")
                break
            time.sleep(0.5)
            pendent = [a for a in threads if a.isAlive()]
    total = 0
    for channel in sorted(search_results.keys()):
        for element in search_results[channel]:
            total += len(element["itemlist"])
            title = channel
            # Contextual / TMDB searches always use the grouped-by-channel view.
            if item.contextual == True or item.action == 'search_tmdb':
                result_mode = 1
            # result_mode 0: one row per channel with the hit count.
            if result_mode == 0:
                if len(search_results[channel]) > 1:
                    title += " -%s" % element["item"].title.strip()
                title += " (%s)" % len(element["itemlist"])
                title = re.sub("\[COLOR [^\]]+\]", "", title)
                title = re.sub("\[/COLOR]", "", title)
                itemlist.append(Item(title=title, channel="search", action="show_result", url=element["item"].url,
                                     extra=element["item"].extra, folder=True, adult=element["adult"],
                                     from_action="search", from_channel=element["item"].channel, tecleado=tecleado))
            # Otherwise: all results inline, under a per-channel header row.
            else:
                title = " [ Resultados del canal %s ] " % channel
                itemlist.append(Item(title=title, channel="search", action="",
                                     folder=False, text_bold=True, from_channel=channel))
                for i in element["itemlist"]:
                    if i.action:
                        title = "    " + i.title
                        # Mark results coming from an InfoPlus window.
                        if "infoPlus" in categories:
                            i.infoPlus = True
                        itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel,
                                                channel="search", action="show_result", adult=element["adult"]))
    title = config.get_localized_string(59972) % (
    tecleado, total, time.time() - start_time)
    itemlist.insert(0, Item(title=title, text_color='yellow'))
    progreso.close()
    # Contextual menu entry point wants only the exact matches.
    if item.contextual == True:
        return exact_results(itemlist, tecleado)
    else:
        return itemlist
コード例 #43
0
ファイル: pepecine.py プロジェクト: shorbete/pelisalacarta
def get_episodios(item):
    """Return one Item per episode of a pepecine series, plus a library entry.

    item.url holds the repr of a list of link dicts (parsed with
    ast.literal_eval).  Links are grouped by a "<season>x<episode>" key and
    the episodes are emitted in numeric (season, episode) order, enriched
    with TMDB plot/thumbnail/title when available.
    """
    logger.info("[pepecine.py] get_episodios")
    itemlist = []

    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(id_Tmdb=item.extra, tipo="tv")
    except:
        # TMDB is best-effort only; episode extras are simply skipped below.
        pass

    infoLabels = item.infoLabels

    lista_links = ast.literal_eval(item.url)
    # Group links by episode key, e.g. "2x05".
    temXcap_dict = {}
    for link in lista_links:
        # NOTE: title_id intentionally survives the loop; the library URL
        # below uses the last link's title_id.
        title_id = link['title_id']
        try:
            season = str(int(link['season']))
            episode = str(int(link['episode'])).zfill(2)
        except (KeyError, ValueError, TypeError):
            # Links without a parseable season/episode are dropped.
            continue
        # setdefault replaces the deprecated dict.has_key() lookup dance.
        temXcap_dict.setdefault(season + "x" + episode, []).append(link)

    # Sort episodes numerically by (season, episode).
    temXcap_list = sorted(
        temXcap_dict.items(),
        key=lambda x: (int(x[0].split("x")[0]), int(x[0].split("x")[1])))
    for episodio in temXcap_list:
        title = infoLabels['titleraw'] + ' (' + episodio[0] + ')'
        infoLabels['season'], infoLabels['episode'] = episodio[0].split('x')
        try:
            # Best-effort: add per-episode plot, image and title from TMDB.
            datos_tmdb = oTmdb.get_episodio(temporada=infoLabels['season'],
                                            capitulo=infoLabels['episode'])
            if datos_tmdb["episodio_sinopsis"] != "":
                infoLabels['plot'] = datos_tmdb["episodio_sinopsis"]
            if datos_tmdb["episodio_imagen"] != "":
                item.thumbnail = datos_tmdb["episodio_imagen"]
            if datos_tmdb["episodio_titulo"] != "":
                title = title + " " + datos_tmdb["episodio_titulo"].replace(
                    '\t', '')
        except:
            pass

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=str(episodio[1]),
                 extra=item.extra,
                 show=infoLabels['tvshowtitle'],
                 fanart=item.fanart,
                 infoLabels=infoLabels,
                 thumbnail=item.thumbnail,
                 viewmode="movie_with_plot",
                 text_color="0xFFFFCE9C"))

    if config.get_library_support() and itemlist:
        url = urlparse.urljoin(__url_base__, "series-online/" + str(title_id))
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca",
                 url=url,
                 text_color="0xFFe5ffcc",
                 action="add_serie_to_library",
                 extra='episodios###serie_add',
                 show=infoLabels['tvshowtitle'],
                 thumbnail='https://d5.usercdn.com/dl/i/02360/a99fzwbqdaen.png'
                 ))

    return itemlist
コード例 #44
0
ファイル: tuteve.py プロジェクト: codixor/maximumTv
def mainlist(item):
    """Entry point for the tuteve channel.

    The incoming item is ignored; a fresh Item pointing at the portal
    root is built so the channel always starts from the same URL, and
    the section listing is delegated to secciones().
    """
    logger.info("tvalacarta.channels.tuteve mainlist")

    root_item = Item(channel=CHANNELNAME, url="http://play.tuteve.tv")
    return secciones(root_item)
コード例 #45
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def episodios(item):
    """List every episode of a hdfull series, season by season.

    item.url may carry a trailing "###<show_id>;<type>" marker; when it is
    missing (show_id "0") the id is scraped from the page.  For logged-in
    users a follow/unfollow toggle row is prepended, and videolibrary /
    download rows are appended.

    Renamed locals: the original shadowed the builtins `str` and `id`.
    """
    logger.info()
    show_id = "0"
    itemlist = []

    ## Load the user's watch-status flags for all shows/episodes at once.
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    url_targets = item.url

    if "###" in item.url:
        show_id = item.url.split("###")[1].split(";")[0]
        item.url = item.url.split("###")[0]

    ## Seasons page.
    data = agrupa_datos(httptools.downloadpage(item.url).data)

    if show_id == "0":
        ## Coming from listado_series the id is unknown: scrape it from the page.
        show_id = scrapertools.get_match(
            data, "<script>var sid = '([^']+)';</script>")
        url_targets = url_targets.replace('###0', '###' + show_id)

    marca = get_status(status, "shows", show_id)
    if marca != "" and account and item.category != "Series" and "XBMC" not in item.title:
        # Already following this show: offer an "Abandonar" (unfollow) row.
        if config.get_videolibrary_support():
            title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                     "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title=title,
                     fulltitle=title,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))
        title = marca.replace('green', 'red').replace('Siguiendo', 'Abandonar')
        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))
    elif account and item.category != "Series" and "XBMC" not in item.title:
        # Not following yet: offer a "Seguir" (follow) row.
        if config.get_videolibrary_support():
            title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                     "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title=title,
                     fulltitle=title,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))
        title = bbcode_kodi2html(" ( [COLOR orange][B]Seguir[/B][/COLOR] )")
        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))

    # One link per season.
    patron = "<li><a href='([^']+)'>[^<]+</a></li>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:

        ## Episodes of this season, fetched via the site's JSON endpoint.
        data = agrupa_datos(httptools.downloadpage(scrapedurl).data)

        sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
        ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
        post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)

        url = host + "/a/episodes"

        data = httptools.downloadpage(url, post=post).data

        episodes = jsontools.load(data)

        for episode in episodes:

            thumbnail = host + "/thumbs/" + episode['thumbnail']

            temporada = episode['season']
            episodio = episode['episode']
            # Zero-pad single-digit episode numbers: "3" -> "03".
            if len(episodio) == 1: episodio = '0' + episodio

            if episode['languages'] != "[]":
                idiomas = "( [COLOR teal][B]"
                for idioma in episode['languages']:
                    idiomas += idioma + " "
                idiomas += "[/B][/COLOR])"
                idiomas = bbcode_kodi2html(idiomas)
            else:
                idiomas = ""

            # Prefer the Spanish title, fall back to English.
            if episode['title']:
                try:
                    title = episode['title']['es'].strip()
                except:
                    title = episode['title']['en'].strip()

            if len(title) == 0:
                title = "Temporada " + temporada + " Episodio " + episodio

            # Titles may be utf-8 or iso-8859-1 depending on the source.
            try:
                title = temporada + "x" + episodio + " - " + title.decode(
                    'utf-8') + ' ' + idiomas
            except:
                title = temporada + "x" + episodio + " - " + title.decode(
                    'iso-8859-1') + ' ' + idiomas

            marca_episodio = get_status(status, 'episodes', episode['id'])
            if marca_episodio != "": title += marca_episodio

            try:
                title = title.encode('utf-8')
            except:
                title = title.encode('iso-8859-1')

            url = urlparse.urljoin(
                scrapedurl, 'temporada-' + temporada + '/episodio-' +
                episodio) + "###" + episode['id'] + ";3"

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     show=item.show,
                     folder=True,
                     contentType="episode"))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 url=url_targets,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Descargar todos los episodios de la serie",
                 url=url_targets,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
コード例 #46
0
ファイル: mocosoftx.py プロジェクト: codixor/maximumTv
def foro(item):
    """List the sections, threads and next-page link of an SMF forum page.

    URLs scraped from the page may carry a trailing "?PHPSESSID=..."
    session id; it is stripped so links stay stable between sessions
    (the original duplicated that stripping in three places).
    """
    logger.info("pelisalacarta.channels.mocosoftx foro")
    itemlist = []

    def quitar_sesion(url):
        # Strip the trailing "?PHPSESSID=..." query, if present.
        if "PHPSESSID" in url:
            return scrapertools.get_match(url, "(.*?)\?PHPSESSID=")
        return url

    # Download the page.
    data = scrapertools.cache_page(item.url, headers=MAIN_HEADERS)

    # Forums and subforums.
    patron = '<h4><a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        # The board serves iso-8859-1; normalize titles to utf-8.
        scrapedtitle = unicode(scrapedtitle, "iso-8859-1",
                               errors="replace").encode("utf-8")
        title = ">> Foro " + scrapedtitle
        url = quitar_sesion(urlparse.urljoin(item.url, scrapedurl))
        thumbnail = ""
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 title=title,
                 action="foro",
                 url=url,
                 plot=plot,
                 thumbnail=thumbnail,
                 folder=True))

    # Individual threads.
    patron = '<td class="icon2 windowbgb">[^<]+'
    patron += '<img src="([^"]+)"[^<]+'
    patron += '</td>[^<]+'
    patron += '<td class="subject windowbgb2">[^<]+'
    patron += '<div >[^<]+'
    patron += '<span id="msg_\d+"><a href="([^"]+)">([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = quitar_sesion(urlparse.urljoin(item.url, scrapedurl))
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(
            Item(channel=__channel__,
                 title=title,
                 action="findvideos",
                 url=url,
                 plot=plot,
                 thumbnail=thumbnail,
                 folder=True))

    # Next-page marker.
    patronvideos = '<strong>\d+</strong[^<]+<a class="navPages" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedtitle = ">> Página siguiente"
        scrapedurl = quitar_sesion(urlparse.urljoin(item.url, matches[0]))
        scrapedthumbnail = ""
        scrapedplot = ""
        itemlist.append(
            Item(channel=__channel__,
                 title=scrapedtitle,
                 action="foro",
                 url=scrapedurl,
                 plot=scrapedplot,
                 thumbnail=scrapedthumbnail,
                 folder=True))

    return itemlist
コード例 #47
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def items_usuario(item):
    """List a page of the user's hdfull cards (favorites / pending / etc.).

    item.url is "<endpoint>?<post-data>" with start/limit paging params;
    a ">> Página siguiente" row is appended when the page came back full.

    Renamed local: the original shadowed the builtin `str`.
    """
    logger.info()

    itemlist = []
    ## Load the user's watch-status flags.
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    ## Split endpoint URL and POST payload, then bump start for next page.
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]

    old_start = scrapertools.get_match(post, 'start=([^&]+)&')
    limit = scrapertools.get_match(post, 'limit=(\d+)')
    start = "%s" % (int(old_start) + int(limit))

    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post

    ## Fetch the user's cards.
    data = httptools.downloadpage(url, post=post).data
    fichas_usuario = jsontools.load(data)

    for ficha in fichas_usuario:

        # Prefer the Spanish title, fall back to English.
        try:
            title = ficha['title']['es'].strip()
        except:
            title = ficha['title']['en'].strip()

        try:
            title = title.encode('utf-8')
        except:
            pass

        show = title

        try:
            thumbnail = host + "/thumbs/" + ficha['thumbnail']
        except:
            thumbnail = host + "/thumbs/" + ficha['thumb']

        try:
            # Series card (raises KeyError for movies -> except branch).
            url = urlparse.urljoin(
                host,
                '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
            action = "episodios"
            marca = get_status(status, 'shows', ficha['id'])
            if "show_title" in ficha:
                # Episode card: build "SxEE - Show: Episode title".
                action = "findvideos"
                try:
                    serie = ficha['show_title']['es'].strip()
                except:
                    serie = ficha['show_title']['en'].strip()
                temporada = ficha['season']
                episodio = ficha['episode']
                serie = bbcode_kodi2html("[COLOR whitesmoke][B]" + serie +
                                         "[/B][/COLOR]")
                if len(episodio) == 1: episodio = '0' + episodio
                try:
                    title = temporada + "x" + episodio + " - " + serie + ": " + title
                except:
                    title = temporada + "x" + episodio + " - " + serie.decode(
                        'iso-8859-1') + ": " + title.decode('iso-8859-1')
                url = urlparse.urljoin(
                    host, '/serie/' + ficha['permalink'] + '/temporada-' +
                    temporada + '/episodio-' +
                    episodio) + "###" + ficha['id'] + ";3"
        except:
            # Movie card.
            url = urlparse.urljoin(host, '/pelicula/' +
                                   ficha['perma']) + "###" + ficha['id'] + ";2"
            action = "findvideos"
            marca = get_status(status, 'movies', ficha['id'])
        if marca != "": title += marca

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=show,
                 folder=True))

    # A full page implies there may be more results.
    if len(itemlist) == int(limit):
        itemlist.append(
            Item(channel=item.channel,
                 action="items_usuario",
                 title=">> Página siguiente",
                 url=next_page,
                 folder=True))

    return itemlist
コード例 #48
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def findvideos(item):
    """Build the play/download Item list for a hdfull movie or episode.

    item.url may end in "###<ficha_id>;<type>" (here type "2" means movie,
    "3" episode, judging by the other functions in this file).  Links on
    the page are obfuscated: a base64 blob ('var ad') is decoded with a
    key scraped from the site's minified JS, and each entry is resolved
    against provider templates extracted from providers.js.
    """
    logger.info()

    itemlist = []
    ## Load the user's watch-status flags.
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    url_targets = item.url

    ## Split off the trailing id/type marker.
    # NOTE(review): `id` and `type` shadow builtins and are only bound in
    # this branch; when "###" is missing, the later `type == "2"` check
    # compares against the *builtin* type (False) and `id` refers to the
    # builtin — it works by accident, confirm callers always pass the marker.
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]

    # For logged-in users viewing a movie: favorites toggle + trailer rows.
    if type == "2" and account and item.category != "Cine":
        title = bbcode_kodi2html(
            " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
        if "Favorito" in item.title:
            title = bbcode_kodi2html(
                " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
        if config.get_videolibrary_support():
            title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show +
                                           "[/B][/COLOR] )")
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     folder=False))

            title_label = bbcode_kodi2html(
                " ( [COLOR green][B]Tráiler[/B][/COLOR] )")

            itemlist.append(
                Item(channel=item.channel,
                     action="trailer",
                     title=title_label,
                     fulltitle=title_label,
                     url=url_targets,
                     thumbnail=item.thumbnail,
                     show=item.show))

        itemlist.append(
            Item(channel=item.channel,
                 action="set_status",
                 title=title,
                 fulltitle=title,
                 url=url_targets,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=True))

    # Scrape the decryption key from the site's player JS.
    data_js = httptools.downloadpage(
        "http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
    key = scrapertools.find_single_match(
        data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

    # providers.js is obfuscated; try jhexdecode first, then aaencode.
    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
    try:
        data_js = jhexdecode(data_js)
    except:
        from lib.aadecode import decode as aadecode
        data_js = data_js.split(";゚ω゚")
        decode_aa = ""
        for match in data_js:
            decode_aa += aadecode(match)

        # Quote function/var bodies so the provider map stays parseable below.
        data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
        data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

    # Decode the page's link blob with the scraped key.
    data = agrupa_datos(httptools.downloadpage(item.url).data)
    data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
    data_decrypt = jsontools.load(
        obfs(base64.b64decode(data_obf), 126 - int(key)))

    infolabels = {}
    year = scrapertools.find_single_match(
        data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
    infolabels["year"] = year

    # Resolve each entry against its provider template to get the final URL.
    # NOTE(review): eval() runs code assembled from scraped JS — a
    # compromised/changed site could execute arbitrary expressions here.
    matches = []
    for match in data_decrypt:
        prov = eval(
            scrapertools.find_single_match(
                data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
        function = prov["l"].replace("code", match["code"]).replace(
            "var_1", match["code"])

        url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}")
        url = re.sub(r'\'|"|\s|\+', '', url)
        url = re.sub(r'var_\d+\[\d+\]', '', url)
        embed = prov["e"]

        matches.append([match["lang"], match["quality"], url, embed])

    enlaces = []
    for idioma, calidad, url, embed in matches:
        # Derive the server name from the URL's host, then normalize aliases.
        servername = scrapertools.find_single_match(
            url, "(?:http:|https:)//(?:www.|)([^.]+).")
        if servername == "streamin": servername = "streaminto"
        if servername == "waaw": servername = "netutv"
        if servername == "uploaded" or servername == "ul":
            servername = "uploadedto"
        mostrar_server = True
        if config.get_setting("hidepremium") == True:
            mostrar_server = servertools.is_server_enabled(servername)
        if mostrar_server:
            option = "Ver"
            # An empty/object embed template marks a download-only link.
            if re.search(r'return ([\'"]{2,}|\})', embed):
                option = "Descargar"
            calidad = unicode(calidad, "utf8").upper().encode("utf8")
            servername_c = unicode(servername,
                                   "utf8").capitalize().encode("utf8")
            title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")"
            thumbnail = item.thumbnail
            plot = item.title + "\n\n" + scrapertools.find_single_match(
                data, '<meta property="og:description" content="([^"]+)"')
            plot = scrapertools.htmlclean(plot)
            fanart = scrapertools.find_single_match(
                data, '<div style="background-image.url. ([^\s]+)')
            if account:
                # Re-attach the id/type marker so play() can update status.
                url += "###" + id + ";" + type

            enlaces.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     fulltitle=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     fanart=fanart,
                     show=item.show,
                     folder=True,
                     server=servername,
                     infoLabels=infolabels,
                     contentTitle=item.contentTitle,
                     contentType=item.contentType,
                     tipo=option))

    # "Ver" rows before "Descargar" rows.
    enlaces.sort(key=lambda it: it.tipo, reverse=True)
    itemlist.extend(enlaces)
    ## type "2" = movie: offer the videolibrary entry if not already added.
    if type == "2" and item.category != "Cine":
        ## STRM for every available server link; best-effort only.
        try:
            itemlist.extend(file_cine_library(item, url_targets))
        except:
            pass

    return itemlist
コード例 #49
0
def letras(item):
    """Build the alphabetical index menu ("0-9", "A".."Z") for peliculasmx.

    Every entry points to the corresponding /letra/<x>.html page, with the
    same URL duplicated in ``extra`` (as the original listing expects).
    """
    logger.info("pelisalacarta.channels.peliculasmx letras")

    base_url = "http://www.peliculasmx.net/letra/%s.html"
    # "09" is the digits bucket; the rest are the lowercase letters a-z
    slugs = ["09"] + [chr(c) for c in range(ord("a"), ord("z") + 1)]

    itemlist = []
    for slug in slugs:
        url = base_url % slug
        label = "0-9" if slug == "09" else slug.upper()
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=label,
                             url=url,
                             extra=url))

    return itemlist
コード例 #50
0
def completo(item):
    """Walk every page of a listing (or of a show's episodes) and return
    the full, flattened item list.

    For "serie*" extras it narrows the search by subcategory, tries to
    attach a fanart from TvDb, and finally offers an "add to XBMC
    library" entry on XBMC/Boxee platforms.
    """
    logger.info("[newpct1.py] completo")
    itemlist = []
    categoryID = ""

    # Keep the original values: they are restored on every "next page"
    # item before requesting it.
    item_extra = item.extra
    item_show = item.show
    item_title = item.title

    if item_extra.startswith("serie"):
        ultimo_action = "get_episodios"

        if item.extra != "serie_add":
            # Narrow the search by subcategory when possible
            if item_extra == "serie-hd":
                categoryID = buscar_en_subcategoria(item.show, '1469')
            elif item_extra == "serie-vo":
                categoryID = buscar_en_subcategoria(item.show, '775')

            if categoryID != "":
                item.url = item.url.replace("categoryID=",
                                            "categoryID=" + categoryID)

            # Fanart lookup via TvDb
            oTvdb = TvDb()
            serieID = oTvdb.get_serieId_by_title(item.show)
            fanart = oTvdb.get_graphics_by_serieId(serieID)
            if len(fanart) > 0:
                item.fanart = fanart[0]
        else:
            item_title = item.show
            item.title = item.show

        items_programas = get_episodios(item)
    else:
        ultimo_action = "listado"
        items_programas = listado(item)

    if len(items_programas) == 0:
        return itemlist  # nothing found: return the empty list

    while True:
        # The "next page" entry, when present, is always the last item
        ultimo_item = items_programas[len(items_programas) - 1]

        if ultimo_item.action != ultimo_action:
            # Last page: append everything and stop
            itemlist.extend(items_programas)
            break

        # Intermediate page: drop the "next page" entry ...
        ultimo_item = items_programas.pop()
        # ... keep the real entries ...
        itemlist.extend(items_programas)

        # ... and fetch the following page with the original context
        ultimo_item.extra = item_extra
        ultimo_item.show = item_show
        ultimo_item.title = item_title
        logger.info("[newpct1.py] completo url=" + ultimo_item.url)
        if item_extra.startswith("serie"):
            items_programas = get_episodios(ultimo_item)
        else:
            items_programas = listado(ultimo_item)

    if (config.get_platform().startswith("xbmc")
            or config.get_platform().startswith("boxee")
        ) and len(itemlist) > 0 and (item.extra.startswith("serie")):
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca de XBMC",
                 url=item.url,
                 action="add_serie_to_library",
                 extra="completo###serie_add",
                 show=item.show))
    logger.info("[newpct1.py] completo items=" + str(len(itemlist)))
    return itemlist
コード例 #51
0
def findvideos(item):
    """Collect every playable/downloadable link for a descargasmix page.

    Scrapes torrent links (old and new layout), the online-mirrors
    section and the per-server download blocks (obfuscated behind
    ``dm(c.a('...'))`` payloads), then appends trailer-search and
    add-to-library entries.
    """
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    # Episode items delegate to epienlaces()
    if item.extra and item.extra != "findvideos":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year != "":
        # Best effort TMDB enrichment: never break the listing on failure
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent links, old page layout
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))

    # Online-mirrors section
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-'
                                                       'posts">')
    if data_online:
        itemlist.append(item.clone(title="Enlaces Online", action="", text_color=color1))
        patron = 'dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            # dm() de-obfuscates the payload into a plain URL
            enlace = dm(code)
            enlaces = servertools.findvideos(data=enlace)
            if enlaces:
                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))

    # Download sections, one block per server
    bloques_descarga = scrapertools.find_multiple_matches(data, '<div class="floatLeft double(?:nuevo|)">(.*?)</div>(.*?)(?:<div id="mirrors"|<script>)')
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        itemlist.append(item.clone(title=title_bloque, action="", text_color=color1))
        patron = '<div class="fondoenlaces".*?id=".*?_([^"]+)".*?textContent=nice=dm\(c.a\(\'([^\']+)\''
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            # FIX: membership test instead of bitwise '|' on two comparisons
            if scrapedserver in ("ul", "uploaded"):
                scrapedserver = "uploadedto"
            titulo = scrapedserver.capitalize()
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    # Import only to verify a connector exists for this server;
                    # ImportError is swallowed below and the server is skipped.
                    servers_module = __import__("servers."+scrapedserver)
                    # Count how many links this server block holds
                    patron = "(dm\(c.a\('"+scrapedurl.replace("+", "\+")+"'.*?)</div>"
                    data_enlaces = scrapertools.find_single_match(bloque, patron)
                    patron = 'dm\(c.a\(\'([^\']+)\''
                    matches_enlaces = scrapertools.find_multiple_matches(data_enlaces, patron)
                    numero = str(len(matches_enlaces))
                    titulo = "   "+titulo+" - Nº enlaces:"+numero
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist
コード例 #52
0
def findvideos(item):
    """List the mirrors of a seriesdanko.com episode page.

    Every table row yields the link URL, the server (taken from its icon
    filename), the audio/subtitle language flag and whether it is a
    watch or a download link. Non-seriesdanko URLs fall back to the
    generic server detection.
    """
    logger.info("pelisalacarta.channels.seriesdanko findvideos")

    # On XBMC Eden, store the name used later for subtitle lookup
    if config.get_platform()=="xbmceden":
        from core.subtitletools import saveSubtitleName
        saveSubtitleName(item)

    if "seriesdanko.com" in item.url:
        data = scrapertools.downloadpageGzip(item.url).replace("\n","")
        patronvideos = "<tr><td class=('tam12'>.*?)</td></tr>"
        matches = re.compile(patronvideos,re.DOTALL).findall(data)
        itemlist = []
        for match in matches:
            try:
                scrapedurl = urlparse.urljoin(item.url,re.compile(r"href='(.+?)'").findall(match)[0])
            except:continue

            # FIX: make sure the thumbnail is defined even when the row has
            # no second image (previously it could be unbound, or leak from
            # the previous iteration).
            scrapedthumbnail = ""
            # Server name shown in the title, taken from its icon filename
            try:
                scrapedthumbnail = re.compile(r"src='(.+?)'").findall(match)[1]
                servidor = re.compile(r"servidores/([^\.]+)\.").findall(scrapedthumbnail)[0]
                servidor = " [" + servidor + "]"
                if not scrapedthumbnail.startswith("http"):
                    scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
            except: servidor = ""
            try:
                # First image of the row is the language flag
                subtitle = re.compile(r"src='(.+?)'").findall(match)[0]
                if "es.png" in subtitle:
                    subtitle = " (Español)"
                elif "la.png" in subtitle:
                    subtitle = " (Latino)"
                elif "vo.png" in subtitle:
                    subtitle = " (Version Original)"
                elif "vos.png" in subtitle:
                    subtitle = " (Subtitulado)"
                # FIX: these four branches tested match[2] (a single
                # character of the row), so they could never match; test
                # the flag image name like the branches above.
                elif "ca.png" in subtitle:
                    subtitle = " (Catalan)"
                elif "ga.jpg" in subtitle:
                    subtitle = " (Gallego)"
                elif "eu.jpg" in subtitle:
                    subtitle = " (Euskera)"
                elif "ba.png" in subtitle:
                    subtitle = " (Bable)"
                else:
                    subtitle = "(desconocido)"

                try:
                    opcion = re.compile(r"(Ver|Descargar)").findall(match)[0]
                except:
                    opcion = "Ver"

                # Server shown in the title (change dated 15-12-2014)
                scrapedtitle = opcion + " video " + subtitle + servidor
            except:
                scrapedtitle = item.title
            scrapedplot = ""

            # Append the entry for XBMC
            itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot, fulltitle = item.fulltitle, extra = item.thumbnail , fanart=item.thumbnail , folder=False) )

    else:
        from core import servertools
        itemlist = servertools.find_video_items( item )

    return itemlist
コード例 #53
0
def nuevos_cap(item):
    """List newly added episodes ("Nuevos") or new seasons ("Nuevas")
    from the JSON feed in item.url.

    Season numbers come either from the feed ("b" entries) or are parsed
    out of the "NxM" pattern in the name. Returns playable episode items
    or season ("episodios") items depending on the feed section.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)
    logger.debug(data)
    capitulos = []
    if "Nuevas" in item.title:
        for child in data["b"]:
            capitulos.append([child["season"], child])
    else:
        for child in data["a"]:
            capitulos.append(['', child])

    for season, child in capitulos:
        # FIX: take a per-iteration copy; previously every appended Item
        # shared (and kept mutating) the very same item.infoLabels dict,
        # so all entries ended up with the last episode's season/episode.
        infoLabels = dict(item.infoLabels)
        if child.get('runtime'):
            try:
                # "90 min." -> seconds
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season:
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0
        if "Nuevos" in item.title:
            if not child['episode']:
                episode = scrapertools.find_single_match(
                    child['name'], '\d+x(\d+)')
                if not episode:
                    episode = "0"
                infoLabels['episode'] = int(episode)
            elif "al" in child['episode']:
                # Ranges like "1 al 5" cannot be mapped to one episode
                episode = "0"
                infoLabels['episode'] = int(episode)
            else:
                infoLabels['episode'] = int(child['episode'])
            infoLabels['mediatype'] = "episode"

        if "Nuevos" in item.title:
            url = host % "movie/%s/movie.js" % child["id"]
            action = "findvideos"
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
            fanart = item.fanart
        else:
            url = host % "list/%s" % child["season"] + ext
            action = "episodios"
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        if "Nuevos" in item.title:
            title = fulltitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        else:
            title = fulltitle = child['name']

        # NOTE(review): get_user_agent is referenced, not called — confirm
        # it is a plain string attribute rather than a function.
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 contentTitle=fulltitle,
                 viewmode="movie",
                 show=item.fulltitle,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="nuevos",
                 text_color=color3))

    return itemlist
コード例 #54
0
def listado(item):
    """Parse a newpct1 listing page into movie/series items, following
    the "Next" pagination link when present."""
    logger.info("[newpct1.py] listado")
    itemlist = []

    pagina = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", pagina)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    patron = '<ul class="' + item.extra + '">(.*?)</ul>'
    logger.info("[newpct1.py] patron=" + patron)
    fichas = scrapertools.get_match(data, patron)

    # One <li> per entry: link, title, thumbnail and quality label
    patron = ('<li><a href="([^"]+).*?'      # url
              'title="([^"]+).*?'            # title
              '<img src="([^"]+)"[^>]+>.*?'  # thumbnail
              '<span>([^<]*)</span>')        # quality

    for url, title, thumbnail, calidad in re.findall(patron, fichas, re.DOTALL):
        action = "findvideos"
        extra = ""

        if "1.com/series" in url:
            # Series entry: hand over to completo() with a search URL
            action = "completo"
            extra = "serie"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1)
            title = title.replace("Descarga Serie HD", "", 1)
            title = title.replace("Ver en linea", "", 1).strip()

            if "1.com/series-hd" in url:
                extra = "serie-hd"
                url = ('http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22'
                       + title.replace(" ", "%20"))
                url += '%22&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ", "+")
                url += '&idioma=&ordenar=Nombre&inon=Descendente'
            elif "1.com/series-vo" in url:
                extra = "serie-vo"
                url = ('http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22'
                       + title.replace(" ", "%20"))
                url += '%22&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ", "+")
                url += '&idioma=&ordenar=Nombre&inon=Descendente'
        else:
            title = title.replace("Descargar", "", 1).strip()
            if title.endswith("gratis"):
                title = title[:-7]

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad

        itemlist.append(
            Item(channel=__channel__,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 extra=extra,
                 show=show))

    if "pagination" in data:
        paginacion = scrapertools.get_match(data, '<ul class="pagination">(.*?)</ul>')

        if "Next" in paginacion:
            url_next_page = scrapertools.get_match(
                paginacion,
                '<a href="([^>]+)>Next</a>')[:-1].replace(" ", "%20")
            itemlist.append(
                Item(channel=__channel__,
                     action="listado",
                     title=">> Página siguiente",
                     url=url_next_page,
                     extra=item.extra))
    return itemlist
コード例 #55
0
def entradasconlistas(item):
    """Render a JSON page that may contain lists ('b' entries) and/or
    single videos ('a' entries).

    Lists whose 'season' field is numeric are treated as a TV series
    (action "episodios") unless a plain list was already seen; otherwise
    they recurse into this same action. When both kinds exist, header
    separators "**LISTAS**" / "**VÍDEOS**" are inserted, and — if the
    video library is supported — an entry to add the list/series to it.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)

    # Flags: does this page hold a series (seasons) or plain lists?
    # Note they interact: once contentList is True, later numeric
    # seasons no longer flip the page into "series" mode.
    contentSerie = False
    contentList = False
    if data.get('b'):
        for child in data['b']:
            infolabels = {}

            infolabels['originaltitle'] = child['originalTitle']
            infolabels['plot'] = child['description']
            infolabels['year'] = data['year']
            if child.get('tags'):
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rateHuman'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child.get('runtime'):
                try:
                    # "90 min." -> seconds
                    infolabels['duration'] = int(child['runtime'].replace(
                        " min.", "")) * 60
                except:
                    pass
            if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            season = child.get('season', '')
            if season.isdigit() and not contentList:
                # Numeric season and no plain list seen yet -> series mode
                contentSerie = True
                action = "episodios"
            else:
                contentSerie = False
                contentList = True
                action = "entradasconlistas"

            # host is a format string with one %s placeholder (see usage)
            url = host % "list/%s" % child["id"] + ext
            # Dashes between word characters act as word separators
            title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            fulltitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name'])
            if not title:
                # Fall back to the id when the entry has no name
                title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
                fulltitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id'])
            # Round-trip through unicode so capitalize() handles
            # multi-byte UTF-8 characters correctly (Python 2)
            title = unicode(title, "utf-8").capitalize().encode("utf-8")
            fulltitle = unicode(fulltitle,
                                "utf-8").capitalize().encode("utf-8")
            show = ""
            if contentSerie:
                title += " (Serie TV)"
                show = fulltitle
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

            # NOTE(review): get_user_agent is referenced, not called —
            # confirm it is a plain string attribute, not a function.
            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     show=show,
                     infoLabels=infolabels,
                     contentTitle=fulltitle,
                     viewmode="movie_with_plot",
                     text_color=color3))
    else:
        contentList = True

    # Series pages: newest season first
    if contentSerie and itemlist:
        itemlist.sort(key=lambda it: it.infoLabels['season'], reverse=True)

    if itemlist:
        itemlist.insert(
            0,
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color=color4,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    if data.get("a") and itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color=color6,
                 text_bold=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    # Single videos ('a' entries)
    for child in data.get("a", []):
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                # "90 min." -> seconds
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        # Quality label from the available video height
        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        # Strip accents/diacritics for the plain-ASCII fulltitle
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
            .decode("utf-8")
        if not child['name']:
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])
        # NOTE(review): see the same get_user_agent remark above
        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 video_urls=video_urls,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 viewmode="movie_with_plot",
                 text_color=color3))

    # Entry to add the list of videos to the video library
    if data.get(
            'a'
    ) and itemlist and contentList and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 text_color=color5,
                 title="Añadir esta lista a la videoteca",
                 url=item.url,
                 action="listas"))
    elif contentSerie and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la videoteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 show=item.show,
                 fulltitle=item.fulltitle,
                 extra="episodios"))

    return itemlist
コード例 #56
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def novedades_episodios(item):
    """List the latest episodes from the paginated AJAX endpoint.

    item.url carries "<url>?<post>"; the POST payload's 'start' field is
    advanced by 24 up front to precompute the next-page URL. Returns one
    item per episode plus a "next page" entry when a full page (24
    results) was received.
    """
    logger.info()

    itemlist = []
    # Watched/pending status for every content, used to decorate titles
    status = jsontools.load(
        httptools.downloadpage(host + '/a/status/all').data)

    # Split the pseudo-URL into the real URL and its POST payload
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]

    old_start = scrapertools.get_match(post, 'start=([^&]+)&')
    start = "%s" % (int(old_start) + 24)

    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post

    data = httptools.downloadpage(url, post=post).data

    episodes = jsontools.load(data)

    for episode in episodes:

        thumbnail = host + "/thumbs/" + episode['thumbnail']

        temporada = episode['season']
        episodio = episode['episode']
        if len(episodio) == 1: episodio = '0' + episodio

        # Build the "( es en ... )" language tag when any language is listed
        if episode['languages'] != "[]":
            idiomas = "( [COLOR teal][B]"
            for idioma in episode['languages']:
                idiomas += idioma + " "
            idiomas += "[/B][/COLOR])"
            idiomas = bbcode_kodi2html(idiomas)
        else:
            idiomas = ""

        # Prefer the Spanish show title, fall back to English
        try:
            show = episode['show']['title']['es'].strip()
        except:
            show = episode['show']['title']['en'].strip()

        show = bbcode_kodi2html("[COLOR whitesmoke][B]" + show +
                                "[/B][/COLOR]")

        # FIX: initialize title; previously a falsy episode['title'] left
        # it unbound (NameError on the first iteration) or reused the
        # value leaked from the previous loop iteration.
        title = ""
        if episode['title']:
            try:
                title = episode['title']['es'].strip()
            except:
                title = episode['title']['en'].strip()

        if len(title) == 0:
            title = "Temporada " + temporada + " Episodio " + episodio

        try:
            title = temporada + "x" + episodio + " - " + show.decode(
                'utf-8') + ": " + title.decode('utf-8') + ' ' + idiomas
        except:
            title = temporada + "x" + episodio + " - " + show.decode(
                'iso-8859-1') + ": " + title.decode(
                    'iso-8859-1') + ' ' + idiomas

        # FIX: renamed from 'str' — do not shadow the builtin
        status_str = get_status(status, 'episodes', episode['id'])
        if status_str != "": title += status_str

        try:
            title = title.encode('utf-8')
        except:
            title = title.encode('iso-8859-1')

        # "###<id>;3" suffix is parsed later to identify the content
        url = urlparse.urljoin(
            host, '/serie/' + episode['permalink'] + '/temporada-' +
            temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 folder=True,
                 contentType="episode"))

    # A full page implies there may be more results
    if len(itemlist) == 24:
        itemlist.append(
            Item(channel=item.channel,
                 action="novedades_episodios",
                 title=">> Página siguiente",
                 url=next_page,
                 folder=True))

    return itemlist
コード例 #57
0
ファイル: hdfull.py プロジェクト: llaurator/addon
def menupeliculas(item):
    """Build the movie section menu.

    Returns the static list of movie sub-menu entries; the account-only
    entries (favourites, pending, seen) are included only when the user
    is logged in (``account`` is truthy).
    """
    logger.info()

    itemlist = []

    def entry(action, title, path):
        # Shorthand for a folder entry pointing at a site path.
        return Item(channel=item.channel,
                    action=action,
                    title=title,
                    url=host + path,
                    folder=True)

    if account:
        itemlist.append(entry(
            "items_usuario",
            bbcode_kodi2html("[COLOR orange][B]Favoritos[/B][/COLOR]"),
            "/a/my?target=movies&action=favorite&start=-28&limit=28"))
        itemlist.append(entry(
            "items_usuario",
            bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"),
            "/a/my?target=movies&action=pending&start=-28&limit=28"))

    itemlist.append(entry("fichas", "ABC", "/peliculas/abc"))
    itemlist.append(entry("fichas", "Últimas películas", "/peliculas"))
    itemlist.append(entry("fichas", "Películas Estreno", "/peliculas-estreno"))
    itemlist.append(entry("fichas", "Películas Actualizadas",
                          "/peliculas-actualizadas"))
    itemlist.append(entry("fichas", "Rating IMDB", "/peliculas/imdb_rating"))
    itemlist.append(entry("generos", "Películas por Género", ""))

    if account:
        itemlist.append(entry(
            "items_usuario",
            bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"),
            "/a/my?target=movies&action=seen&start=-28&limit=28"))

    return itemlist
コード例 #58
0
def findvideos(item):
    """List the playable and downloadable links of a newpct1 detail page.

    Forces the "descarga-torrent" page variant, then scrapes, in order:
      1. the direct .torrent link (one Item, server "torrent"),
      2. the streaming links from the "Ver" tab,
      3. the download links from the "Descargar" tab, where one entry
         may pack several space-separated part links (one Item per
         part, titled "(p/total)").

    Links on premium-only servers are skipped when the "hidepremium"
    setting is enabled.
    """
    logger.info("[newpct1.py] findvideos")
    itemlist = []

    ## Any of the three page variants would be valid:
    #item.url = item.url.replace("1.com/","1.com/ver-online/")
    #item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")

    # Download the page; the source is iso-8859-1, normalise to utf-8
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  scrapertools.cache_page(item.url))
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    title = scrapertools.find_single_match(
        data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(
        data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(
        data, '<div class="entry-left">.*?src="([^"]+)"')

    # Example anchor:
    # <a href="...?link=descargar-torrent/....html" title="..." class="btn-torrent" target="_blank">...</a>
    patron = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">'

    # Scraped torrent link
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server="torrent",
                 title=title + " [torrent]",
                 fulltitle=title,
                 url=url,
                 thumbnail=caratula,
                 plot=item.plot,
                 folder=False))

    # Watch/download tabs: strip the JS wrappers so that the plain
    # target URLs are left in the markup for the regex below.
    data = data.replace("'", '"')
    data = data.replace(
        'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=',
        "")
    data = data.replace(
        "http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")

    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)

    patron = '<div class="box1"><img src="([^"]+)".*?'  # logo
    patron += '<div class="box2">([^<]+)</div>'  # server
    patron += '<div class="box3">([^<]+)</div>'  # language
    patron += '<div class="box4">([^<]+)</div>'  # quality
    patron += '<div class="box5"><a href="([^"]+)".*?'  # link
    patron += '<div class="box6">([^<]+)</div>'  # title

    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    # Loop-invariant: read the premium-filter setting once, not per link.
    hide_premium = config.get_setting("hidepremium") == "true"

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        servidor = servidor.replace("played", "playedto")
        if hide_premium and not servertools.is_server_enabled(servidor):
            continue
        titulo = titulo + " [" + servidor + "]"
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=servidor,
                 title=titulo,
                 fulltitle=item.title,
                 url=enlace,
                 thumbnail=logo,
                 plot=item.plot,
                 folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        if hide_premium and not servertools.is_server_enabled(servidor):
            continue
        # One download entry may pack several space-separated part links.
        partes = enlace.split(" ")
        for p, parte in enumerate(partes, 1):
            parte_titulo = titulo + " (%s/%s)" % (
                p, len(partes)) + " [" + servidor + "]"
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server=servidor,
                     title=parte_titulo,
                     fulltitle=item.title,
                     url=parte,
                     thumbnail=logo,
                     plot=item.plot,
                     folder=False))

    return itemlist
コード例 #59
0
ファイル: pepecine.py プロジェクト: shorbete/pelisalacarta
def get_temporadas(item):
    """List the seasons of a show (pepecine), or jump straight to its
    episodes.

    Parses the JSON blob embedded in the page as "vars.title = {...};".
    When the show comes from "serie_add" or has a single season, the
    episode list is returned directly via get_episodios(); otherwise one
    Item per season is returned, plus an "add to library" entry when
    library support is enabled.  Returns an empty list if the JSON blob
    cannot be found or parsed.
    """
    logger.info("[pepecine.py] get_temporadas")
    itemlist = []
    infoLabels = {}

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  scrapertools.cache_page(item.url))
    patron = 'vars.title =(.*?)};'
    try:
        # The regex cuts the blob just before "};": restore the brace.
        data_dict = jsontools.load_json(
            scrapertools.get_match(data, patron) + '}')
    except:
        return itemlist  # No parseable blob: return an empty list

    if item.extra == "serie_add":
        item.extra = str(data_dict['tmdb_id'])
        item.url = str(data_dict["link"])
        infoLabels['titleraw'] = data_dict["title"]
        infoLabels['tvshowtitle'] = data_dict["title"]
        infoLabels['title_id'] = data_dict['id']
        item.infoLabels = infoLabels
        itemlist = get_episodios(item)
    else:
        infoLabels = item.infoLabels
        # dict.has_key() is deprecated (removed in Python 3): use "in".
        if "actor" in data_dict:
            cast = []
            rol = []
            for actor in data_dict["actor"]:
                cast.append(actor['name'])
                rol.append(actor['pivot']['char_name'])
            infoLabels['cast'] = cast
            # list(...) keeps the value a list on both Python 2 and 3.
            infoLabels['castandrole'] = list(zip(cast, rol))

        if "writer" in data_dict:
            infoLabels['writer'] = ", ".join(
                writer['name'] for writer in data_dict["writer"])

        if "director" in data_dict:
            infoLabels['director'] = ", ".join(
                director['name'] for director in data_dict["director"])

        if len(data_dict["season"]) == 1:
            # Single season: list its episodes directly
            item.extra = str(data_dict['tmdb_id'])
            item.url = str(data_dict["link"])
            item.infoLabels = infoLabels
            itemlist = get_episodios(item)
        else:  # Several seasons: build one entry per season
            item.extra = str(data_dict['tmdb_id'])
            # Sort by season number
            data_dict["season"].sort(key=lambda x: (x['number']))
            for season in data_dict["season"]:
                # Keep only the links belonging to this season
                url = [l for l in data_dict["link"]
                       if l["season"] == season['number']]
                if url:
                    if season['overview']:
                        infoLabels['plot'] = season['overview']
                    if season['number']:
                        infoLabels['season'] = season['number']
                    if season["poster"]:
                        # Upgrade the poster to the 500px-wide variant
                        item.thumbnail = re.compile(r"/w\d{3}/").sub(
                            "/w500/", season["poster"])
                    if season["release_date"]:
                        infoLabels['premiered'] = season['release_date']

                    item.infoLabels = infoLabels
                    title = item.title + ' ' + season["title"].lower().replace(
                        'season', 'temporada').capitalize()

                    itemlist.append(
                        Item(channel=item.channel,
                             action="get_episodios",
                             title=title,
                             url=str(url),
                             extra=item.extra,
                             fanart=item.fanart,
                             text_color="0xFFFFCE9C",
                             thumbnail=item.thumbnail,
                             viewmode="movie_with_plot",
                             infoLabels=item.infoLabels))

            if config.get_library_support() and itemlist:
                url = urlparse.urljoin(__url_base__,
                                       "series-online/" + str(data_dict['id']))
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir esta serie a la biblioteca",
                         url=url,
                         action="add_serie_to_library",
                         extra='episodios###serie_add',
                         show=data_dict["title"],
                         text_color="0xFFe5ffcc",
                         thumbnail=
                         'https://d5.usercdn.com/dl/i/02360/a99fzwbqdaen.png'))

    return itemlist
コード例 #60
0
def episodios(item):
    """Build the episode list for a seriesdanko show page.

    item.url may carry two URLs separated by "|" (entries coming from
    search); the part after "|" is a fallback page.  When item.extra is
    set it already contains the HTML fragment to scrape.  Emits one Item
    per episode anchor (action "findvideos"), plus library/download
    entries on XBMC/Boxee platforms; if no episode anchors match, falls
    back to listing whatever raw video links servertools finds.
    """
    logger.info("pelisalacarta.channels.seriesdanko episodios")

    if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma":
        import xbmc
        if config.get_setting("forceview")=="true":
            xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons
            #xbmc.executebuiltin("Container.Content(Movies)")

    item.url = item.url.replace("../","") ## Fix relative links coming from search
    if "|" in item.url:
        url = item.url.split("|")[0]
        sw = True
    else:
        url = item.url
        sw = False
    # Download the page (unless item.extra already holds the content)
    if item.extra:

        contenidos = item.extra
        #print contenidos
    else:
        data = scrapertools.downloadpageWithoutCookies(url)

    # Extract the entries
        if sw:
            try:
                # JSON-P style feed: evaluate it and unquote the entry body
                datadict = eval( "(" + data + ")" )
                data = urllib.unquote_plus(datadict["entry"]["content"]["$t"].replace("\\u00","%"))
                matches=[]
                matches.append(data)
            except:
                matches = []
        else:
            patronvideos = "entry-content(.*?)<div class='blog-pager' id='blog-pager'>"
            matches = re.compile(patronvideos,re.DOTALL).findall(data)

        if len(matches)>0:
            contenidos = matches[0].replace('"',"'").replace("\n","")
        else:
            contenidos = item.url
            if sw:
                # Fallback: fetch the second URL carried after the "|"
                url = item.url.split("|")[1]
                if not url.startswith("http://"):
                    url = urlparse.urljoin("http://seriesdanko.com",url)
                # Download the page
                data = scrapertools.downloadpageGzip(url)
                patronvideos  = "entry-content(.*?)<div class='post-footer'>"
                matches = re.compile(patronvideos,re.DOTALL).findall(data)
                if len(matches)>0:
                    contenidos = matches[0]

    patronvideos  = "<a href='([^']+)'>([^<]+)</a> <img(.+?)/>"
    matches = re.compile(patronvideos,re.DOTALL).findall(contenidos.replace('"',"'"))
    #print contenidos
    try:
        plot = re.compile(r'(Informac.*?/>)</div>').findall(contenidos)[0]
        if len(plot)==0:
            plot = re.compile(r"(Informac.*?both;'>)</div>").findall(contenidos)[0]
        plot = re.sub('<[^>]+>'," ",plot)
    except:
        plot = ""

    itemlist = []
    for match in matches:
        scrapedtitle = match[1].replace("\n","").replace("\r","")
        logger.info("scrapedtitle="+scrapedtitle)
        ## Removed for the "Add this show to the XBMC library" option (15-12-2014)
        #scrapedtitle = scrapertools.remove_show_from_title(scrapedtitle,item.show)

        # Normalise episode codes like "3X07" to lowercase "3x07"
        episode_code = scrapertools.find_single_match(scrapedtitle,"(\d+X\d+)")
        logger.info("episode_code="+episode_code)
        if episode_code!="":
            season_number = scrapertools.find_single_match(scrapedtitle,"(\d+)X\d+")
            logger.info("season_number="+season_number)
            episode_number = scrapertools.find_single_match(scrapedtitle,"\d+X(\d+)")
            logger.info("episode_number="+episode_number)
            new_episode_code = season_number+"x"+episode_number
            logger.info("new_episode_code="+new_episode_code)
            scrapedtitle = scrapedtitle.replace(episode_code,new_episode_code)
            logger.info("scrapedtitle="+scrapedtitle)

        #[1x01 - Capitulo 01]
        #patron = "(\d+x\d+) - Capitulo \d+"
        #matches = re.compile(patron,re.DOTALL).findall(scrapedtitle)
        #print matches
        #if len(matches)>0 and len(matches[0])>0:
        #    scrapedtitle = matches[0]

        # The language flag icon in the anchor decides the title suffix
        if "es.png" in match[2]:
            subtitle = " (Español)"
        elif "la.png" in match[2]:
            subtitle = " (Latino)"
        elif "vo.png" in match[2]:
            subtitle = " (VO)"
        elif "vos.png" in match[2]:
            subtitle = " (VOS)"
        elif "ca.png"  in match[2]:
            subtitle = " (Catalan)"
        elif "ga.jpg"  in match[2]:
            subtitle = " (Gallego)"
        elif "eu.jpg"  in match[2]:
            subtitle = " (Euskera)"
        elif "ba.png"  in match[2]:
            subtitle = " (Bable)"
        else:
            subtitle = ""
        scrapedplot = plot
        scrapedurl = urlparse.urljoin(item.url,match[0]).replace("\n","").replace("\r","")
        if not item.thumbnail:
            try:
                scrapedthumbnail = re.compile(r"src=([^']+)'").findall(contenidos)[0]
            except:
                    scrapedthumbnail = ""
        else:
            scrapedthumbnail = item.thumbnail
        scrapedthumbnail = scrapedthumbnail.replace("\n","").replace("\r","")
        if item.fulltitle == '':
            item.fulltitle = scrapedtitle + subtitle
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        ## show added for the "Add this show to the XBMC library" option (15-12-2014)
        # Add to the XBMC listing
        itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle+subtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , fulltitle = item.fulltitle, context="4", show=item.show, folder=True) )

    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0:
        itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=item.show))
        itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios###", show=item.show))

    #xbmc.executebuiltin("Container.Content(Movies)")

    # Fallback: no episode anchors found, list raw video links instead
    if len(itemlist)==0:
        listvideos = servertools.findvideos(contenidos)

        for title,url,server in listvideos:

            if server == "youtube":
                scrapedthumbnail = "http://i.ytimg.com/vi/" + url + "/0.jpg"
            else:
                scrapedthumbnail = item.thumbnail
            scrapedtitle = title
            scrapedplot = ""
            scrapedurl = url

            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Add to the XBMC listing
            itemlist.append( Item(channel=__channel__, action="play", server=server, title=item.title +" "+ scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot, fulltitle = scrapedtitle , folder=False) )

    return itemlist