Example #1
0
def list_all(item):
    """Build the channel listing for movies/series from the site's paginate API.

    ``item.url`` carries the genre filter, ``item.page`` the current page
    number and ``item.type`` either 'movie' or 'series'.  Returns a list of
    Item objects plus a trailing "next page" item.
    """
    logger.info()
    itemlist = []
    # Genre id embedded in the browse URL (empty string when absent).
    genero = scrapertools.find_single_match(item.url, "genre=(\w+)")
    data = get_source(item.url)
    # Anti-CSRF token the paginate endpoint requires.
    token = scrapertools.find_single_match(data, "token:.*?'(.*?)'")
    url = host + '/titles/paginate?_token=%s&perPage=24&page=%s&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (
        token, item.page, item.type, genero)
    data = httptools.downloadpage(url).data
    dict_data = jsontools.load(data)
    items = dict_data['items']
    # FIX: loop variable renamed from `dict`, which shadowed the builtin.
    for entry in items:
        new_item = Item(channel=item.channel,
                        title=entry['title'] + ' [%s]' % entry['year'],
                        plot=entry['plot'],
                        thumbnail=entry['poster'],
                        url=entry['link'],
                        infoLabels={'year': entry['year']})
        if item.type == 'movie':
            new_item.contentTitle = entry['title']
            new_item.fulltitle = entry['title']
            new_item.action = 'findvideos'
        elif item.type == 'series':
            new_item.contentSerieName = entry['title']
            new_item.action = ''
        itemlist.append(new_item)
    tmdb.set_infoLabels(itemlist)
    # Pagination: clone the incoming item, bumping the page counter.
    itemlist.append(
        item.clone(title='Siguiente>>>',
                   url=item.url,
                   action='list_all',
                   type=item.type,
                   page=str(int(item.page) + 1)))
    return itemlist
Example #2
0
    def onClick(self, controlId):
        """Handle clicks on the dialog.

        OPTION_PANEL: resolve the selected p2p link (letting the user pick
        one when several are found), play it in a background thread and poll
        until playback finishes.  OPTIONS_OK / 99: tear the windows down.
        """
        if controlId == OPTION_PANEL:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            selecitem = self.list.getSelectedItem()
            url = selecitem.getProperty("url")
            data = get_data(url, cookies)
            url = servertools.findvideosbyserver(data, "p2p")
            if len(url) > 1:
                enlaces = []
                for u in url:
                    enlaces.append(u[1])
                xbmc.log(str(enlaces))
                selection = xbmcgui.Dialog().select("Selecciona un enlace",
                                                    enlaces)
                # FIX: Dialog().select() returns -1 on cancel and 0 for the
                # first entry; the original `selection > 0` silently discarded
                # the first link, leaving `url` as a list and crashing on the
                # string concatenation below.
                if selection < 0:
                    return  # user cancelled the selection dialog
                url = url[selection][1]
            elif url:
                url = url[0][1]

            # Build the item handed over to platformtools.
            item = Item()
            item.fulltitle = self.fulltitle
            item.url = url + "|" + item.fulltitle
            item.server = "p2p"

            config.set_setting("arenavision_play", False, "arenavision")
            from threading import Thread
            t = Thread(target=platformtools.play_video, args=[item])
            t.start()
            close = False
            while True:
                xbmc.sleep(500)
                try:
                    # Python 3 spelling; the except branch keeps the legacy
                    # Python 2 isAlive() name as a fallback.
                    if not t.is_alive() and not config.get_setting(
                            "arenavision_play", "arenavision"):
                        break
                    elif not t.is_alive() and config.get_setting(
                            "arenavision_play", "arenavision"):
                        xbmc.executebuiltin('Action(PreviousMenu)')
                        break
                except AttributeError:
                    # FIX: narrowed from a bare except -- it exists only to
                    # cover interpreters where Thread lacks is_alive().
                    if not t.isAlive() and not config.get_setting(
                            "arenavision_play", "arenavision"):
                        break
                    elif not t.isAlive() and config.get_setting(
                            "arenavision_play", "arenavision"):
                        xbmc.executebuiltin('Action(PreviousMenu)')
                        break

        elif controlId == OPTIONS_OK or controlId == 99:
            global select, ventana
            borrar = [select, ventana]
            for window in borrar:
                window.close()
                del window
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
    def onAction(self, action):
        """Remote/keyboard handler: SELECT plays the highlighted p2p link,
        back / right-click dismisses the dialog.
        """
        # NOTE(review): `controlId` is invoked as a callable here, but no such
        # function is visible in this scope (onClick receives it as a
        # parameter) -- this condition likely raises NameError; probably
        # intended something like self.getFocusId() != OPTIONS_OK. Confirm.
        if action.getId(
        ) == ACTION_SELECT_ITEM and not controlId() == OPTIONS_OK:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            selecitem = self.list.getSelectedItem()
            url = selecitem.getProperty("url")
            data = scrapertools.cache_page(url)
            url = servertools.findvideosbyserver(data, "p2p")
            if url:
                url = url[0][1]
            # Build the item handed over to platformtools.
            item = Item()
            item.fulltitle = self.fulltitle
            item.url = url + "|" + item.fulltitle
            item.server = "p2p"
            self.close()
            check_skin = xbmc.getSkinDir()

            # Non-Confluence skins need an extra back-out before playback.
            if not "confluence" in check_skin:
                xbmc.sleep(300)
                xbmc.executebuiltin('Action(PreviousMenu)')
                xbmc.sleep(300)

            platformtools.play_video(item)
            self.close()
            check_skin = xbmc.getSkinDir()

            # Every branch backs out of the menu after playback.
            if "confluence" in check_skin:
                if xbmc.Player().isPlayingVideo():
                    #xbmc.sleep(300)
                    xbmc.executebuiltin('Action(PreviousMenu)')
                else:
                    xbmc.executebuiltin('Action(PreviousMenu)')
            else:
                xbmc.executebuiltin('Action(PreviousMenu)')
        # NOTE(review): `action == 92` compares the Action object itself to an
        # int; probably meant action.getId() == 92. Confirm.
        elif action.getId() == ACTION_PREVIOUS_MENU or action.getId(
        ) == ACTION_MOUSE_RIGHT_CLICK or action == 92:

            self.close()
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
    def onClick(self, controlId):
        """Resolve the highlighted p2p link, launch playback and back out of
        the dialog; OPTIONS_OK simply closes the window.
        """
        if controlId == OPTION_PANEL:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            chosen = self.list.getSelectedItem()
            page_data = scrapertools.cache_page(chosen.getProperty("url"))
            link = servertools.findvideosbyserver(page_data, "p2p")
            if link:
                link = link[0][1]
            # Item handed over to platformtools.
            play_item = Item()
            play_item.fulltitle = self.fulltitle
            play_item.url = link + "|" + play_item.fulltitle
            play_item.server = "p2p"
            self.close()
            skin = xbmc.getSkinDir()

            if "confluence" not in skin:
                xbmc.sleep(300)
                xbmc.executebuiltin('Action(PreviousMenu)')
                xbmc.sleep(300)

            platformtools.play_video(play_item)
            skin = xbmc.getSkinDir()

            if "confluence" in skin:
                # Both branches back out; the player query is kept so the
                # execution trace matches the original flow.
                if xbmc.Player().isPlayingVideo():
                    xbmc.executebuiltin('Action(PreviousMenu)')
                else:
                    xbmc.executebuiltin('Action(PreviousMenu)')
            else:
                xbmc.executebuiltin('Action(PreviousMenu)')

        elif controlId == OPTIONS_OK:
            self.close()
            layout_path = xbmc.translatePath(
                'special://skin/720p/DialogSelect2.xml')
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
Example #5
0
def listado(item):
    """List pelispedia movies or series (28 per page), appending a
    pagination item whenever a full page was scraped.
    """
    logger.info("pelisalacarta.channels.pelispedia listado")
    itemlist = []

    if item.extra == 'serie':
        action = "temporadas"
        contentType = "tvshow"
    else:
        action = "findvideos"
        contentType = "movie"

    data = scrapertools.anti_cloudflare(item.url,
                                        host=CHANNEL_HOST,
                                        headers=CHANNEL_DEFAULT_HEADERS)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)

    patron = '<li[^>]+><a href="([^"]+)" alt="([^<]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
             '<p class="font12">(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Only the first 28 hits belong to the current page.
    for s_url, s_title, s_thumb, s_year, s_plot in matches[:28]:
        clean_title = scrapertools.unescape(s_title.strip())
        title = "{title} ({year})".format(title=clean_title, year=s_year)

        new_item = Item(channel=__channel__,
                        title=title,
                        url=urlparse.urljoin(CHANNEL_HOST, s_url),
                        action=action,
                        thumbnail=s_thumb,
                        plot=scrapertools.entityunescape(s_plot),
                        context="",
                        extra=item.extra,
                        text_color=color3,
                        contentType=contentType)

        if item.extra == 'serie':
            new_item.show = clean_title
        else:
            new_item.fulltitle = clean_title
            new_item.infoLabels = {'year': s_year}

        itemlist.append(new_item)

    # Fetch basic metadata for every entry via multi-threaded tmdb lookups.
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # The site serves 28 records per page; a full page means there may be more.
    if len(matches) >= 28:
        file_php = "666more"
        tipo_serie = ""

        if item.extra == "movies":
            anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)")
            letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)")
            genero = scrapertools.find_single_match(item.url,
                                                    "(?:gender=|genre=)(\w+)")
            params = "letra={letra}&year={year}&genre={genero}".format(
                letra=letra, year=anio, genero=genero)
        else:
            tipo2 = scrapertools.find_single_match(item.url,
                                                   "(?:series/|tipo2=)(\w+)")
            tipo_serie = "&tipo=serie"

            if tipo2 != "all":
                file_php = "letra"
                tipo_serie += "&tipo2=" + tipo2

            # Each list flavour embeds the filter value differently.
            genre_patterns = {
                "anio": "(?:anio/|genre=)(\w+)",
                "genero": "(?:genero/|genre=)(\w+)",
                "letra": "(?:letra/|genre=)(\w+)",
            }
            genero = ""
            if tipo2 in genre_patterns:
                genero = scrapertools.find_single_match(
                    item.url, genre_patterns[tipo2])

            params = "genre={genero}".format(genero=genero)

        url = "http://www.pelispedia.tv/api/{file}.php?rangeStart=28&rangeEnd=28{tipo_serie}&{params}".\
            format(file=file_php, tipo_serie=tipo_serie, params=params)

        # Already paginated: just advance rangeStart by one page.
        if "rangeStart" in item.url:
            ant_inicio = scrapertools.find_single_match(
                item.url, "rangeStart=(\d+)&")
            url = item.url.replace("rangeStart=" + ant_inicio,
                                   "rangeStart=" + str(int(ant_inicio) + 28))

        itemlist.append(
            Item(channel=__channel__,
                 action="listado",
                 title=">> Página siguiente",
                 extra=item.extra,
                 url=url,
                 thumbnail=thumbnail_host,
                 fanart=fanart_host,
                 text_color=color2))

    return itemlist
Example #6
0
def listado(item):
    """List pelispedia movies or series (28 per page) and append a
    "next page" item when a full page was scraped and this is not a
    search-results listing.
    """
    logger.info()
    itemlist = []

    action = "findvideos"
    content_type = "movie"

    if item.extra == 'serie':
        action = "temporadas"
        content_type = "tvshow"

    # ~ data = httptools.downloadpage(item.url).data
    data = obtener_data(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)
    # logger.info("data -- {}".format(data))

    patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
             '<p class="font12">(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Only the first 28 hits belong to the current page.
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches[:
                                                                                        28]:
        title = "%s (%s)" % (scrapertools.unescape(
            scrapedtitle.strip()), scrapedyear)
        plot = scrapertools.entityunescape(scrapedplot)

        new_item = Item(channel=__channel__,
                        title=title,
                        url=urlparse.urljoin(CHANNEL_HOST, scrapedurl),
                        action=action,
                        thumbnail=scrapedthumbnail,
                        plot=plot,
                        context="",
                        extra=item.extra,
                        contentType=content_type,
                        fulltitle=title)

        if item.extra == 'serie':
            new_item.show = scrapertools.unescape(scrapedtitle.strip())
            # Workaround: in some cases the scraped URL wrongly uses the
            # movie path for a series.
            new_item.url = new_item.url.replace(CHANNEL_HOST + "pelicula",
                                                CHANNEL_HOST + "serie")
        else:
            new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip())
            new_item.infoLabels = {'year': scrapedyear}
            # logger.debug(new_item.tostring())

        itemlist.append(new_item)

    # Fetch basic metadata for every entry via multi-threaded tmdb lookups.
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # The site serves 28 records per page; a full page suggests more pages.
    # Search results ('/buscar/?') are never paginated.
    if len(matches) >= 28 and '/buscar/?' not in item.url:

        file_php = "666more"
        tipo_serie = ""

        if item.extra == "movies":
            anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)")
            letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)")
            genero = scrapertools.find_single_match(item.url,
                                                    "(?:gender=|genre=)(\w+)")
            params = "letra=%s&year=%s&genre=%s" % (letra, anio, genero)

        else:
            tipo2 = scrapertools.find_single_match(item.url,
                                                   "(?:series/|tipo2=)(\w+)")
            tipo_serie = "&tipo=serie"

            if tipo2 != "all":
                file_php = "letra"
                tipo_serie += "&tipo2=" + tipo2

            # Each list flavour embeds the filter value differently.
            genero = ""
            if tipo2 == "anio":
                genero = scrapertools.find_single_match(
                    item.url, "(?:anio/|genre=)(\w+)")
            if tipo2 == "genero":
                genero = scrapertools.find_single_match(
                    item.url, "(?:genero/|genre=)(\w+)")
            if tipo2 == "letra":
                genero = scrapertools.find_single_match(
                    item.url, "(?:letra/|genre=)(\w+)")

            params = "genre=%s" % genero

        url = "http://www.pelispedia.tv/api/%s.php?rangeStart=28&rangeEnd=28%s&%s" % (
            file_php, tipo_serie, params)

        # Already paginated: advance rangeStart by one page (28).
        if "rangeStart" in item.url:
            ant_inicio = scrapertools.find_single_match(
                item.url, "rangeStart=(\d+)&")
            inicio = str(int(ant_inicio) + 28)
            url = item.url.replace("rangeStart=" + ant_inicio,
                                   "rangeStart=" + inicio)

        itemlist.append(
            Item(channel=__channel__,
                 action="listado",
                 title=">> Página siguiente",
                 extra=item.extra,
                 url=url,
                 thumbnail=thumbnail_host,
                 fanart=fanart_host))

    return itemlist
Example #7
0
def listado(item):
    """Scrape the pelispedia listing page: movies (with year and plot) or
    series (title and thumbnail only), plus a "next page" item when the
    page advertises pagination.
    """
    logger.info()
    itemlist = []

    # ~ data = httptools.downloadpage(item.url).data
    data = obtener_data(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)

    if item.extra == 'movies':
        pattern = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
                  '<p class="font12">(.*?)</p>'
        for s_url, s_title, s_thumb, s_year, s_plot in re.compile(
                pattern, re.DOTALL).findall(data):
            clean = scrapertools.unescape(s_title.strip())
            movie = Item(channel=__channel__,
                         title="%s (%s)" % (clean, s_year),
                         url=urlparse.urljoin(CHANNEL_HOST, s_url),
                         action="findvideos",
                         thumbnail=s_thumb,
                         plot=scrapertools.entityunescape(s_plot),
                         context="",
                         extra=item.extra,
                         contentType="movie")
            movie.fulltitle = clean
            movie.infoLabels = {'year': s_year}
            itemlist.append(movie)

    else:
        pattern = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+)'
        for s_url, s_title, s_thumb in re.compile(pattern,
                                                  re.DOTALL).findall(data):
            clean = scrapertools.unescape(s_title.strip())
            show = Item(channel=__channel__,
                        title=clean,
                        url=urlparse.urljoin(CHANNEL_HOST, s_url),
                        action="temporadas",
                        thumbnail=s_thumb,
                        context="",
                        extra=item.extra,
                        contentType="tvshow",
                        fulltitle=clean)
            show.show = clean
            # Workaround: some series URLs wrongly use the movie path.
            show.url = show.url.replace(CHANNEL_HOST + "pelicula",
                                        CHANNEL_HOST + "serie")
            itemlist.append(show)

    # Fetch basic metadata for every entry via multi-threaded tmdb lookups.
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if '<ul class="pagination"' in data:
        url_next = scrapertools.find_single_match(data,
                                                  'href="([^"]*)" rel="next"')
        if url_next:
            itemlist.append(
                Item(channel=__channel__,
                     action="listado",
                     title=">> Página siguiente",
                     extra=item.extra,
                     url=urlparse.urljoin(CHANNEL_HOST, url_next),
                     thumbnail=thumbnail_host,
                     fanart=fanart_host))

    return itemlist
Example #8
0
def listado(item):
    """List pelispedia movies or series (28 per page) and append a
    "next page" item when a full page was scraped.
    """
    logger.info("pelisalacarta.channels.pelispedia listado")
    itemlist = []

    action = "findvideos"
    if item.extra == 'serie':
        action = "temporadas"

    data = scrapertools.anti_cloudflare(item.url , host=CHANNEL_HOST , headers=CHANNEL_DEFAULT_HEADERS )
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    # logger.info("data -- {}".format(data))

    patron = '<li[^>]+><a href="([^"]+)" alt="([^<]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \
             '<p class="font12">(.*?)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Only the first 28 hits belong to the current page.
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches[:28]:
        title = "{title} ({year})".format(title=scrapertools.unescape(scrapedtitle.strip()), year=scrapedyear)
        plot = scrapertools.entityunescape(scrapedplot)

        new_item= Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action,
                       thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra, text_color= color3)

        if item.extra == 'serie':
            new_item.show = scrapertools.unescape(scrapedtitle.strip())
        else:
            new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip())
            new_item.infoLabels = {'year':scrapedyear}
            #logger.debug(new_item.tostring())

        itemlist.append(new_item)

    # Fetch basic metadata for every entry via multi-threaded tmdb lookups.
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # The site serves 28 records per page; a full page suggests more pages.
    if len(matches) >= 28:

        file_php = "more"
        tipo_serie = ""

        if item.extra == "movies":
            anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)")
            letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)")
            genero = scrapertools.find_single_match(item.url, "(?:gender=|genre=)(\w+)")
            params = "letra={letra}&year={year}&genre={genero}".format(letra=letra, year=anio, genero=genero)

        else:
            tipo2 = scrapertools.find_single_match(item.url, "(?:series/|tipo2=)(\w+)")
            tipo_serie = "&tipo=serie"

            if tipo2 != "all":
                file_php = "letra"
                tipo_serie += "&tipo2="+tipo2

            # Each list flavour embeds the filter value differently.
            genero = ""
            if tipo2 == "anio":
                genero = scrapertools.find_single_match(item.url, "(?:anio/|genre=)(\w+)")
            if tipo2 == "genero":
                genero = scrapertools.find_single_match(item.url, "(?:genero/|genre=)(\w+)")
            if tipo2 == "letra":
                genero = scrapertools.find_single_match(item.url, "(?:letra/|genre=)(\w+)")

            params = "genre={genero}".format(genero=genero)

        url = "http://www.pelispedia.tv/api/{file}.php?rangeStart=28&rangeEnd=28{tipo_serie}&{params}".\
            format(file=file_php, tipo_serie=tipo_serie, params=params)

        # Already paginated: advance rangeStart by one page (28).
        if "rangeStart" in item.url:
            ant_inicio = scrapertools.find_single_match(item.url, "rangeStart=(\d+)&")
            inicio = str(int(ant_inicio)+28)
            url = item.url.replace("rangeStart="+ant_inicio, "rangeStart="+inicio)

        itemlist.append(Item(channel=__channel__, action="listado", title=">> Página siguiente", extra=item.extra,
                             url=url, thumbnail=thumbnail_host, fanart= fanart_host, text_color= color2))

    return itemlist