Example #1
def play(item):
    logger.info("deportesalacarta.channels.miscelanea_p2p play")
    itemlist = []
    xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
    data = scrapertools.downloadpage(item.url)

    # If the channel is on the website, search for the link manually since there may be several
    if item.extra == "dhd1":
        url = scrapertools.find_single_match(data, 'href="(acestream://[^"]+)"')
        if url == "":
            redirect = scrapertools.find_single_match(data, 'src="(http://buker[^"]+)"')
            data = scrapertools.downloadpage(redirect)
            urls = servertools.findvideosbyserver(data, "p2p")
            if urls:
                url = urls[0][1] +"|" + item.title
                itemlist.append(item.clone(url=url, server="p2p"))
        else:
            url += "|" + item.title
            itemlist.append(item.clone(url=url, server="p2p"))
    elif item.extra == "euro":
        itemlist.append(item.clone(server="directo"))
    else:
        # Automate the search for the acestream/sopcast link through the p2p connector
        urls = servertools.findvideosbyserver(data, "p2p")
        if urls:
            url = urls[0][1]+"|" + item.title
            itemlist.append(item.clone(url=url, server="p2p"))
        
    return itemlist
Example #2
def play(item):
    logger.info("pelisalacarta.channels.cinetux play")
    itemlist = []
    enlace = servertools.findvideosbyserver(item.url, item.server)
    itemlist.append(item.clone(url=enlace[0][1]))

    return itemlist
Example #3
def play(item):
    logger.info()
    itemlist = list()
    enlace = servertools.findvideosbyserver(item.url, item.server)
    itemlist.append(item.clone(url=enlace[0][1]))

    return itemlist
Example #4
def play(item):
    logger.info()
    itemlist = []

    if not item.url.startswith("http") and not item.url.startswith("magnet"):
        post = "source=%s&action=obtenerurl" % urllib.quote(item.url)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage("%s/wp-admin/admin-ajax.php" %
                                      host.replace("https", "http"),
                                      post=post,
                                      headers=headers,
                                      follow_redirects=False).data

        url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace(
            "\\", "")
        if "enlacesmix" in url:
            data = httptools.downloadpage(url,
                                          headers={
                                              'Referer': item.extra
                                          },
                                          follow_redirects=False).data
            url = scrapertools.find_single_match(data,
                                                 '<iframe.*?src="([^"]+)"')
        enlaces = servertools.findvideosbyserver(url, item.server)
        if enlaces:
            itemlist.append(
                item.clone(action="play",
                           server=enlaces[0][2],
                           url=enlaces[0][1]))
    else:
        itemlist.append(item.clone())

    return itemlist
Example #5
def play(item):
    logger.info()
    itemlist = []
    
    location = ""
    i = 0
    while not location:
        try:
            data = httptools.downloadpage(item.url).data
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            if not url_redirect:
                import StringIO
                compressedstream = StringIO.StringIO(data)
                import gzip
                gzipper = gzip.GzipFile(fileobj=compressedstream)
                data = gzipper.read()
                gzipper.close()
                url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
            location = httptools.downloadpage(url_redirect, follow_redirects=False).headers["location"]
        except:
            pass
        i += 1
        if i == 6:
            return itemlist

    enlaces = servertools.findvideosbyserver(location, item.server)
    if len(enlaces) > 0:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Example #6
def play(item):
    logger.info("pelisalacarta.channels.oranline play")
    itemlist = []
    enlace = servertools.findvideosbyserver(item.url, item.server)
    itemlist.append(item.clone(url=enlace[0][1]))

    return itemlist
Example #7
def play(item):
    logger.info()
    itemlist = list()
    enlace = servertools.findvideosbyserver(item.url, item.server)
    itemlist.append(item.clone(url=enlace[0][1]))

    return itemlist
Example #8
def play(item):
    itemlist = []
    data = scrapertools.downloadpage(item.url)
    if "Web" in item.title:
        videourl = scrapertools.find_single_match(data, "source: '([^']+)'")
        if not videourl:
            baseurl, var_url, lasturl = scrapertools.find_single_match(
                data,
                'return\(\[([^\[]+)\].*?\+\s*([A-z]+)\.join.*?"([^"]+)"\)\.innerHTML'
            )
            auth = scrapertools.find_single_match(
                data, var_url + '\s*=\s*\[([^\[]+)\]')
            lasturl = scrapertools.find_single_match(
                data, lasturl + '\s*>\s*([^<]+)<')
            videourl = baseurl + auth + lasturl
            videourl = re.sub(r'"|,|\\', '', videourl)
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 server="directo",
                 url=videourl,
                 action="play",
                 folder=False))
    else:
        lista = servertools.findvideosbyserver(data, 'p2p')
        if lista:
            itemlist.append(
                Item(channel=__channel__,
                     title=item.title,
                     server="p2p",
                     url=lista[0][1],
                     action="play",
                     folder=False))
    return itemlist
Example #9
def play(item):
    logger.info()

    itemlist = []

    # Look for the video on the specified server ...

    devuelve = servertools.findvideosbyserver(item.url, item.server)

    if not devuelve:
        # ...if we do not find it, search all available servers

        devuelve = servertools.findvideos(item.url, skip=True)

    if devuelve:
        # logger.debug(devuelve)
        itemlist.append(
            Item(channel=item.channel,
                 title=item.contentTitle,
                 action="play",
                 server=devuelve[0][2],
                 url=devuelve[0][1],
                 thumbnail=item.thumbnail,
                 folder=False))

    return itemlist
Example #10
File: cinefox.py Project: dealex1/addon
def normalizar_url(url, server):
    url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    enlaces = servertools.findvideosbyserver(url, server)[0]
    if enlaces[1] != '':
        return enlaces[1]
    return url
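A minimal usage sketch for the helper above, assuming it is called from within the same channel module (where servertools is already imported); the URL and server name are illustrative values, not taken from the source:

# Hypothetical call: normalize a streamcloud embed link and resolve it through its connector
resolved = normalizar_url("http://streamcloud.eu/embed-abc123.html", "streamcloud")
# normalizar_url returns the connector's resolved link if one was found, otherwise the normalized url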
Example #11
def play(item):
    logger.info("deportesalacarta.torrenttv go")
    itemlist = []
    xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
    fulltitle = item.fulltitle
    if item.extra == "":
        # Include the title in the url so it is passed to the connector
        url = item.url + "|" + fulltitle
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 server="p2p",
                 url=url,
                 action="play",
                 folder=False))
    else:
        data = httptools.downloadpage(item.url).data
        if item.extra == "lista3":
            iframe = scrapertools.find_single_match(data,
                                                    '<iframe src="([^"]+)"')
            data = httptools.downloadpage(iframe).data

        urls = servertools.findvideosbyserver(data, "p2p")
        if urls:
            url = urls[0][1] + "|" + fulltitle
            itemlist.append(
                Item(channel=__channel__,
                     title=item.title,
                     server="p2p",
                     url=url,
                     action="play",
                     folder=False))

    return itemlist
Example #12
    def onClick(self, controlId):
        if controlId == OPTION_PANEL:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            selecitem = self.list.getSelectedItem()
            url = selecitem.getProperty("url")
            data = get_data(url, cookies)
            url = servertools.findvideosbyserver(data, "p2p")
            if len(url) > 1:
                enlaces = []
                for u in url:
                    enlaces.append(u[1])
                xbmc.log(str(enlaces))
                selection = xbmcgui.Dialog().select("Selecciona un enlace",
                                                    enlaces)
                if selection >= 0:
                    url = url[selection][1]
            elif url:
                url = url[0][1]

            # Build the item for platformtools
            item = Item()
            item.fulltitle = self.fulltitle
            item.url = url + "|" + item.fulltitle
            item.server = "p2p"

            config.set_setting("arenavision_play", False, "arenavision")
            from threading import Thread
            t = Thread(target=platformtools.play_video, args=[item])
            t.start()
            close = False
            while True:
                xbmc.sleep(500)
                try:
                    if not t.is_alive() and not config.get_setting(
                            "arenavision_play", "arenavision"):
                        break
                    elif not t.is_alive() and config.get_setting(
                            "arenavision_play", "arenavision"):
                        xbmc.executebuiltin('Action(PreviousMenu)')
                        break
                except:
                    if not t.isAlive() and not config.get_setting(
                            "arenavision_play", "arenavision"):
                        break
                    elif not t.isAlive() and config.get_setting(
                            "arenavision_play", "arenavision"):
                        xbmc.executebuiltin('Action(PreviousMenu)')
                        break

        elif controlId == OPTIONS_OK or controlId == 99:
            global select, ventana
            borrar = [select, ventana]
            for window in borrar:
                window.close()
                del window
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
Example #13
File: cinefox.py Project: fcammed/addon
def normalizar_url(url, server):
    # Run through findvideosbyserver to obtain the url from the pattern/url entries of the server json files
    # Exceptions copied from the play function
    url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    enlaces = servertools.findvideosbyserver(url, server)[0]
    if enlaces[1] != '':
        return enlaces[1]
    return url
Example #14
def play(item):
    from core import servertools
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t", '', data)

    url = servertools.findvideosbyserver(data, "p2p")
    if url:
        url = url[0][1]
        itemlist.append(item.clone(url=url, server="p2p"))

    return itemlist
Example #15
def play(item):
    logger.info("deportesalacarta.privatehd go")
    itemlist = []

    headers = [[
        "User-Agent",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0"
    ]]
    fulltitle = item.title
    data = scrapertools.cachePage(item.url)
    iframe = scrapertools.find_single_match(
        data, '<center><iframe.*?src="(http://privatehd.pw/tv[^"]+)"')
    data = scrapertools.cachePage(iframe)

    urls = servertools.findvideosbyserver(data, "p2p")
    if urls:
        url = urls[0][1] + "|" + fulltitle
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 server="p2p",
                 url=url,
                 action="play",
                 folder=False))
    else:
        headers.append(["Referer", iframe])
        newurl = scrapertools.find_single_match(
            data, "src='(http://privatehd.pw/server[^']+)'")
        newurl = newurl.replace("channel.php?file=",
                                "embed.php?a=") + "&strech="
        data = scrapertools.downloadpage(newurl, headers=headers)

        url_video = scrapertools.find_single_match(
            data, "'streamer'\s*,\s*'([^']+)'")
        if "rtmp" in url_video:
            file = scrapertools.find_single_match(data,
                                                  "'file'\s*,\s*'([^']+)'")
            url_video += " playpath=%s swfUrl=http://privatehd.pw/player.swf live=true swfVfy=1 pageUrl=%s token=0fea41113b03061a" % (
                file, newurl)

        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 server="directo",
                 url=url_video,
                 action="play",
                 folder=False))

    return itemlist
Example #16
def play(item):
    logger.info()
    itemlist = []
    if "api.cinetux" in item.url:
        data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
        matches = scrapertools.find_multiple_matches(data,
                  "{file\s*:\s*'([^\"]+)\"[\}]*'\s*,\s*label:\s*'([^']+)'\s*,\s*type:\s*'[^/]+/([^']+)'")
        for url, quality, ext in matches:
            itemlist.insert(0, [".%s %s [directo]" % (ext, quality), url])
    else:
        enlace = servertools.findvideosbyserver(item.url, item.server)
        url = enlace[0][1]
        itemlist.append(item.clone(url=url))

    return itemlist
Example #17
def play(item):
    logger.info()
    itemlist = []

    url = item.url.replace("http://miracine.tv/n/?etu=",
                           "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    if item.server:
        enlaces = servertools.findvideosbyserver(url, item.server)
    else:
        enlaces = servertools.findvideos(url)

    if len(enlaces) == 0: return itemlist

    itemlist.append(item.clone(url=enlaces[0][1], server=enlaces[0][2]))
    return itemlist
Example #18
File: lacajita.py Project: yonvima/addon
def play(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    url = scrapertools.find_single_match(data, 'window.open\("([^"]+)"')
    enlaces = servertools.findvideosbyserver(url, item.server)
    if enlaces:
        itemlist.append(item.clone(action="play", url=enlaces[0][1]))
    else:
        enlaces = servertools.findvideos(url, True)
        if enlaces:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Example #19
File: cinefox.py Project: fcammed/addon
def play(item):
    logger.info()
    itemlist = []
    if item.extra != "" and "google" not in item.url:
        post = "id=%s" % item.extra
        data = httptools.downloadpage(host + "/goto/", post=post, add_referer=True).data
        item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')

    url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    if item.server:
        enlaces = servertools.findvideosbyserver(url, item.server)[0]
    else:
        enlaces = servertools.findvideos(url)[0]
    itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))
    return itemlist
Example #20
def play(item):
    logger.info()
    itemlist = []
    if "drive.php?v=" in item.url:
        if not item.url.startswith("http:") and not item.url.startswith("https:"):
            item.url = "http:" + item.url
        data = httptools.downloadpage(item.url, add_referer=True).data.replace("\\", "")

        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
        patron = '"label":\s*"([^"]+)","type":\s*"video/([^"]+)","(?:src|file)":\s*"([^"]+)"'
        matches = scrapertools.find_multiple_matches(data, patron)
        for calidad, extension, url in matches:
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.append([title, url, 0, subtitulo])
        try:
            itemlist.sort(key=lambda it:int(it[0].split(" ")[1].split("p")[0]))
        except:
            pass
    elif "metiscs" in item.url:
        import base64
        from lib import jsunpack

        if not item.url.startswith("http:") and not item.url.startswith("https:"):
            item.url = "http:" + item.url

        data = httptools.downloadpage(item.url, add_referer=True).data
        str_encode = scrapertools.find_multiple_matches(data, '(?:\+|\()"([^"]+)"')
        data = base64.b64decode("".join(str_encode))
        packed = scrapertools.find_single_match(data, '(eval\(function.*?)(?:</script>|\}\)\))')
        if not packed:
            packed = data
        data_js = jsunpack.unpack(packed)

        subtitle = scrapertools.find_single_match(data_js, 'tracks:\[\{"file":"([^"]+)"')
        patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
        matches = scrapertools.find_multiple_matches(data_js, patron)
        for url, calidad, extension in matches:
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.insert(0, [title, url, 0, subtitle])
    else:
        enlaces = servertools.findvideosbyserver(item.url, item.server)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    
    return itemlist
Example #21
def elgoles(item):
    logger.info("deportesalacarta.channels.miscelanea_p2p elgoles")
    itemlist = []

    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_single_match(data, '<ul id="menu-menu-1" class="menu">(.*?)TV-ESPAÑA')
    matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">(.*?)(?:\s|</a>)')
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace("Canal.", "Canal ")
        scrapedtitle = "[COLOR crimson]"+scrapedtitle.capitalize()+"[/COLOR]"
        data = scrapertools.downloadpage(scrapedurl)
        urls = servertools.findvideosbyserver(data, "p2p")
        if urls:
            scrapedtitle += "   [COLOR darkcyan]"+urls[0][0]+"[/COLOR]"
            itemlist.append(item.clone(url=scrapedurl, action="play", title=scrapedtitle, folder=False))
    
    return itemlist
Example #22
def play(item):
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
        data = httptools.downloadpage(host+"/a/status", post=post).data

    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]
    
    return [item]
Example #23
def play(item):
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
        data = httptools.downloadpage(host + "/a/status", post=post).data
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]
    item.thumbnail = item.contentThumbnail
    item.fulltitle = item.contentTitle
    return [item]
Example #24
def play(item):
    if "###" in item.url:
        id = item.url.split("###")[1].split(";")[0]
        type = item.url.split("###")[1].split(";")[1]
        item.url = item.url.split("###")[0]
        post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
        data = agrupa_datos(urlparse.urljoin(host, "/a/status"), post=post)
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if devuelve:
        item.url = devuelve[0][1]
    else:
        devuelve = servertools.findvideos(item.url, True)
        if devuelve:
            item.url = devuelve[0][1]
            item.server = devuelve[0][2]
    item.thumbnail = item.contentThumbnail
    item.fulltitle = item.contentTitle
    return [item]
Example #25
def play(item):
    logger.info()
    itemlist = []
    if "drive.php?v=" in item.url:
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = httptools.downloadpage(item.url).data

        subtitulo = scrapertools.find_single_match(data,
                                                   "var subtitulo='([^']+)'")
        patron = '{"label":\s*"([^"]+)","type":\s*"video/([^"]+)","src":\s*"([^"]+)"'
        matches = scrapertools.find_multiple_matches(data, patron)
        for calidad, extension, url in matches:
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.append([title, url, 0, subtitulo])
        itemlist.reverse()
    elif "metiscs" in item.url:
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        referer = {'Referer': "http://peliculas.nu"}
        data = httptools.downloadpage(item.url, headers=referer).data

        from lib import jsunpack
        packed = scrapertools.find_single_match(
            data,
            '<script type="text/javascript">(eval\(function.*?)</script>')
        data_js = jsunpack.unpack(packed)

        patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
        matches = scrapertools.find_multiple_matches(data_js, patron)
        for url, calidad, extension in matches:
            url = url.replace(",", "%2C")
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.append([title, url])
        itemlist.reverse()
    else:
        enlaces = servertools.findvideosbyserver(item.url, item.server)
        if len(enlaces) > 0:
            itemlist.append(
                item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Example #26
def play(item):
    logger.info()
    itemlist = []

    if item.extra != "":
        post = "id=%s" % item.extra
        data = httptools.downloadpage("http://www.cinefox.tv/goto/", post=post, add_referer=True).data

        item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')

    url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
    url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
    if item.server:
        enlaces = servertools.findvideosbyserver(url, item.server)[0]
    else:
        enlaces = servertools.findvideos(url)[0]
    itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))
    
    return itemlist
Example #27
    def onAction(self, action):
        if action.getId() == ACTION_SELECT_ITEM and not controlId() == OPTIONS_OK:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            selecitem = self.list.getSelectedItem()
            url = selecitem.getProperty("url")
            data = scrapertools.cache_page(url)
            url = servertools.findvideosbyserver(data, "p2p")
            if url:
                url = url[0][1]
            # Build the item for platformtools
            item = Item()
            item.fulltitle = self.fulltitle
            item.url = url + "|" + item.fulltitle
            item.server = "p2p"
            self.close()
            check_skin = xbmc.getSkinDir()

            if not "confluence" in check_skin:
                xbmc.sleep(300)
                xbmc.executebuiltin('Action(PreviousMenu)')
                xbmc.sleep(300)

            platformtools.play_video(item)
            self.close()
            check_skin = xbmc.getSkinDir()

            if "confluence" in check_skin:
                if xbmc.Player().isPlayingVideo():
                    #xbmc.sleep(300)
                    xbmc.executebuiltin('Action(PreviousMenu)')
                else:
                    xbmc.executebuiltin('Action(PreviousMenu)')
            else:
                xbmc.executebuiltin('Action(PreviousMenu)')
        elif action.getId() == ACTION_PREVIOUS_MENU or action.getId() == ACTION_MOUSE_RIGHT_CLICK or action == 92:

            self.close()
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
Example #28
def play(item):
    logger.info()
    itemlist = []
    if item.server == "directo" and "tusfiles" in item.url:
        data = httptools.downloadpage(item.url).data.replace("\\", "")
        matches = scrapertools.find_multiple_matches(data, '"label"\s*:\s*(.*?),"type"\s*:\s*"([^"]+)","file"\s*:\s*"([^"]+)"')
        for calidad, tipo, video_url in matches:
            tipo = tipo.replace("video/", "")
            video_url += "|Referer=%s" % item.url
            itemlist.append([".%s %sp [directo]" % (tipo, calidad), video_url])
        try:
            itemlist.sort(key=lambda it:int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
        except:
            pass
    else:
        devuelve = servertools.findvideosbyserver(item.url, item.server)
        if devuelve:
            itemlist.append(item.clone(url=devuelve[0][1]))

    return itemlist
Example #29
def play(item):
    logger.info()
    itemlist = []

    if not item.url.startswith("http") and not item.url.startswith("magnet"):
        post = "source=%s&action=obtenerurl" % urllib.quote(item.url)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage("%s/wp-admin/admin-ajax.php" % host.replace("https", "http"), post=post, headers=headers, follow_redirects=False).data

        url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
        if "enlacesmix" in url:
            data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
            url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
        enlaces = servertools.findvideosbyserver(url, item.server)
        if enlaces:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    else:
        itemlist.append(item.clone())

    return itemlist
Example #30
def play(item):
    logger.info()
    itemlist = []
    if item.server == "directo" and "tusfiles" in item.url:
        data = httptools.downloadpage(item.url).data.replace("\\", "")
        matches = scrapertools.find_multiple_matches(data, '"label"\s*:\s*(.*?),"type"\s*:\s*"([^"]+)","file"\s*:\s*"([^"]+)"')
        for calidad, tipo, video_url in matches:
            tipo = tipo.replace("video/", "")
            video_url += "|Referer=%s" % item.url
            itemlist.append([".%s %sp [directo]" % (tipo, calidad), video_url])
        try:
            itemlist.sort(key=lambda it:int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
        except:
            pass
    else:
        devuelve = servertools.findvideosbyserver(item.url, item.server)
        if devuelve:
            itemlist.append(item.clone(url=devuelve[0][1]))

    return itemlist
Example #31
    def onClick(self, controlId):
        if controlId == OPTION_PANEL:
            xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
            self.list = self.getControl(6)
            selecitem = self.list.getSelectedItem()
            url = selecitem.getProperty("url")
            data = scrapertools.cache_page(url)
            url = servertools.findvideosbyserver(data, "p2p")
            if url:
                url = url[0][1]
            # Build the item for platformtools
            item = Item()
            item.fulltitle = self.fulltitle
            item.url = url + "|" + item.fulltitle
            item.server = "p2p"
            self.close()
            check_skin = xbmc.getSkinDir()

            if not "confluence" in check_skin:
                xbmc.sleep(300)
                xbmc.executebuiltin('Action(PreviousMenu)')
                xbmc.sleep(300)

            platformtools.play_video(item)
            check_skin = xbmc.getSkinDir()

            if "confluence" in check_skin:
                if xbmc.Player().isPlayingVideo():
                    #xbmc.sleep(300)
                    xbmc.executebuiltin('Action(PreviousMenu)')
                else:
                    xbmc.executebuiltin('Action(PreviousMenu)')
            else:
                xbmc.executebuiltin('Action(PreviousMenu)')

        elif controlId == OPTIONS_OK:
            self.close()
            TESTPYDESTFILE = xbmc.translatePath(
                'special://skin/720p/DialogSelect2.xml')
            xbmc.sleep(300)
            xbmc.executebuiltin('Action(PreviousMenu)')
Example #32
def play(item):
    logger.info()
    itemlist = []

    location = ""
    i = 0
    while not location:
        try:
            data = httptools.downloadpage(item.url).data
            url_redirect = scrapertools.find_single_match(
                data,
                'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"'
            )
            if not url_redirect:
                import StringIO
                compressedstream = StringIO.StringIO(data)
                import gzip
                gzipper = gzip.GzipFile(fileobj=compressedstream)
                data = gzipper.read()
                gzipper.close()
                url_redirect = scrapertools.find_single_match(
                    data,
                    'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"'
                )
            location = httptools.downloadpage(
                url_redirect, follow_redirects=False).headers["location"]
        except:
            pass
        i += 1
        if i == 6:
            return itemlist

    enlaces = servertools.findvideosbyserver(location, item.server)
    if len(enlaces) > 0:
        itemlist.append(
            item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
Example #33
def play(item):
    itemlist = []
    data = httptools.downloadpage(item.url, follow_redirects=False)
    if data.headers.get("refresh") or data.headers.get("location"):
        import urllib
        if data.headers.get("refresh"):
            url = scrapertools.find_single_match(data.headers["refresh"], '(?i)URL=(.*)')
        else:
            url = data.headers["location"]
        url = urllib.unquote_plus(url.replace("https://url.rw/?", ""))
        data = httptools.downloadpage(url, headers={'Referer': item.url}).data
        embedurl = scrapertools.find_single_match(data, "<iframe.*?src=\s*['\"]([^'\"]+)['\"]")
        data = httptools.downloadpage(embedurl, headers={'Referer': url}).data
    else:
        data = data.data

    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0",
               "Referer": item.url}
    if "Web" in item.title:
        iframe = scrapertools.find_single_match(data, '<iframe.*?src="(http://tumarcador.xyz/red[^"]+)"')
        if iframe:
            data = httptools.downloadpage(iframe, headers=headers, replace_headers=True).data

        videourl = scrapertools.find_single_match(data, "(?:source:|source src=)\s*['\"]([^'\"]+)['\"]")
        if not videourl:
            baseurl, var_url, lasturl = scrapertools.find_single_match(data, 'return\(\[([^\[]+)\].*?\+\s*([A-z]+)\.join.*?"([^"]+)"\)\.innerHTML')
            auth = scrapertools.find_single_match(data, var_url+'\s*=\s*\[([^\[]+)\]')
            lasturl = scrapertools.find_single_match(data, lasturl+'\s*>\s*([^<]+)<')
            videourl = baseurl + auth + lasturl
            videourl = re.sub(r'"|,|\\', '', videourl)

        videourl += "|User-Agent=%s" % headers["User-Agent"]
        itemlist.append(Item(channel=__channel__, title=item.title, server="directo", url=videourl, action="play", folder=False))
    else:
        lista = servertools.findvideosbyserver(data, 'p2p')
        if lista:
            itemlist.append(Item(channel=__channel__, title=item.title, server="p2p", url=lista[0][1], action="play", folder=False))
    return itemlist
Example #34
def play(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    urls = servertools.findvideosbyserver(data, "p2p")
    if urls:
        url = urls[0][1] + "|" + item.title
        itemlist.append(Item(channel=__channel__, title=item.title, server="p2p", url=url, action="play"))
    else:
        headers = {"Referer": iframe}
        newurl = scrapertools.find_single_match(data, "src='(http://freelive365.com/server[^']+)'")
        newurl = newurl.replace("channel.php?file=", "embed.php?a=") + "&strech="
        data = httptools.downloadpage(newurl, headers=headers).data

        url_video = scrapertools.find_single_match(data, "'streamer'\s*,\s*'([^']+)'")
        if "rtmp" in url_video:
            file = scrapertools.find_single_match(data, "'file'\s*,\s*'([^']+)'")
            url_video += " playpath=%s swfUrl=http://freelive365.com/player.swf live=true swfVfy=1 pageUrl=%s token=0fea41113b03061a" % (file, newurl)

        itemlist.append(Item(channel=__channel__, title=item.title, server="directo", url=url_video, action="play"))
    
    return itemlist
Example #35
File: dascer.py Project: shlibidon/addon
def findvideos(item):
    logger.info()
    
    itemlist = []
    itemlist_t = []                                                             # Full itemlist of links
    itemlist_f = []                                                             # Itemlist of filtered links
    matches = []

    #logger.debug(item)

    # Now handle the .torrent links with their different qualities
    for scrapedurl, scrapedserver in item.url_enlaces:

        # Make a copy of the Item to work on
        item_local = item.clone()

        item_local.url = scrapedurl
        item_local.server = scrapedserver.lower()
        item_local.action = "play" 
        
        # Look for the size in the .torrent file
        size = ''
        if item_local.server == 'torrent' and not size and not item_local.url.startswith('magnet:'):
            size = generictools.get_torrent_size(item_local.url) #              Look up the size in the .torrent from the website

        if size:
            size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
                        .replace('Mb', 'M·b').replace('.', ',')
            item_local.torrent_info = '%s, ' % size                             # Add the size
        if item_local.url.startswith('magnet:') and not 'Magnet' in item_local.torrent_info:
            item_local.torrent_info += ' Magnet'
        if item_local.torrent_info:
            item_local.torrent_info = item_local.torrent_info.strip().strip(',')
            if not item.unify:
                item_local.torrent_info = '[%s]' % item_local.torrent_info

        # Now render the links
        item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][%s][/COLOR] ' %item_local.server.capitalize() \
                        + '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
                        (item_local.quality, str(item_local.language), \
                        item_local.torrent_info)

        # Verify the links
        if item_local.server != 'torrent':
            if config.get_setting("hidepremium"):                               #Si no se aceptan servidore premium, se ignoran
                if not servertools.is_server_enabled(item_local.server):
                    continue
            devuelve = servertools.findvideosbyserver(item_local.url, item_local.server)    # does the link exist?
            if not devuelve:
                continue
            item_local.url = devuelve[0][1]
            item_local.alive = servertools.check_video_link(item_local.url, item_local.server, timeout=timeout)     # is the link alive?
            if 'NO' in item_local.alive:
                continue
        else:
            if not size or 'Magnet' in size:
                item_local.alive = "??"                                         #Calidad del link sin verificar
            elif 'ERROR' in size:
                item_local.alive = "no"                                         #Calidad del link en error?
                continue
            else:
                item_local.alive = "ok"                                         #Calidad del link verificada
        
        itemlist_t.append(item_local.clone())                                   # Shown on screen if languages are not filtered
        
        # Required for FilterTools
        if config.get_setting('filter_languages', channel) > 0:                 # If a language is selected, filter
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  # Shown on screen if not empty

    if len(itemlist_f) > 0:                                                     # If there are filtered entries...
        itemlist.extend(itemlist_f)                                             # Render the filtered view
    else:                                                                       
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: # If there are no filtered entries ...
            thumb_separador = get_thumb("next.png")                             # ... render everything with a warning
            itemlist.append(Item(channel=item.channel, url=host, 
                        title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", 
                        thumbnail=thumb_separador, folder=False))
        itemlist.extend(itemlist_t)                                             # Render everything if nothing was filtered
    
    # Required for AutoPlay
    autoplay.start(itemlist, item)                                              # Launch AutoPlay
    
    return itemlist
Example #36
def findvideos(item):
    itemlist = []
    duplicated = []

    data = httptools.downloadpage(item.url).data
    patron = '<div class="player-box" id="tabs-(\d+)"><iframe data-src="(.*?)".*?allowfullscreen'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for id, scrapedurl in matches:
        lang = scrapertools.find_single_match(
            data,
            '<li><a href="#tabs-%s"><img src=".*?"  alt="(.*?)".*?\/>' % id)
        server = servertools.get_server_from_url(scrapedurl)
        title = '%s (%s) (%s)' % (item.title, server, lang)
        thumbnail = ''
        if 'enlac' in scrapedurl:

            if 'google' in scrapedurl:
                server = 'gvideo'
            elif 'openload' in scrapedurl:
                server = 'openload'

            title = '%s (%s) (%s)' % (item.title, server, lang)
            scrapedurl = scrapedurl.replace('embed', 'stream')
            gdata = httptools.downloadpage(scrapedurl).data
            url_list = servertools.findvideosbyserver(gdata, server)
            for url in url_list:
                if url[1] not in duplicated:
                    thumbnail = servertools.guess_server_thumbnail(server)
                    itemlist.append(
                        item.clone(title=title,
                                   url=url[1],
                                   action='play',
                                   server=server,
                                   thumbnail=thumbnail))
                    duplicated.append(url[1])

        elif '.html' in scrapedurl:
            url_list = servertools.findvideosbyserver(data, server)
            for url in url_list:
                if url[1] not in duplicated:
                    thumbnail = servertools.guess_server_thumbnail(server)
                    itemlist.append(
                        item.clone(title=title,
                                   url=url[1],
                                   action='play',
                                   server=server,
                                   thumbnail=thumbnail))
                    duplicated.append(url[1])
        else:
            url = scrapedurl
            if url not in duplicated:
                thumbnail = servertools.guess_server_thumbnail(server)
                itemlist.append(
                    item.clone(title=title,
                               url=url,
                               action='play',
                               server=server,
                               thumbnail=thumbnail))
                duplicated.append(url)

    return itemlist
Example #37
def findvideos(item):
    logger.info()
    itemlist = []
    duplicados = []
    data = httptools.downloadpage(item.url).data
    logger.debug('data: %s' % data)
    video_page = scrapertools.find_single_match(
        data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
    data = httptools.downloadpage(video_page).data
    patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:

        if 'tipo' in scrapedurl:
            server = 'gvideo'
            gvideo_data = httptools.downloadpage(scrapedurl).data
            video_url = scrapertools.find_single_match(
                gvideo_data,
                '<div id="player">.*?border: none" src="\/\/(.*?)" ')
            video_url = 'http://%s' % video_url
            gvideo_url = httptools.downloadpage(video_url).data
            videourl = servertools.findvideosbyserver(gvideo_url, server)

            logger.debug('videourl: %s' % videourl)
            language = 'latino'
            quality = 'default'
            url = videourl[0][1]
            title = '%s (%s)' % (item.contentTitle, server)
            thumbnail = item.thumbnail
            fanart = item.fanart
            if video_url not in duplicados:
                itemlist.append(
                    item.clone(action="play",
                               title=title,
                               url=url,
                               thumbnail=thumbnail,
                               fanart=fanart,
                               show=title,
                               extra='gvideo',
                               language=language,
                               quality=quality,
                               server=server))
                duplicados.append(video_url)

    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1

    if item.extra == 'findvideos' and 'youtube' in itemlist[-1].url:
        itemlist.pop(1)

        # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    if 'serie' not in item.url:
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(
                    channel=item.channel,
                    title=
                    '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                    url=item.url,
                    action="add_pelicula_to_library",
                    extra="findvideos",
                    contentTitle=item.contentTitle))

    return itemlist