예제 #1
0
파일: cuevana3.py 프로젝트: lopezvg/addon
def findvideos(item):
    """Build the list of playable server options for *item*.

    Parses the "TPlayer" tab block: each tab's DOM id encodes the
    language (prefix) and the server (last digit). The last tab is
    skipped via matches[:-1] — presumably a download/extra option;
    TODO confirm against the site markup.
    """
    logger.info()

    itemlist = list ()
    # Last digit of the tab id -> server name.
    servers_list = {"1": "directo", "2": "streamtape", "3": "fembed", "4": "netu"}
    soup = create_soup(item.url, forced_proxy_opt=forced_proxy_opt).find("div", class_="TPlayer embed_div")

    matches = soup.find_all("div", class_="TPlayerTb")
    for elem in matches[:-1]:
        srv = servers_list.get(elem["id"][-1], "directo")
        # Remaining id prefix encodes the language code.
        lang = IDIOMAS.get(elem["id"][:-1].lower(), "VOSE")
        elem = elem.find("iframe")
        url = elem["data-src"]
        # Some embeds carry a video id in the "?h=" query parameter.
        v_id = scrapertools.find_single_match(url, '\?h=(.*)')

        if url:
            itemlist.append(Item(channel=item.channel, title="%s", url=url, action="play", server=srv.capitalize(),
                                 language=lang, v_id=v_id, infoLabels=item.infoLabels))

    # Fill each "%s" title placeholder with "Server [language]".
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s]' % (i.server.capitalize(),
                                                                                           i.language))


    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
예제 #2
0
def findvideos(item):
    """Extract playable video URLs and resolve their servers.

    Scrapes the page's ``var videos`` block for iframe sources and
    unwraps the Phantom/base64/jsunpack obfuscation used by
    ``/player.php`` embeds.

    Fix: the cleanup regex contained an empty alternative
    (``...|<br>||<br/>``). The empty branch matched at every position,
    so the ``<br/>`` alternative was unreachable and those tags were
    never stripped.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br/>|<br>', "", data)
    data = scrapertools.find_single_match(data, 'var videos =(.*?)\}')
    patron = 'src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        url = url.replace("cloud/index.php", "cloud/query.php")
        if "/player.php" in url:
            # Obfuscated embed: base64("..."+"...") wrapping p.a.c.k.e.d JS.
            data = httptools.downloadpage(url).data
            phantom = scrapertools.find_single_match(
                data, 'Phantom.Start\("(.*?)"\)')
            phantom = phantom.replace('"+"', '')
            import base64
            packed = base64.b64decode(phantom).decode("utf8")
            unpacked = jsunpack.unpack(packed)
            url = scrapertools.find_single_match(unpacked, '"src","([^"]+)"')
            if not url.startswith("https"):
                url = "https:%s" % url
        itemlist.append(
            item.clone(title="%s",
                       url=url,
                       action='play',
                       language='VO',
                       contentTitle=item.contentTitle))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist,
                                                lambda x: x.title % x.server)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
예제 #3
0
def findvideos(item):
    """Resolve video links through the danimados gilberto.php endpoint.

    NOTE(review): the third pattern line (``data-user="******"]+)``)
    looks garbled/censored in this copy — the capture group is
    unbalanced, so the pattern is unlikely to match; confirm against the
    original channel source.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron  = 'player-option-\d+.*?'
    patron += 'data-sv=(\w+).*?'
    patron += 'data-user="******"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    headers = {"X-Requested-With":"XMLHttpRequest"}  # NOTE(review): built but never passed to any request
    for scrapedserver, scrapeduser in matches:
        data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
        data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
        # The iframe's data-source attribute is base64-encoded.
        url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
        url1 = devuelve_enlace(url)
        if url1:
            itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
        itemlist.append(
            item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                action="add_pelicula_to_library"))
    autoplay.start(itemlist, item)
    return itemlist
예제 #4
0
def findvideos(item):
    """List playable server options obtained via the AlfaChannel helpers."""
    logger.info()

    itemlist = []

    soup, matches = AlfaChannel.get_video_options(item.url)

    for option in matches:
        info = AlfaChannel.get_data_by_post(option).json
        itemlist.append(Item(
            channel=item.channel,
            title='%s',
            url=info['embed_url'],
            action='play',
        ))

    # Resolve server names and fill the "%s" title placeholder.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server.capitalize())

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    offer_library = (config.get_videolibrary_support()
                     and len(itemlist) > 0
                     and item.extra != 'findvideos')
    if offer_library:
        itemlist.append(Item(
            channel=item.channel,
            title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
            url=item.url,
            action="add_pelicula_to_library",
            extra="findvideos",
            contentTitle=item.contentTitle,
        ))

    return itemlist
예제 #5
0
def play(item):
    """Resolve the sandboxed iframe URL and inject performer names into the title.

    The insert positions (5 for "HD" titles, 3 otherwise) assume a fixed
    word layout in item.title — TODO confirm against the listing code.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(
        data, '<iframe sandbox="[^"]+" src="([^"]+)"')
    pornstars = scrapertools.find_multiple_matches(
        data, '<h3 class="hireq">([^<]+)<')
    pornstar = ' & '.join(pornstars)
    pornstar = "[COLOR cyan]%s[/COLOR]" % pornstar
    lista = item.title.split()
    logger.debug(lista)
    if "HD" in item.title:
        lista.insert(5, pornstar)
    else:
        lista.insert(3, pornstar)
    item.title = ' '.join(lista)
    patron = '<iframe sandbox="[^"]+" src="([^"]+)"'  # NOTE(review): unused duplicate of the pattern above
    itemlist.append(
        item.clone(action="play", title="%s", contentTitle=item.title,
                   url=url))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
예제 #6
0
def findvideos(item):
    """List quality-labelled play Items, or a "Próximamente" stub.

    Fix: the tmdb lookup guard used a bare ``except:``, which also
    swallows SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "Próximamente" in data:
        itemlist.append(Item(channel=item.channel, title="Próximamente"))
        return itemlist
    patron = 'data-link="([^"]+).*?'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, calidad in matches:
        itemlist.append(
            item.clone(
                channel=item.channel,
                action="play",
                title=calidad,
                fulltitle=item.title,
                contentThumbnail=item.thumbnail,
                url=url,
            ))
    try:
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        # Best effort: metadata failures must not break link listing.
        pass
    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.append(Item(channel=item.channel))  # visual separator row
    if config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir a la videoteca",
                 text_color="green",
                 action="add_pelicula_to_library",
                 url=item.url,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle))
    return itemlist
예제 #7
0
파일: cinetux.py 프로젝트: Jaloga/xiaomi
def play(item):
    """Resolve the real video URL for a cinetux item.

    Two paths: without item.spost, follow the visible "link" anchor on
    the page; with it, POST to wp-admin/admin-ajax.php and read the
    iframe src. Both results go through get_url() before server
    detection. Returns the one-element list produced by
    get_servers_itemlist (the local name ``item`` is rebound to it).
    """
    if not item.spost:
        new_data = httptools.downloadpage(item.url,
                                          headers={
                                              'Referer': item.url
                                          }).data
        url = scrapertools.find_single_match(new_data,
                                             'id="link" href="([^"]+)"')
        item.url = get_url(url)
    else:
        post = item.spost
        new_data = httptools.downloadpage(CHANNEL_HOST +
                                          'wp-admin/admin-ajax.php',
                                          post=post,
                                          headers={
                                              'Referer': item.url
                                          }).data

        url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        item.url = get_url(url)
    # Clear any stale server so get_servers_itemlist re-detects it.
    item.server = ""
    item = servertools.get_servers_itemlist([item])
    #item.thumbnail = item.contentThumbnail
    return item
예제 #8
0
파일: mariachi.py 프로젝트: Intel11/prueba
def mainlist(item):
    """List "El Mariachi" episodes scraped from the channel host page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    logger.info("Intel11 %s" % data)  # NOTE(review): logs the entire page body — debug leftover?
    patron = '(?is)<span>([^<]+)'
    patron += '.*?<a href="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for title, url in matches:
        # Episode number taken from "...tulo N" (capítulo/título) in the title.
        episode = scrapertools.find_single_match(title, 'tulo (\w+)')
        itemlist.append(
            Item(action="play",
                 channel=item.channel,
                 infoLabels={
                     "season": 1,
                     "episode": episode
                 },
                 contentSerieName="El Mariachi",
                 title="El Mariachi - " + title,
                 url=url))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist)
    scrapertools.printMatches(itemlist)
    return itemlist
예제 #9
0
def play(item):
    """Resolve the responsive-player iframe into playable entries.

    Two result shapes: a list of Items for external servers, or a list
    of ``[quality, url]`` pairs for direct "player-x.php" sources.

    Fix: the sort lambda's parameter was named ``item``, shadowing the
    function argument; renamed (behavior unchanged).
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url).find('div', class_='responsive-player')
    url = soup.iframe['src']
    if "player-x.php" not in url:
        itemlist.append(
            item.clone(action="play",
                       title="%s",
                       contentTitle=item.title,
                       url=url))
        itemlist = servertools.get_servers_itemlist(
            itemlist, lambda i: i.title % i.server.capitalize())
    else:
        # Direct sources: collect [quality, url] pairs and sort by the
        # numeric part of the quality label (e.g. "720p" -> 720).
        soup = create_soup(url).find_all('source')
        logger.debug(soup)
        for elem in soup:
            url = elem['src']
            quality = elem['title']
            itemlist.append(['%s' % quality, url])
        itemlist.sort(key=lambda entry: int(re.sub("\D", "", entry[0])))
    # Required by AutoPlay
    # autoplay.start(itemlist, item)
    return itemlist
예제 #10
0
def findvideos(item):
    """Build play Items from the link dicts in ``item.url['links']``.

    Fix: the original only returned inside the ``if 'links'`` branch,
    so a url dict without a 'links' key made the function return None
    implicitly; it now always returns the (possibly empty) itemlist so
    callers can iterate safely.
    """
    logger.info()
    itemlist = []
    if 'links' in item.url:
        for url in item.url['links']:
            quality, language, plot, poster = set_extra_values(url)
            title = ''
            title = set_title(title, language, quality)

            itemlist.append(
                Item(channel=item.channel,
                     title=format_title('%s' + title),
                     url=url['url'],
                     action='play',
                     quality=quality,
                     language=language,
                     infoLabels=item.infoLabels))

        # Fill the "%s" placeholder with the detected server name.
        itemlist = servertools.get_servers_itemlist(
            itemlist, lambda i: i.title % i.server.capitalize())

        # Required by AutoPlay
        autoplay.start(itemlist, item)

    return itemlist
예제 #11
0
파일: danimados.py 프로젝트: pipcat/addon
def findvideos(item):
    """Query the doo_player AJAX endpoint for each listed server option."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'data-type="(tv).*?'
    patron += 'data-post="([^"]+).*?'
    patron += 'data-nume="([^"]+).*?'
    patron += 'server">([^<]+).*?'
    matches = scrapertools.find_multiple_matches(data, patron)
    headers = {"X-Requested-With": "XMLHttpRequest"}
    for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
        post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" % (
            scrapedtype, scrapedpost, scrapednume)
        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php",
                                       headers=headers,
                                       post=post).data
        url1 = scrapertools.find_single_match(data1, "src='([^']+)")
        # devuelve_enlace() filters/normalizes the embed URL; falsy means skip.
        url1 = devuelve_enlace(url1)
        if url1:
            itemlist.append(
                item.clone(title="Ver en %s", url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)
    # Fill the "%s" placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(
            itemlist
    ) > 0 and item.contentType == "movie" and item.contentChannel != 'videolibrary':
        itemlist.append(
            item.clone(
                channel=item.channel,
                title=
                '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                url=item.url,
                action="add_pelicula_to_library"))
    autoplay.start(itemlist, item)
    return itemlist
예제 #12
0
def findvideos(item):
    """Build play Items for every non-trailer language option on the page."""
    logger.info()

    itemlist = []

    soup, matches = AlfaChannel.get_video_options(item.url)

    for option in matches:
        if option["data-nume"] == "trailer":
            continue
        flag_src = option.find("span", class_="flag").img["src"]
        lang_code = scrapertools.find_single_match(flag_src, "/flags/([^.]+).")
        info = AlfaChannel.get_data_by_post(option).json
        embed = info.get("embed_url", "")
        # Skip empty results and YouTube embeds (trailers, not servers).
        if not embed or "youtube" in embed:
            continue

        itemlist.append(Item(
            channel=item.channel,
            title='%s',
            action='play',
            url=embed,
            language=IDIOMAS.get(lang_code, "VOSE"),
            infoLabels=item.infoLabels,
        ))

    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(
            channel=item.channel,
            title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
            url=item.url,
            action="add_pelicula_to_library",
            extra="findvideos",
            contentTitle=item.contentTitle,
        ))

    return itemlist
예제 #13
0
def findvideos(item):
    """Turn every embedded iframe on the page into a playable Item."""
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = httptools.downloadpage(item.url).data
    for url in scrapertools.find_multiple_matches(data, '<iframe src="([^"]+)'):
        itemlist.append(item.clone(
            action="play",
            infoLabels=infoLabels,
            title="Ver en %s",
            url=url,
        ))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server.capitalize())
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel=item.channel))
        # "Add this movie to the KODI library" option
        if config.get_videolibrary_support():
            itemlist.append(Item(
                channel=item.channel,
                title="Añadir a la videoteca",
                text_color="green",
                action="add_pelicula_to_library",
                url=item.url,
                thumbnail=item.thumbnail,
                contentTitle=item.contentTitle,
            ))
    return itemlist
예제 #14
0
파일: doomtv.py 프로젝트: pcjavyjavy/addon
def findvideos(item):
    """Scrape the player tabs and return one play Item per video source."""
    logger.info()
    itemlist = []

    data = get_source(item.url)

    tab_pattern = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
    for tab_id, src in re.compile(tab_pattern, re.DOTALL).findall(data):
        # Protocol-relative embeds come without a scheme.
        if 'http' not in src:
            src = 'https:' + src
        itemlist.append(Item(
            channel=item.channel,
            url=src,
            title=item.title,
            contentTitle=item.title,
            action='play',
        ))
    itemlist = servertools.get_servers_itemlist(itemlist)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(
            channel=item.channel,
            title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
            url=item.url,
            action="add_pelicula_to_library",
            extra="findvideos",
            contentTitle=item.contentTitle,
        ))

    return itemlist
예제 #15
0
def findvideos(item):
    """Collect iframe/script sources between the "fmi" and "thead" markers."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)fmi(.*?)thead'
    bloque = scrapertools.find_single_match(data, patron)
    match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
    for url in match:
        titulo = "Ver en: %s"
        if "youtube" in url:
            titulo = "[COLOR = yellow]Ver trailer: %s[/COLOR]"
        # Skip ad/script junk entirely.
        if "ad.js" in url or "script" in url:
            continue
        elif "vimeo" in url:
            # Vimeo needs a Referer header, appended after "|".
            url += "|" + "http://www.allcalidad.com"
        itemlist.append(
                 Item(channel = item.channel,
                 action = "play",
                 title = titulo,
                 fulltitle = item.fulltitle,
                 thumbnail = item.thumbnail,
                 server = "",
                 url = url
                 ))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if itemlist:
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # "Add this movie to the KODI library" option
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))
    return itemlist
예제 #16
0
def findvideos(item):
    """Collect the links inside the "pettabs" section as play Items."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    links_data = scrapertools.find_single_match(
        data, '<div id="pettabs">(.*?)</ul>')
    for url in re.findall('href="([^"]+)"', links_data, re.DOTALL):
        itemlist.append(Item(
            channel=item.channel,
            title='%s',
            url=url,
            action='play',
            language='VO',
            contentTitle=item.contentTitle,
        ))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
예제 #17
0
파일: cinetux.py 프로젝트: x7r6xx/repo
def play(item):
    """Resolve a cinetux link to its final playable URL.

    Branch 1 (API/okru/drive/youtube embeds): extract the id hidden
    after "#" in an img src and rebuild the provider URL.
    Branch 2 (intermediate "links" pages): follow the first anchor,
    falling back to iframe src, then a JS location.replace() target;
    goo.gl short links are resolved via the redirect Location header.
    """
    logger.info()
    itemlist = []
    if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
        data = httptools.downloadpage(item.url,
                                      headers={
                                          'Referer': item.extra
                                      }).data.replace("\\", "")
        id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
        item.url = "http://docs.google.com/get_video_info?docid=" + id
        if item.server == "okru":
            item.url = "https://ok.ru/videoembed/" + id
        if item.server == "youtube":
            item.url = "https://www.youtube.com/embed/" + id
    elif "links" in item.url or "www.cinetux.me" in item.url:
        data = httptools.downloadpage(item.url).data
        scrapedurl = scrapertools.find_single_match(data,
                                                    '<a href="(http[^"]+)')
        if scrapedurl == "":
            # Fallbacks: iframe src, then a JS location.replace() call.
            scrapedurl = scrapertools.find_single_match(
                data, '(?i)frame.*?src="(http[^"]+)')
            if scrapedurl == "":
                scrapedurl = scrapertools.find_single_match(
                    data, 'replace."([^"]+)"')
        elif "goo.gl" in scrapedurl:
            # Short link: resolve via the redirect Location header only.
            scrapedurl = httptools.downloadpage(scrapedurl,
                                                follow_redirects=False,
                                                only_headers=True).headers.get(
                                                    "location", "")
        item.url = scrapedurl
    # Force server re-detection on the rewritten URL.
    item.server = ""
    itemlist.append(item.clone())
    itemlist = servertools.get_servers_itemlist(itemlist)
    for i in itemlist:
        i.thumbnail = i.contentThumbnail
    return itemlist
예제 #18
0
def findvideos(item):
    """Pair the page's "output" link stubs with the inline host list.

    NOTE(review): ``url`` can carry over from a previous iteration when
    neither the "codigo" nor the "no.html" branch assigns it, and
    ``online[id]`` is indexed with "" if no 'codigo<N>' id was matched —
    fragile, intentionally left as-is.
    """
    logger.info()
    itemlist = []
    serv=[]  # NOTE(review): unused
    data = httptools.downloadpage(item.url).data
    output = scrapertools.find_single_match(data, 'var output = "(.*?)output ').replace("\\", "")
    output = output.split(";")
    quality = scrapertools.find_single_match(data, "<strong>Calidad: </strong> (\d+)p<")
    online = scrapertools.find_single_match(data, '<div class="centradito"><script>[A-z0-9]+ \(([^\)]+)')
    online = online.replace('"', '').split(",")
    for elem in output:
        if "href" in elem :
            ref = scrapertools.find_single_match(elem, 'href="([^"]+)"')
            id = scrapertools.find_single_match(elem, 'codigo(\d+)')
            if id:
                id = (int(id)-1)  # 1-based codigo -> 0-based index into online
            if "codigo" in ref:
                url = online[id]
            if not "no.html" in ref:
                url = "%s%s" %(ref, online[id])
            itemlist.append(item.clone(action="play", title= "%s", contentTitle = item.title, url=url))
    # Extra download link published in the "abc" JS variable.
    descarga = scrapertools.find_single_match(data, "var abc = '([^']+)'")
    itemlist.append(item.clone(action="play", title= "%s", contentTitle = item.title, url=descarga))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' and not "/episodios/" in item.url :
        itemlist.append(item.clone(action="add_pelicula_to_library", 
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                             extra="findvideos", contentTitle=item.contentTitle)) 
    return itemlist
예제 #19
0
def findvideos(item):
    """Visit each /reproductor page and emit a play Item with its language."""
    itemlist = []
    data = get_source(item.url)
    links = re.compile('<a href=(/reproductor.*?)target', re.DOTALL).findall(data)

    for link in links:
        video_data = get_source(host + link)
        lowered = link.lower()
        # Language is inferred from the reproductor URL itself.
        if 'latino' in lowered:
            language = 'Latino'
        elif 'español' in lowered:
            language = 'Español'
        elif 'subtitulado' in lowered:
            language = 'VOSE'
        elif 'vo' in lowered:
            language = 'VO'
        else:
            language = ''

        url = scrapertools.find_single_match(video_data, '<iframe src=(.*?) scrolling')

        itemlist.append(Item(channel=item.channel, title='%s [%s]', url=url,
                             action='play', language=language,
                             infoLabels=item.infoLabels))

    # Fill the placeholders with "Server [language]".
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % (it.server.capitalize(), it.language))

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
예제 #20
0
def findvideos(item):
    """List play Items grouped by the page's "opción N" tab blocks."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    #item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
    #item.plot = scrapertools.htmlclean(item.plot).strip()
    #item.contentPlot = item.plot
    patron = '<strong>Ver película online.*?>.*?>([^<]+)'
    scrapedopcion = scrapertools.find_single_match(data, patron)
    # Fallback label used when a tab has no usable "opción N" caption.
    titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
    bloque  = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')
    cuenta = 0
    for datos in bloque:
        cuenta = cuenta + 1
        patron = '<em>((?:opción|opción) %s.*?)</em>' %cuenta
        scrapedopcion = scrapertools.find_single_match(data, patron)
        titulo_opcion = "(" + scrapertools.find_single_match(scrapedopcion, "op.*?, (.*)").upper() + ")"
        # Trailer/empty captions fall back to the page-level label.
        if "TRAILER" in titulo_opcion or titulo_opcion == "()":
            titulo_opcion = "(" + titulo_opcional + ")"
        urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
        titulo = "Ver en %s " + titulo_opcion
        for url in urls:
            itemlist.append(item.clone(action = "play",
                                 title = titulo,
                                 url = url
                                 ))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    #tmdb.set_infoLabels_itemlist(itemlist, True)
    if itemlist:
        if config.get_videolibrary_support():
                itemlist.append(Item(channel = item.channel, action = ""))
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     contentTitle = item.contentTitle
                                     ))
    return itemlist
예제 #21
0
def findvideos(item):
    """Build play Items tagged with the scraped quality and language."""
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedidioma, scrapedcalidad in matches:
        title = "%s [" + scrapedcalidad + "][" + scrapedidioma + "]"
        quality = scrapedcalidad
        language = scrapedidioma
        # Skip known dead/placeholder links.
        if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
            itemlist.append(
                item.clone(channel=item.channel,
                           action="play",
                           title=title,
                           fulltitle=item.title,
                           url=scrapedurl,
                           quality=quality,
                           language=language,
                           extra=item.thumbnail))
    tmdb.set_infoLabels(itemlist, True)
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # "Add this movie to the KODI library" option
    if item.extra != "library":
        if config.get_videolibrary_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir a la videoteca",
                     text_color="green",
                     action="add_pelicula_to_library",
                     url=item.url,
                     thumbnail=item.thumbnail,
                     fulltitle=item.title))
    return itemlist
예제 #22
0
def findvideos(item):
    """Scrape the "- on <server>" links, skipping vidup/vev.io hosts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    # if "streamz" in url:
    # url = url.replace("streamz.cc", "stream2.vg").replace("streamz.vg", "stream2.vg")
    matches = scrapertools.find_multiple_matches(
        data, ' - on ([^"]+)" href="([^"]+)"')
    for scrapedtitle, url in matches:
        # Dead hosts are skipped outright.
        if "vidup" in url or "vev.io/" in url:
            continue
        itemlist.append(item.clone(
            action="play",
            title="%s",
            contentTitle=item.title,
            url=url,
        ))
    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda it: it.title % it.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
예제 #23
0
파일: seriesblanco.py 프로젝트: x7r6xx/repo
def findvideos(item):
    """List the "Ver"/"Descargar" link sections for a seriesblanco episode.

    Fixes:
    - the server-name normalization assigned ``d.replace("streamix", ...)``
      and then immediately overwrote it with ``d.replace("uploaded", ...)``,
      so the streamix -> streamixcloud mapping was always lost; both
      replacements are now chained.
    - the bare ``except:`` around the setting lookup is narrowed to
      ``Exception``.
    """
    logger.info("%s = %s" % (item.show, item.url))

    # Download the page
    data = httptools.downloadpage(item.url).data
    # logger.info(data)

    online = extract_videos_section(data)
    try:
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except Exception:
        filtro_enlaces = 2  # default: show both sections

    list_links = []

    if filtro_enlaces != 0:
        list_links.extend(parse_videos(item, "Ver", online[-2]))

    if filtro_enlaces != 1:
        list_links.extend(parse_videos(item, "Descargar", online[-1]))
    list_links = filtertools.get_links(list_links, item, list_idiomas,
                                       CALIDADES)

    # Derive the server name from the "... en <server> [..." fragment of
    # each link title, applying known server-name renames.
    for link in list_links:
        title = link.title
        tail = title[title.find("en") + 2:]
        server_name = tail.split('[')[0].strip()
        link.server = (server_name.replace("streamix", "streamixcloud")
                                  .replace("uploaded", "uploadedto"))

    list_links = servertools.get_servers_itemlist(list_links)
    autoplay.start(list_links, item)

    return list_links
예제 #24
0
def findvideos(item):
    """Query each player source via admin-ajax and return play Items.

    NOTE(review): uses ``urllib.quote`` — a Python 2 API (moved to
    urllib.parse.quote in Python 3); this channel appears to target a
    Python 2 Kodi runtime. Confirm before porting.
    """
    logger.info()
    itemlist = []

    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data

    # Backfill plot/fanart from the page when the item lacks them.
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart

    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post, headers=headers).data
        url = jsontools.load(data_url).get("url")

        if 'openload' in url:
            # openload needs the page URL as Referer, appended after "|".
            url = url + '|' + item.url

        title = "%s - %s" % ('%s', title)
        itemlist.append(item.clone(action="play", url=url, title=title, text_color=color3))

    # Fill the "%s" title placeholder with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library",
                                   extra="findvideos", text_color="green"))

    return itemlist
예제 #25
0
def play(item):
    """Resolve the final playable URL for an item via the site's ajax endpoint.

    Posts ``item.post`` to wp-admin/admin-ajax.php and extracts the iframe
    embed URL from the response.

    :param item: Item carrying ``post`` (form payload) and ``ref`` (referer)
    :return: list with a single playable Item (empty if no iframe was found)
    """
    logger.info()

    itemlist = list()

    doo_url = "%swp-admin/admin-ajax.php" % host
    data = httptools.downloadpage(doo_url,
                                  post=item.post,
                                  headers={
                                      "referer": item.ref
                                  }).data
    try:
        url = BeautifulSoup(data, "html5lib").find("iframe")["src"]
    # find() returns None when no iframe exists (TypeError on subscription)
    # and the tag may lack a src attribute (KeyError). Anything else should
    # propagate rather than be silently swallowed.
    except (TypeError, KeyError):
        return itemlist

    # Embed URLs are frequently protocol-relative ("//host/...").
    if not url.startswith("http"):
        url = "https:%s" % url

    itemlist.append(item.clone(url=url, server=''))

    itemlist = servertools.get_servers_itemlist(itemlist)

    return itemlist
예제 #26
0
def findvideos(item):
    """Extract playable URLs from the listing page.

    Handles two site quirks: ``*.tk/goto/`` links wrap the real URL in up to
    three layers of base64 behind a redirector, and "mangovideo" embeds hide
    the file path behind a numeric server id that maps to a CDN host.

    :param item: channel Item pointing at the video page
    :return: list of playable Items
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    patron = '- on ([^"]+)" href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    # Numeric server id -> host prefix used by mangovideo embeds. Unknown ids
    # fall through unchanged (same as the original if-chain behavior).
    mango_hosts = {
        "1": "https://www.mangovideo.pw/contents/videos/",
        "7": "https://server9.mangovideo.pw/contents/videos/",
        "8": "https://s10.mangovideo.pw/contents/videos/",
        "10": "https://server217.mangovideo.pw/contents/videos/",
        "11": "https://234.mangovideo.pw/contents/videos/",
    }
    for scrapedtitle, url in matches:
        if "tk/goto/" in url:
            # Strip the redirector prefix and peel up to three base64 layers.
            for _ in range(3):
                url = url.replace("https://vshares.tk/goto/", "").replace(
                    "https://waaws.tk/goto/",
                    "").replace("https://openloads.tk/goto/", "")
                logger.debug(url)
                url = base64.b64decode(url)
                # Python 3: b64decode returns bytes; decode so the replace()
                # calls on the next pass and the checks below keep working.
                if isinstance(url, bytes):
                    url = url.decode("utf-8")
        if "mangovideo" in url:  # listed by the site as a direct link
            embed_data = httptools.downloadpage(url).data
            patron = 'video_url: \'function/0/https://mangovideo.pw/get_file/(\d+)/\w+/(.*?)/\?embed=true\''
            mango_matches = scrapertools.find_multiple_matches(embed_data, patron)
            # NOTE(review): as in the original code, only the last match of
            # the embed page ends up in `url` — confirm that is intended.
            for server_id, path in mango_matches:
                url = mango_hosts.get(server_id, server_id) + path
        itemlist.append(item.clone(action="play", title="%s", url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
예제 #27
0
def findvideos(item):
    """Build playable items from each 'Mirror' button's toplay() arguments.

    Each mirror's onClick parameters are forwarded to the site's ajax
    endpoint, whose response contains the actual iframe embed URL.
    """
    logger.info()
    itemlist = []
    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", page)

    ajax_headers = {"X-Requested-With": "XMLHttpRequest"}
    for raw_args in re.findall('onClick="toplay\((.*?)\).*?>Mirror', page, re.DOTALL):
        args = raw_args.replace("'", "").split(",")
        play_url = ("http://www.veporns.com/ajax.php?page=video_play"
                    "&thumb=%s&theme=%s&video=%s&id=%s&catid=%s&tip=%s&server=%s"
                    % (args[0], args[1], args[2], args[3], args[4], args[5], str(args[6])))
        ajax_data = httptools.downloadpage(play_url, headers=ajax_headers).data
        logger.debug(ajax_data)

        # The iframe src may be quoted with either " or '.
        video_url = scrapertools.find_single_match(ajax_data, '<iframe src="([^"]+)"')
        if not video_url:
            video_url = scrapertools.find_single_match(ajax_data, "<iframe src='([^']+)'")

        itemlist.append(item.clone(action="play", title="%s", contentTitle=item.title, url=video_url))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
예제 #28
0
def findvideos(item):
    """Collect embed/link URLs from the post body of the page.

    Skips "0load" (NETU) links, which this channel does not resolve.

    :param item: channel Item pointing at the post page
    :return: list of playable Items
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    data = scrapertools.find_single_match(
        data, '<div class="entry-content post_content">(.*?)</div>')
    patron = '<(?:iframe src|IFRAME SRC|a href)="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        if "0load" not in url:  # NETU links are skipped
            itemlist.append(
                item.clone(action="play",
                           title="%s",
                           contentTitle=item.title,
                           url=url))
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language,
                                     list_quality)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
예제 #29
0
파일: animejl.py 프로젝트: shlibidon/addon
def findvideos(item):
    """Extract iframe embed URLs from the episode page's video[] array.

    :param item: channel Item pointing at the episode page
    :return: list of playable Items (title's '%s' slot is filled with the
             server name by get_servers_itemlist)
    """
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'video\[\d+\] = \'<iframe.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        # Skip empty captures; everything else becomes a playable item.
        if scrapedurl:
            itemlist.append(
                Item(channel=item.channel,
                     title='%s',
                     url=scrapedurl,
                     action='play'))

    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())

    return itemlist
예제 #30
0
파일: allcalidad.py 프로젝트: Jaloga/xiaomi
def findvideos(item):
    """Build playable items (plus trailer/videolibrary entries) for a movie.

    :param item: channel Item pointing at the movie page
    :return: list of playable Items, followed by trailer-search and
             add-to-videolibrary entries when appropriate
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data

    patron = '<a href="([^"]+)" class="btn btn-xs btn-info".*?<span>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, srv in matches:
        # BUGFIX: was `infoLables=` (typo), which dropped the movie metadata
        # into a junk attribute instead of propagating item.infoLabels.
        new_item = Item(channel=item.channel, url=url, title='%s', action="play", infoLabels=item.infoLabels,
                        language="Latino")
        if "torrent" in srv.lower():
            new_item.server = "Torrent"
        itemlist.append(new_item)

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))

        # "Add this movie to the KODI library" option
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle = item.contentTitle
                                 ))
    return itemlist