예제 #1
0
def findvideostv(item):
    """Build the list of TV episode video links for a whole season.

    Scrapes both the streaming ("movie-online-list") and download
    ("movie-downloadlink-list") spans of the page at item.url, keeping only
    entries whose season attribute matches item.infoLabels['season'].

    Args:
        item: channel Item whose url points at the show page and whose
              infoLabels carry the season being listed.

    Returns:
        list of playable Items sorted by episode number, then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # Streaming links. str() keeps this consistent with the sibling
    # findvideostv variant and avoids a TypeError if season is an int.
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + \
             str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            # Unknown server id or missing connector: detect from the url.
            # (Narrowed from bare 'except:' so Ctrl-C/SystemExit propagate.)
            server = servertools.get_server_from_url(url)

        if server != "directo":
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Episodio "+episode+" ["
            titulo += server.capitalize()+"]   ["+idioma+"] ("+calidad_videos.get(quality)+")"
            item.infoLabels['episode'] = episode

            itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Download links (same capture groups as the streaming pattern)
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if config.get_setting("hidepremium") == "true":
                # Optionally hide premium-only servers per user setting.
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Episodio "+episode+" "
                titulo += server.capitalize()+"   ["+idioma+"] ("+calidad_videos.get(quality)+")"
                item.infoLabels['episode'] = episode
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Sort by episode number then title; 'it' avoids shadowing the argument.
    itemlist.sort(key=lambda it: (int(it.infoLabels['episode']), it.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        # tmdb enrichment is best-effort; the listing works without it.
        pass

    return itemlist
예제 #2
0
def findvideostv(item):
    """Build the list of video links for one specific TV episode.

    Scrapes the streaming ("movie-online-list") and download
    ("movie-downloadlink-list") spans of the page at item.url, keeping only
    entries matching item.infoLabels['episode'] and ['season'].

    Returns:
        list of playable Items sorted by episode number, then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # Episode and season are interpolated into the pattern, so it captures
    # exactly 4 groups: quality, server id, language, url.
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="'+str(item.infoLabels['episode']) +'" season="' + \
             str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'

    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            # Unknown server id or missing connector: detect from the url.
            server = servertools.get_server_from_url(url)

        if server != "directo":
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"

            itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode"))

    # Download links (also 4 capture groups).
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="'+str(item.infoLabels['episode']) +'" season="'+str(item.infoLabels['season']) + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    # BUG FIX: the old loop unpacked 5 values (including 'episode') from a
    # 4-group pattern, raising ValueError on the first match.
    for quality, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    # All clones share the parent's infoLabels, so this is a stable sort key.
    itemlist.sort(key=lambda it: (int(it.infoLabels['episode']), it.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        # tmdb enrichment is best-effort; the listing works without it.
        pass

    return itemlist
예제 #3
0
def play(item):
    """Resolve item.url to its final playable link.

    Three cases are handled: the site's "ecrypt" POST protector, internal
    host urls redirecting via window.location (possibly through a link
    shortener), and already-final urls, which pass through unchanged.
    """
    logger.info()
    itemlist = []

    if 'ecrypt?nombre=' in item.url:
        base, nombre = item.url.split('?nombre=')
        data = httptools.downloadpage(base,
                                      post=urllib.urlencode({'nombre': nombre})).data
        # ~ logger.debug(data)

        # The decrypted page embeds the video in a src attribute; try
        # double quotes first, then single quotes.
        video_url = scrapertools.find_single_match(data, '(?i) src="([^"]+)"')
        if not video_url:
            video_url = scrapertools.find_single_match(data, "(?i) src='([^']+)'")
        if video_url:
            # ~ logger.info(video_url)
            srv = servertools.get_server_from_url(video_url)
            if srv and srv != 'directo':
                itemlist.append(item.clone(url=video_url, server=srv))

    elif item.url.startswith(host):
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)

        video_url = scrapertools.find_single_match(data, "window.location='([^']+)")
        if not video_url:
            video_url = scrapertools.find_single_match(data,
                                                       'window.location="([^"]+)')
        # ~ logger.info(video_url)

        # Discard or resolve known link shorteners
        if video_url.startswith('https://adf.ly/'):
            video_url = scrapertools.decode_adfly(video_url)
            if video_url:
                srv = servertools.get_server_from_url(video_url)
                if srv == 'directo':
                    return itemlist  # server not found or disabled
        elif video_url.startswith('http://uii.io/'):
            video_url = scrapertools.decode_uiiio(video_url)
        elif video_url.startswith('http://srt.am/'):
            video_url = scrapertools.decode_srtam(video_url)

        if video_url:
            itemlist.append(item.clone(url=video_url))

    else:
        itemlist.append(item.clone())

    return itemlist
예제 #4
0
파일: dospelis.py 프로젝트: Jaloga/xiaomi
def play(item):
    """Resolve the playable url, either from a landing page or via ajax.

    With item.url set, the link is scraped from the page itself; otherwise
    the doo_player ajax endpoint is queried with the item's player params.
    """
    logger.info()
    itemlist = []

    if item.url:
        data = do_downloadpage(item.url)
        # ~ logger.debug(data)
        enlace = scrapertools.find_single_match(data, '<a id="link" href="([^"]+)')
        if enlace:
            itemlist.append(item.clone(url=servertools.normalize_url(item.server, enlace)))

    else:
        parametros = {'action': 'doo_player_ajax', 'post': item.dpost,
                      'nume': item.dnume, 'type': item.dtype}
        data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php',
                                      post=urllib.urlencode(parametros),
                                      headers={'Referer': item.referer}).data
        # ~ logger.debug(data)
        enlace = scrapertools.find_single_match(data, "src='([^']+)")
        if not enlace:
            enlace = scrapertools.find_single_match(data, 'src="([^"]+)')
        if enlace:
            if 'jwplayer' in enlace and 'source=' in enlace:
                # Ex: https://www.dospelis.online/jwplayer-2/?source=https%3A%2F%2Fyoutu.be%2Fzcn89lxhEWk&id=71977&type=mp4
                enlace = urllib.unquote(scrapertools.find_single_match(enlace, "source=([^&']+)"))
            elif 'streamcrypt.net/' in enlace:
                # Ex: https://streamcrypt.net/embed/streamz.cc/...
                enlace = scrapertools.decode_streamcrypt(enlace)

            if not enlace:
                return itemlist
            servidor = servertools.get_server_from_url(enlace)
            if servidor and servidor != 'directo':
                itemlist.append(item.clone(url=servertools.normalize_url(servidor, enlace),
                                           server=servidor))

    return itemlist
예제 #5
0
def download_from_best_server(item, ask=False):
    """Resolve every playable server for *item* and download from one.

    With ask=False the sorted server list is tried in order until a
    download completes or is cancelled; with ask=True the user picks the
    server from a selection dialog.

    Args:
        item: Item carrying contentAction, contentChannel and the url.
        ask: when True, show a selection dialog instead of auto-trying.

    Returns:
        dict with at least "downloadStatus" (a STATUS_CODES value).
    """
    logger.info(
        "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
    result = {"downloadStatus": STATUS_CODES.error}

    progreso = platformtools.dialog_progress("Download", "Recupero l'elenco dei server disponibili...")

    # Import the channel module dynamically so any channel can be driven.
    channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])

    progreso.update(50, "Recupero l'elenco dei server disponibili.", "Connessione a %s..." % item.contentChannel)
    if hasattr(channel, item.contentAction):
        play_items = getattr(channel, item.contentAction)(
            item.clone(action=item.contentAction, channel=item.contentChannel))
    else:
        # Channel has no matching resolver: fall back to generic detection.
        play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel))

    # Keep only directly playable entries. NOTE(review): len() and .sort()
    # below rely on Python 2's filter() returning a list.
    play_items = filter(lambda x: x.action == "play", play_items)

    progreso.update(100, "Recupero l'elenco dei server disponibili.", "Server disponibili: %s" % len(play_items),
                    "Identifico i server...")

    # Fill in missing server names by inspecting each url; the cancel check
    # lets the user abort during this potentially slow pass.
    for i in play_items:
        if not i.server:
            i.server = servertools.get_server_from_url(i.url)
            if progreso.iscanceled():
                return {"downloadStatus": STATUS_CODES.canceled}

    play_items.sort(key=sort_method)

    if progreso.iscanceled():
        return {"downloadStatus": STATUS_CODES.canceled}

    progreso.close()

    if not ask:
        # Walk the server list until one of them works
        for play_item in play_items:
            play_item = item.clone(**play_item.__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels

            result = download_from_server(play_item)

            if progreso.iscanceled():
                result["downloadStatus"] = STATUS_CODES.canceled

            # Stop trying further options on cancel as well as on completion
            if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]:
                break
    else:
        seleccion = platformtools.dialog_select("Selezionare il server", [s.title for s in play_items])
        if seleccion > -1:
            play_item = item.clone(**play_items[seleccion].__dict__)
            play_item.contentAction = play_item.action
            play_item.infoLabels = item.infoLabels
            result = download_from_server(play_item)
        else:
            result["downloadStatus"] = STATUS_CODES.canceled

    return result
예제 #6
0
def findvideos(item):
    """List the play-box mirrors of a movie page plus a videolibrary entry."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)

    patron = ('<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)"'
              ' frameborder="0" scrolling="no" allowfullscreen></iframe>')

    for opcion, enlace in re.compile(patron, re.DOTALL).findall(data):
        # The language label lives in the tab list, keyed by the option id.
        patron_idioma = ('<li><a class="options" href="#option-%s">'
                         '<b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">') % opcion
        idioma = scrapertools.find_single_match(data, patron_idioma)
        idioma = idioma.replace('Español ', '').replace('B.S.O. ', '')

        servidor = servertools.get_server_from_url(enlace)
        titulo = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, servidor.title(), idioma)
        itemlist.append(item.clone(action='play', url=enlace, title=titulo, extra1=titulo,
                                   server=servidor, text_color=color3))

    itemlist.append(Item(channel=item.channel,
                         title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                         url=item.url, action="add_pelicula_to_library",
                         thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                         extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
예제 #7
0
def findvideos(item):
    """Find the single embedded video of a post, trying several layouts."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # The embed appears in one of several markup variants; the first
    # pattern that matches wins.
    patrones = [
        '<p><iframe src="([^"]+)',
        '<div class="single_player">\s*<iframe.*? src="([^"]+)',
        '<div class="single_player">\s*<a href="([^"]+)',
        '<p>\[\w+\]([^\[]+)',  # Ex: <p>[vimeo]https://vimeo.com/...[/vimeo]</p>
    ]
    url = ''
    for patron in patrones:
        url = scrapertools.find_single_match(data, patron)
        if url:
            break

    if url:
        url = url.replace('&#038;', '&').replace('&amp;', '&')
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':
            itemlist.append(Item(channel=item.channel,
                                 action='play',
                                 server=servidor,
                                 title=servidor.capitalize(),
                                 url=url))

    return itemlist
예제 #8
0
def findvideos(item):
    """List the mirrors from the links table; optionally add a videolibrary entry."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = ('<tr><td> <a class="link_a" href="([^"]+)".*?'
              '<td> (.*?)</td><td> (.*?)</td><td> (.*?)</td>')

    for enlace, _srv_txt, calidad, idioma in scrapertools.find_multiple_matches(data, patron):
        # The scraped server name is ignored; detection from the url is
        # more reliable.
        servidor = servertools.get_server_from_url(enlace)
        titulo = '%s [%s] [%s] [%s]' % (item.contentTitle, servidor, calidad,
                                        idioma)
        itemlist.append(item.clone(action="play",
                                   title=titulo,
                                   fulltitle=item.title,
                                   url=enlace,
                                   language=idioma,
                                   contentTitle=item.contentTitle,
                                   quality=calidad,
                                   server=servidor))

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Agregar esta pelicula a la Videoteca[/COLOR]',
                             url=item.url,
                             action="add_pelicula_to_library",
                             extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
예제 #9
0
def findvideos(item):
    """List embed and download mirrors for a page.

    Embed links hide the target url base64-encoded inside a /replayer/ path
    where 'R' acts as a padding filler; download links live in the table
    with id "dlnmt".
    """
    logger.info()
    itemlist = []

    IDIOMAS = {'Español': 'Esp', 'Latino': 'Lat', 'Subtitulado': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Embed links
    patron = '<a href="#embed\d+" data-src="([^"]+)" class="([^"]+)"(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for urlcod, lang, resto in matches:
        if urlcod.startswith('//'): urlcod = 'https:' + urlcod
        elif urlcod.startswith('/'): urlcod = HOST + urlcod[1:]
        cod = urlcod.replace(HOST + 'replayer/', '').split('RRRRR')[0]
        # ~ logger.info('%s %s' % (cod, urlcod))
        # Re-pad to a multiple of 4 with 'R' so b64decode accepts the code,
        # then drop the extra decoded bytes again afterwards.
        numpad = len(cod) % 4
        if numpad > 0: cod += 'R' * (4 - numpad)
        try:
            url = base64.b64decode(cod)
            if numpad > 0: url = url[:-(4 - numpad)]
        except Exception:
            # Narrowed from bare 'except:'; undecodable codes are skipped below.
            url = None
        if not url: 
            logger.info('No detectada url. %s %s' % (cod, urlcod))
            continue
        
        servidor = servertools.get_server_from_url(url)
        if not servidor or (servidor == 'directo' and 'storage.googleapis.com/' not in url): 
            logger.info('No detectado servidor, url: %s' % url)
            continue
        url = servertools.normalize_url(servidor, url)

        qlty = scrapertools.find_single_match(resto, '([^>]+)</div>$')

        itemlist.append(Item( channel = item.channel, action = 'play', server = servidor,
                              title = '', url = url, 
                              language = IDIOMAS.get(lang, lang), quality = qlty, quality_num = puntuar_calidad(qlty), other='e'
                       ))

    # Download links
    bloque = scrapertools.find_single_match(data, 'id="dlnmt"(.*?)</table>')
    matches = re.compile('<tr>(.*?)</tr>', re.DOTALL).findall(bloque)
    for lin in matches:
        if '<th' in lin: continue  # skip the header row
        tds = scrapertools.find_multiple_matches(lin, '<td[^>]*>(.*?)</td>')
        url = scrapertools.find_single_match(tds[0], ' href="([^"]+)')
        servidor = scrapertools.find_single_match(tds[1], '<span>(.*?)</span>')
        lang = tds[2]
        qlty = tds[3]
        if '/link/?go=' in url: url = url.split('/link/?go=')[1]
        if not url or not servidor: continue

        itemlist.append(Item( channel = item.channel, action = 'play', server = servertools.corregir_servidor(servidor),
                              title = '', url = url, 
                              language = IDIOMAS.get(lang, lang), quality = qlty, quality_num = puntuar_calidad(qlty), other='d'
                       ))

    return itemlist
예제 #10
0
파일: zoowoman.py 프로젝트: Jaloga/xiaomi
def extraer_embeds(data):
    """Extract playable embed urls from iframe/source tags in a page.

    Social-widget urls are skipped; archive.org media is kept as 'directo',
    everything else must resolve to a known server.
    """
    itemlist = []

    for etiqueta in ('iframe', 'source'):
        for url in scrapertools.find_multiple_matches(data, '<%s.*? src="([^"]+)' % etiqueta):
            if 'facebook.com' in url or 'twitter.com' in url or 'google.com' in url:
                continue  # social widgets, not videos
            if url.startswith('//'):
                url = 'https:' + url
            if '.us.archive.org' in url:
                servidor = 'directo'
            else:
                servidor = servertools.get_server_from_url(url)
                if not servidor or servidor == 'directo':
                    continue
                url = servertools.normalize_url(servidor, url)

            itemlist.append(Item(channel='zoowoman',
                                 action='play',
                                 server=servidor,
                                 language='?',
                                 title='',
                                 url=url,
                                 other='iframe/source'))

    return itemlist
예제 #11
0
def play(item):
    """Resolve the final video url for items carrying an ajax _post payload.

    POSTs item._post to item.url and reads the 'url' field of the json
    reply. peliculonhd urls are special-cased into a direct m3u8 link;
    otherwise the server is detected from the resolved url. On any failure
    the url is cleared so the caller gets an unplayable item.
    """
    logger.info()
    if item._post:
        post = urllib.urlencode(item._post)
        try:
            data = httptools.downloadpage(item.url,
                                          post=post,
                                          headers=item._ref).json
            item.url = data.get('url', '')

            if 'peliculonhd' in item.url:
                # Build the hls playlist url directly from the embed url.
                url = item.url.replace('embed/', 'hls/')
                if not url.endswith('.m3u8'):
                    url += '.m3u8'
                data = httptools.downloadpage(url).data
                new_url = scrapertools.find_single_match(data, '(/mpegURL.*)')
                item.url = 'https://videos.peliculonhd.com%s' % new_url
                return [item]

            item.server = servertools.get_server_from_url(item.url)

        except Exception:
            # Narrowed from bare 'except:' so Ctrl-C/SystemExit propagate.
            logger.error('Error get link %s' % item.url)
            item.url = ''

    return [item]
예제 #12
0
def play(item):
    """Resolve the doo_player ajax embed to a final playable url.

    POSTs the player parameters, extracts the iframe src and unwraps two
    known protectors: an internal redirect carrying a base64 'y' parameter
    and hideiframe.site. Urls whose server cannot be identified are
    dropped.
    """
    logger.info()
    itemlist = []

    post = urllib.urlencode( {'action': 'doo_player_ajax', 'post': item.dpost, 'nume': item.dnume, 'type':'movie'} )
    data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
    # ~ logger.debug(data)

    url = scrapertools.find_single_match(data, "src='([^']+)'")

    if url.startswith(host):
        # Internal redirect: the real url travels base64-encoded in 'y='.
        locationurl = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get('location', '')
        if locationurl != '':
            try:
                y = scrapertools.find_single_match(locationurl, "y=([^&]+)")
                if y: url = base64.b64decode(y)
                else: url = locationurl
            except Exception:
                # Narrowed from bare 'except:'; fall back to the raw location.
                url = locationurl

    elif url.startswith('https://hideiframe.site/protect.php?'):
        y = scrapertools.find_single_match(url, 'y=([^&]+)')
        if y: url = base64.b64decode(y)
        else: url = ''

    if url != '': 
        servidor = servertools.get_server_from_url(url)
        if servidor == 'directo': return itemlist  # server not found or disabled

        itemlist.append(item.clone(url = url, server = servidor))

    return itemlist
예제 #13
0
파일: aniyet.py 프로젝트: shlibidon/addon
def play(item):
    """Resolve the final video url for the selected player option.

    Walks the site's multi-step protector: read the iframe of the player
    tab picked by item.opt, fetch it to obtain a 'tid', request the
    trhide url (tid reversed) with a matching referer, and take the
    redirect Location as the video url. Danimados locations need one
    extra fetch to pull the real source.
    """
    logger.info()
    itemlist = list()

    # The page holds one TPlayerTb div per option; item.opt selects ours.
    soup = create_soup(item.url).find("div", class_="TPlayerTb", id=item.opt)
    url = scrapertools.find_single_match(str(soup), 'src="([^"]+)"')
    url = scrapertools.decodeHtmlentities(url).replace("&#038;", "&")
    data = httptools.downloadpage(url,
                                  headers={
                                      "referer": item.url
                                  },
                                  follow_redirects=False).data
    id = scrapertools.find_single_match(data, '<iframe.*?tid=([^&]+)&')
    # trhex is the tid reversed; the protector validates the referer below.
    hide = "https://aniyet.com/?trhide=1&trhex=%s" % id[::-1]
    referer = "https://aniyet.com/?trhide=1&tid=%s" % id
    data = httptools.downloadpage(hide,
                                  headers={"referer": referer},
                                  follow_redirects=False)
    # The video url comes back as the redirect Location header.
    url = data.headers.get('location', '')
    if 'danimados' in url:
        # danimados locations arrive scheme-less; prepend https and scrape
        # the real source out of the player page.
        data = httptools.downloadpage('https:' + url).data

        if item.server in ['sendvid']:
            url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
        else:
            url = scrapertools.find_single_match(
                data, 'sources: \[{file: "([^"]+)"')

    srv = servertools.get_server_from_url(url)
    itemlist.append(item.clone(url=url, server=srv))

    return itemlist
예제 #14
0
파일: cinetux.py 프로젝트: vguardiola/addon
def play(item):
    """Normalize item.url into a directly playable link for cinetux sources.

    api.cinetux / okru pages hide the video id after a '#' in an img src;
    'links' pages hold the real url in one of several anchor/iframe forms,
    possibly behind a goo.gl shortener.
    """
    logger.info()
    itemlist = []
    if "api.cinetux" in item.url or item.server == "okru":
        data = httptools.downloadpage(item.url,
                                      headers={'Referer': item.extra}).data.replace("\\", "")
        video_id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
        item.url = ("https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs"
                    "&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=") + video_id
        if item.server == "okru":
            item.url = "https://ok.ru/videoembed/" + video_id
    elif "links" in item.url or "www.cinetux.me" in item.url:
        data = httptools.downloadpage(item.url).data
        enlace = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
        if enlace == "":
            enlace = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
            if enlace == "":
                enlace = scrapertools.find_single_match(data, 'replace."([^"]+)"')
        elif "goo.gl" in enlace:
            # Shortener: take the redirect target without fetching the body.
            enlace = httptools.downloadpage(enlace,
                                            follow_redirects=False,
                                            only_headers=True).headers.get("location", "")
        item.url = enlace
    item.thumbnail = item.contentThumbnail
    item.server = servertools.get_server_from_url(item.url)
    return [item]
예제 #15
0
def findvideos(item):
    """List the lazy-loaded embeds found inside the 'video-embed' block."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    logger.info(data)

    bloque = scrapertools.find_single_match(data, '<div class="video-embed">(.*?)</div>')
    enlaces = scrapertools.find_multiple_matches(
        bloque, '<iframe src="[^"]+" data-lazy-src="([^"]+)".*?</iframe>')

    for enlace in enlaces:
        itemlist.append(item.clone(action='play',
                                   title=item.title,
                                   server=servertools.get_server_from_url(enlace),
                                   mediatype='movie',
                                   url=enlace))

    # Decorate every entry with the shared metadata and a server-tagged title.
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = __channel__
        videoitem.title = "%s [COLOR yellow](%s)[/COLOR]" % (item.title,
                                                             videoitem.server)

    return itemlist
예제 #16
0
def get_video_url(page_url, url_referer=''):
    """Return [label, url] pairs for an embedy.cc video.

    First tries the video.get API with the id extracted from page_url; if
    that yields nothing, scrapes the page's iframe and delegates to the
    matching server connector.

    Args:
        page_url: embedy.cc embed url.
        url_referer: unused here; kept for connector-interface compatibility.

    Returns:
        list of [quality_label, media_url] pairs (possibly empty), or the
        delegated connector's result.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    
    vid = scrapertools.find_single_match(page_url, "embedy.cc/embed/([A-z0-9=]+)")
    if vid:
        data = httptools.downloadpage('https://embedy.cc/video.get/', post={'video':vid}, headers={'Referer': page_url}).data
        # ~ logger.debug(data)
        try:
            data_json = jsontools.load(data)
            for n in data_json['response']:
                for f in data_json['response'][n]['files']:
                    video_urls.append([f, data_json['response'][n]['files'][f]])
        except Exception:
            # Narrowed from bare 'except:'; malformed json just means no results.
            pass

    if len(video_urls) == 0:
        # API gave nothing: fall back to the iframe embedded in the page.
        data = httptools.downloadpage(page_url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, '<iframe.*? src="([^"]+)')
        
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo' and servidor != 'embedy': 
            url = servertools.normalize_url(servidor, url)
            server_module = __import__('servers.%s' % servidor, None, None, ["servers.%s" % servidor])
            return server_module.get_video_url(url)

    return video_urls
def search_links_filmaff(item):
    """Scrape trailer links from a FilmAffinity page, plus a manual-search entry."""
    logger.info("fusionse.channels.trailertools search_links_filmaff")

    itemlist = []
    data = scrapertools.downloadpage(item.url)
    if "iframe" not in data:
        itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
    else:
        patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
        for titulo, enlace in scrapertools.find_multiple_matches(data, patron):
            # Rewrite the embed url into a watchable one.
            trailer_url = urlparse.urljoin("http:", enlace).replace("embed/", "watch?v=")
            server = servertools.get_server_from_url(trailer_url)
            titulo = unicode(titulo, encoding="utf-8", errors="ignore")
            titulo = scrapertools.htmlclean(titulo)
            titulo += "  [" + server + "]"
            if item.contextual:
                titulo = "[COLOR white]%s[/COLOR]" % titulo
            itemlist.append(item.clone(title=titulo, url=trailer_url, server=server,
                                       action="play", text_color="white"))

    if keyboard:
        plantilla = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
        itemlist.append(item.clone(title=plantilla % "Búsqueda Manual en Filmaffinity",
                                   action="manual_search", thumbnail="", text_color="green", extra="filmaffinity"))
    return itemlist
예제 #18
0
파일: seriesflv.py 프로젝트: Jaloga/xiaomi
def findvideos(item):
    """List watch and download mirrors from the episode page's two tables."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # ~ logger.debug(data)

    for seccion in ('Ver', 'Descargar'):
        bloque = scrapertools.find_single_match(
            data, '<div class="titles4 font4 bold">%s.*?<tbody>(.*?)</table>' % seccion)
        # ~ logger.debug(bloque)

        for fila in scrapertools.find_multiple_matches(bloque, '<tr>(.*?)</tr>'):
            # Preferred: a direct data-enlace attribute.
            url = scrapertools.find_single_match(fila, ' data-enlace="([^"]+)')
            if url:
                server = servertools.get_server_from_url(url)
                if not server or server == 'directo':
                    continue
                url = servertools.normalize_url(server, url)
            else:
                # Fallback: the href plus the server name from the favicon domain.
                url = scrapertools.find_single_match(fila, ' href="([^"]+)')
                if url.startswith('/'):
                    url = host + url[1:]
                server = normalize_server(
                    scrapertools.find_single_match(fila, '\?domain=([^".]+)'))

            # ~ logger.info('%s %s' % (server, url))
            if not url or not server:
                continue

            lang = scrapertools.find_single_match(fila, 'img/language/([^\.]+)')

            itemlist.append(Item(channel=item.channel, action='play', server=server,
                                 title='', url=url,
                                 language=IDIOMAS.get(lang, lang)))

    return itemlist
예제 #19
0
def findvideos(item):
    """List episode mirrors with colored language tags plus prev/next links.

    Scrapes the episode table (language-flag icon, favicon domain, link
    text), builds one playable Item per row, then appends previous/next
    chapter navigation unless listing from 'capitulos'. The result is
    post-processed by FilterTools and AutoPlay.
    """
    logger.info()
    itemlist = []
    # Colored audio labels keyed by the flag-icon name found in the table.
    audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
             'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
    data = httptools.downloadpage(item.url).data
    patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" ' \
             'width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/>&nbsp;(' \
             '[^<]+)<\/td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Pagination links for chapter-by-chapter navigation.
    anterior = scrapertools.find_single_match(data,
                                              '<th scope="col"><a href="([^"]+)" rel="prev" '
                                              'class="local-link">Anterior</a></th>')
    siguiente = scrapertools.find_single_match(data,
                                               '<th scope="col"><a href="([^"]+)" rel="next" '
                                               'class="local-link">Siguiente</a></th>')

    for scrapedid, scrapedurl, scrapedserv in matches:
        url = scrapedurl
        # Server is detected from the url; scrapedserv (the link text) is unused.
        server = servertools.get_server_from_url(url).lower()
        title = item.title + ' audio ' + audio[scrapedid] + ' en ' + server
        extra = item.thumbnail
        thumbnail = servertools.guess_server_thumbnail(server)

        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title=title,
                             fulltitle=item.contentSerieName,
                             url=url,
                             thumbnail=thumbnail,
                             extra=extra,
                             language=IDIOMAS[scrapedid],
                             server=server,
                             ))
    if item.extra1 != 'capitulos':
        if anterior != '':
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title='Capitulo Anterior',
                                 url=anterior,
                                 thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png'
                                 ))
        if siguiente != '':
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title='Capitulo Siguiente',
                                 url=siguiente,
                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
                                 ))

    # Required by FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
예제 #20
0
def findvideos(item):
    """Collect playable links for *item*: embedded iframes plus the download table."""
    logger.info()
    itemlist = []

    page = do_downloadpage(item.url)

    # Embedded video sources
    for embed_url in scrapertools.find_multiple_matches(page, "(?i)<div class='pframe'><iframe.*?src=(?:'|\")([^'\"]+)"):
        if 'youtube.com' in embed_url:
            continue  # skip trailers
        server_id = servertools.get_server_from_url(embed_url)
        if not server_id or server_id == 'directo':
            continue
        itemlist.append(Item(channel=item.channel, action='play', server=server_id,
                             title='', url=servertools.normalize_url(server_id, embed_url),
                             language='Esp'))

    # Download table rows
    download_block = scrapertools.find_single_match(page, "<div id='download'(.*?)</table></div></div></div>")
    for row in scrapertools.find_multiple_matches(download_block, "<tr id='link-[^']+'>(.*?)</tr>"):
        link = scrapertools.find_single_match(row, " href='([^']+)")
        server_id = corregir_servidor(scrapertools.find_single_match(row, "domain=(?:www.|dl.|)([^'.]+)"))
        if not link or not server_id:
            continue
        # Download rows always advertise the same quality/language
        itemlist.append(Item(channel=item.channel, action='play', server=server_id,
                             title='', url=link, language='Esp', quality='HD',
                             other='d'))

    return itemlist
예제 #21
0
def findvideos(item):
    """Resolve the intermediate player pages and return the final video link."""
    logger.info()
    host = 'https://www.locopelis.tv/'
    itemlist = []

    # Two redirect hops to reach the player URL carrying the video hash
    player_url = get_link(get_source(item.url))
    player_url = get_link(get_source(player_url))
    video_hash = scrapertools.find_single_match(player_url, 'http.*?h=(\w+)')

    # Exchange the hash for the real video URL through the player API
    api_url = '%s%s' % (host, 'playeropstream/api.php')
    response = httptools.downloadpage(api_url, post=urllib.urlencode({'h': video_hash})).json
    video_url = response['url']
    server_name = servertools.get_server_from_url(video_url)

    itemlist.append(
        Item(channel=item.channel,
             title='%s' % server_name,
             url=video_url,
             action='play',
             server=server_name,
             infoLabels=item.infoLabels))

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle))

    return itemlist
def play(item):
    """Resolve item.url to the final hoster URL.

    The page's first src attribute may point to a flixplayer wrapper (scrape
    its "link" field), or to an on-site ?h= player (POST the hash to r.php
    and read the redirect Location). Otherwise the src itself is the target.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    url = scrapertools.find_single_match(data, 'src="([^"]+)"')

    if '/flixplayer.' in url:
        # flixplayer wrapper: the real link is in a JSON-ish "link" field
        data = httptools.downloadpage(url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, 'link":"([^"]+)"')

    elif host in url and '?h=' in url:
        # On-site player: exchange the hash via r.php; the target comes back
        # as a redirect Location that we deliberately do not follow.
        fid = scrapertools.find_single_match(url, "h=([^&]+)")
        url2 = url.split('?h=')[0] + 'r.php'
        resp = httptools.downloadpage(url2,
                                      post='h=' + fid,
                                      headers={'Referer': url},
                                      follow_redirects=False)
        if 'location' in resp.headers: url = resp.headers['location']
        else: url = None

    if url:
        servidor = servertools.get_server_from_url(url)
        # ~ if servidor and servidor != 'directo': # discarded: server can legitimately be 'directo' when coming from flixplayer
        url = servertools.normalize_url(servidor, url)
        itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
예제 #23
0
def play(item):
    """Resolve the real hoster URL when the link points back to the site itself."""
    logger.info()

    # External links need no resolving
    if not item.url.startswith(host):
        return [item.clone()]

    itemlist = []
    page = httptools.downloadpage(item.url, headers={'Referer': item.referer}).data
    # ~ logger.debug(page)

    # The target may be exposed through a window.open() variable, a hardcoded
    # js variable, or (as a last resort) the final redirect anchor.
    js_var = scrapertools.find_single_match(page, 'onclick="window\.open\(([^\)]+)\);"')
    if js_var:
        target = scrapertools.find_single_match(page, "%s\s*=\s*'([^']+)" % js_var)
    else:
        target = scrapertools.find_single_match(page, "enlaceeee\s*=\s*'([^']+)")
        if not target:
            target = scrapertools.find_multiple_matches(page, '<a id="link-redirect".*? href="([^"]+)')[-1]

    if target:
        server_id = servertools.get_server_from_url(target)
        if server_id and server_id != 'directo':
            itemlist.append(item.clone(url=servertools.normalize_url(server_id, target),
                                       server=server_id))

    return itemlist
예제 #24
0
파일: miradetodo.py 프로젝트: x7r6xx/repo
def findvideos(item):
    """Scrape the player tabs of a miradetodo page and return playable items.

    Each "option" tab carries a language label and a lazily-loaded iframe;
    fastplay links embed the final URL directly, other hosts need one more
    page hop before the iframe src appears.

    Fix: removed the dead locals ``url_list`` and ``duplicados`` that were
    created but never used.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    src = data
    patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, videoitem in matches:
        # Language label shown on the tab header for this option
        lang = scrapertools.find_single_match(
            src, '<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
        if 'audio ' in lang.lower():
            lang = lang.lower().replace('audio ', '')
            lang = lang.capitalize()

        data = get_source(videoitem)
        video_urls = scrapertools.find_multiple_matches(
            data, '<li><a href=(.*?)><span')
        for video in video_urls:
            video_data = get_source(video)
            if not 'fastplay' in video:
                # Non-fastplay links hide the player behind one more page
                new_url = scrapertools.find_single_match(
                    video_data, '<li><a href=(.*?srt)><span')
                data_final = get_source(new_url)
            else:
                data_final = video_data
            url = scrapertools.find_single_match(data_final,
                                                 'iframe src=(.*?) scrolling')
            quality = item.quality
            server = servertools.get_server_from_url(url)
            title = item.contentTitle + ' [%s] [%s]' % (server, lang)
            if item.quality != '':
                title = item.contentTitle + ' [%s] [%s] [%s]' % (server,
                                                                 quality, lang)

            if url != '':
                itemlist.append(
                    item.clone(title=title,
                               url=url,
                               action='play',
                               server=server,
                               language=lang))

    if item.infoLabels['mediatype'] == 'movie':
        if config.get_videolibrary_support(
        ) and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title=
                     '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))

    return itemlist
예제 #25
0
def play(item):
    """Tag the item with the server detected from its own URL and return it."""
    logger.info()
    from core import servertools
    detected_server = servertools.get_server_from_url(item.url)
    return [item.clone(server=detected_server)]
예제 #26
0
def findvideos(item):
    """List playable links for a movie or an episode.

    Movies: a JSON endpoint returns entries grouped by language, each with a
    token and a player name; SuperVideo/FastPlayer are served as 'directo',
    other player names map to hosters. Episodes: links are embedded as
    data-url-* attributes in the page markup.
    """
    logger.info()
    itemlist = []

    # Site language codes -> display labels
    IDIOMAS = {'es_la': 'Lat', 'es': 'Esp', 'vose': 'VOSE', 'en': 'VO'}

    if item.contentType == 'movie':
        if not item.id_pelicula:
            # Recover the movie id from the page when the caller didn't pass it
            data = do_downloadpage(item.url)
            item.id_pelicula = scrapertools.find_single_match(
                data, 'Idpelicula\s*=\s*"([^"]+)')

        data = do_downloadpage(host + 'frm/obtener-enlaces-pelicula.php',
                               post={'pelicula': item.id_pelicula})
        # ~ logger.debug(data)
        enlaces = jsontools.load(data)
        for lang in enlaces:
            for it in enlaces[lang]:
                # ~ servidor = 'directo' if it['reproductor_nombre'] == 'SuperVideo' else it['reproductor_nombre'].lower()
                servidor = 'directo' if it['reproductor_nombre'] in [
                    'SuperVideo', 'FastPlayer'
                ] else it['reproductor_nombre'].lower()
                itemlist.append(
                    Item(channel=item.channel,
                         action='play',
                         server=servidor,
                         title='',
                         url='https://directv.clivertv.to/getFile.php?hash=' +
                         it['token'],
                         language=IDIOMAS.get(lang, lang),
                         other=it['reproductor_nombre']
                         if servidor == 'directo' else ''))

    else:
        data = do_downloadpage(item.url)
        # ~ logger.debug(data)
        # Narrow the page down to this episode/season's block
        data = scrapertools.find_single_match(
            data, 'data-numcap="%s" data-numtemp="%s"(.*?)>' %
            (item.contentEpisodeNumber, item.contentSeason))

        for opc in [
                'data-url-es', 'data-url-es-la', 'data-url-vose', 'data-url-en'
        ]:
            url = scrapertools.find_single_match(data, '%s="([^"]+)' % opc)
            if url:
                servidor = servertools.get_server_from_url(url)
                if not servidor or servidor == 'directo': continue
                # Derive the language code back from the attribute name
                lang = opc.replace('data-url-', '').replace('-', '_')
                itemlist.append(
                    Item(channel=item.channel,
                         action='play',
                         server=servidor,
                         title='',
                         url=url,
                         language=IDIOMAS.get(lang, lang)))

    return itemlist
def findvideos(item):
    """Collect playable links from the 'playeroptionsul' player tabs.

    Each option carries a flag icon (language) and either an inline iframe
    or data-* attributes that are resolved through get_url().
    """
    logger.info()
    itemlist = []

    # Flag filename codes -> display labels
    IDIOMAS = {'es': 'Esp', 'mx': 'Lat', 'en': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Video sources
    bloque = scrapertools.find_single_match(
        data, "<ul id='playeroptionsul'(.*?)</ul>")

    matches = scrapertools.find_multiple_matches(
        bloque, "<li id='player-option-(\d+)'(.*?)</li>")
    for optnum, enlace in matches:
        # ~ logger.debug(enlace)

        # Language comes from the flag image filename
        lang = scrapertools.find_single_match(enlace,
                                              "/img/flags/([^.']+)").lower()

        # The source box matching this option number holds the iframe (if any)
        bloque = scrapertools.find_single_match(
            data,
            "<div id='source-player-%s' class='source-box'><div class='pframe'>(.*?)</div></div>"
            % optnum)
        # ~ logger.debug(bloque)

        urls = scrapertools.find_multiple_matches(
            bloque, '(?i)<iframe.*? src=(?:"|\')([^"\']+)')
        if not urls:
            # No inline iframe: resolve through the ajax endpoint instead
            dtype = scrapertools.find_single_match(enlace,
                                                   "data-type='([^']+)")
            dpost = scrapertools.find_single_match(enlace,
                                                   "data-post='([^']+)")
            dnume = scrapertools.find_single_match(enlace,
                                                   "data-nume='([^']+)")
            if not dtype or not dpost or not dnume or dnume == 'trailer':
                continue
            urls = [get_url(dpost, dnume, dtype, item.url)]

        for url in urls:
            if not url: continue
            # ~ logger.info(url)
            servidor = servertools.get_server_from_url(url)
            if not servidor or servidor == 'directo': continue
            url = servertools.normalize_url(servidor, url)

            itemlist.append(
                Item(channel=item.channel,
                     action='play',
                     server=servidor,
                     title='',
                     url=url,
                     language=IDIOMAS.get(lang, lang)))

    return itemlist
def play(item):
    """Resolve item.url to the final playable URL.

    Handles three cases: /o.php?l= links whose target is wrapped in several
    base64 layers, plain links resolved via redirect headers or embedded
    src attributes, and stream-mx.com embeds with their own JSON source list.

    Fix: guard the stream-mx.com check — ``url`` can be ``None`` when the
    base64 unwrap fails, and the previous unguarded ``in url`` raised
    TypeError in that case.
    """
    logger.info()
    itemlist = []

    if '/o.php?l=' in item.url:
        url = scrapertools.find_single_match(item.url, "/o\.php\?l=(.*)")
        # Peel repeated base64 layers until an http url appears (bounded)
        for i in range(9):  # range(5)
            url = base64.b64decode(url)
            if url.startswith('http'): break
        if not url.startswith('http'): url = None
    else:
        item.url = item.url.replace('&#038;', '&')
        resp = httptools.downloadpage(item.url,
                                      headers={'Referer': item.referer},
                                      follow_redirects=False)
        if 'location' in resp.headers:
            url = resp.headers['location']
        else:
            # ~ logger.debug(resp.data)
            # Fall through progressively looser src patterns
            url = scrapertools.find_single_match(resp.data, "src='([^']+)")
            if not url:
                url = scrapertools.find_single_match(resp.data, 'src="([^"]+)')
            if not url:
                url = scrapertools.find_single_match(resp.data, 'src=([^ >]+)')
            if not url:
                url = scrapertools.find_single_match(resp.data,
                                                     '"embed_url":"([^"]+)')

    if url and 'stream-mx.com/' in url:
        fid = scrapertools.find_single_match(url, "id=([^&]+)")
        if not fid: return itemlist
        url = 'https://stream-mx.com/player.php?id=%s&v=2&ver=si' % fid
        data = httptools.downloadpage(url, headers={'Referer': item.url}).data
        # ~ logger.debug(data)
        bloque = scrapertools.find_single_match(data, '"sources":\s*\[(.*?)\]')
        for enlace in scrapertools.find_multiple_matches(bloque, "\{(.*?)\}"):
            v_url = scrapertools.find_single_match(enlace,
                                                   '"file":\s*"([^"]+)')
            if not v_url: continue
            v_type = scrapertools.find_single_match(enlace,
                                                    '"type":\s*"([^"]+)')
            if v_type == 'hls':
                itemlist.append(item.clone(url=v_url, server='m3u8hls'))
            else:
                v_lbl = scrapertools.find_single_match(enlace,
                                                       '"label":\s*"([^"]+)')
                itemlist.append([v_lbl, v_url])

    elif url:
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':
            url = servertools.normalize_url(servidor, url)
            itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
예제 #29
0
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    """Build the list of playable links for one page, honouring the user's
    language filter.

    filtro_idioma == 3 (or item.filtro set) means "show everything";
    otherwise only links whose language maps to filtro_idioma are listed,
    and the filtered-out languages are offered behind a "show filtered
    links" entry.

    Fix: the filter branch referenced an undefined ``language`` variable
    (NameError for type == "descarga"; a stale loop leak for "online") —
    the per-link language is ``scrapedlanguage``.
    """
    logger.info()
    lista_enlaces = []
    matches = []
    if type == "online"  : t_tipo = "Ver Online"
    if type == "descarga": t_tipo = "Descargar"
    data = data.replace("\n","")
    if type == "online":
        patron  = '(?is)#(option-[^"]+).*?png">([^<]+)'
        match = scrapertools.find_multiple_matches(data, patron)
        for scrapedoption, language in match:
            patron = '(?s)id="' + scrapedoption +'".*?metaframe.*?src="([^"]+)'
            url = scrapertools.find_single_match(data, patron)
            if "goo.gl" in url:
                # Resolve the shortener through its redirect Location header
                url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location","")
            server = servertools.get_server_from_url(url)
            matches.append([url, server, "", language.strip(), t_tipo])
    bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
    bloque2 = bloque2.replace("\t","").replace("\r","")
    patron  = '(?s)optn" href="([^"]+)'
    patron += '.*?title="([^"]+)'
    patron += '.*?src.*?src="[^>]+"\s/>([^<]+)'
    patron += '.*?src="[^>]+"\s/>([^<]+)'
    patron += '.*?/span>([^<]+)'
    matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
    filtrados = []
    for match in matches:
        scrapedurl = match[0]
        scrapedserver = match[1]
        scrapedcalidad = match[2]
        scrapedlanguage = match[3]
        scrapedtipo = match[4]
        if t_tipo.upper() not in scrapedtipo.upper():
            continue
        title = "   Mirror en " + scrapedserver.split(".")[0] + " (" + scrapedlanguage + ")"
        if len(scrapedcalidad.strip()) > 0:
            title += " (Calidad " + scrapedcalidad.strip() + ")"

        if filtro_idioma == 3 or item.filtro:
            lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
                                            url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage, extra=item.url))
        else:
            idioma = dict_idiomas[scrapedlanguage]
            if idioma == filtro_idioma:
                lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",  url=scrapedurl,
                                                extra=item.url))
            else:
                if scrapedlanguage not in filtrados:
                    filtrados.append(scrapedlanguage)
    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))
    return lista_enlaces
예제 #30
0
def play(item):
    """Fetch the page behind item.url and extract the playable link."""
    logger.info()
    itemlist = []

    if item.enlace:
        # POST form lookup; the response embeds the player in a src attribute
        form = urllib.urlencode({'nombre': item.enlace})
        # ~ logger.debug(form)
        page = httptools.downloadpage(item.url, post=form).data
        # ~ logger.debug(page)

        link = (scrapertools.find_single_match(page, ' src="([^"]+)"')
                or scrapertools.find_single_match(page, " src='([^']+)'"))

        if link != '':
            itemlist.append(
                item.clone(url=link,
                           server=servertools.get_server_from_url(link)))

    else:
        page = httptools.downloadpage(item.url).data
        # ~ logger.debug(page)

        link = (scrapertools.find_single_match(page, "window.location='([^']+)")
                or scrapertools.find_single_match(page, 'window.location="([^"]+)'))

        # Discard or resolve URL shorteners
        if link.startswith('https://adf.ly/'):
            link = scrapertools.decode_adfly(link)
            if link:
                item.server = servertools.get_server_from_url(link)
                if item.server == 'directo':
                    # server not found or disabled
                    return itemlist
        elif link.startswith('http://uii.io/'):
            link = scrapertools.decode_uiiio(link)

        if link != '':
            itemlist.append(item.clone(url=link))

    return itemlist
예제 #31
0
파일: pelisplay.py 프로젝트: Jaloga/xiaomi
def play(item):
    """Resolve a pelisplay intermediate link to the final video source(s).

    item.url packs endpoint and POST body as 'url?post'. Depending on the
    responder the final link comes from the gkpluginsphp plugin, an inline
    jwplayer config, a tutumeme embed, or is already a hoster URL.
    """
    logger.info()
    itemlist = []

    # item.url carries both the endpoint and the POST payload
    url = item.url.split('?')[0]
    post = item.url.split('?')[1]
    data = do_downloadpage(url, post=post,
                           raise_weberror=False).replace('\\/', '/')
    # ~ logger.debug(data)

    url = scrapertools.find_single_match(data, '"data":"([^"]+)')

    if 'pelisplay.tv' in url:
        data = httptools.downloadpage(url).data
        if 'gkpluginsphp' in data:
            # Exchange the plugin token for the direct mp4 link
            url = host + 'private/plugins/gkpluginsphp.php'
            post = {
                'link': scrapertools.find_single_match(data, 'link:"([^"]+)')
            }
            data = do_downloadpage(url,
                                   post=urllib.urlencode(post),
                                   raise_weberror=False).replace('\\/', '/')
            # ~ logger.debug(data)
            url = scrapertools.find_single_match(data, '"link":"([^"]+)')
            if url:
                itemlist.append(['mp4', url])

        elif 'start_jwplayer(JSON.parse(' in data:
            data = data.replace('\\/', '/')
            # ~ logger.debug(data)

            matches = scrapertools.find_multiple_matches(
                data,
                '"file"\s*:\s*"([^"]+)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"'
            )
            if matches:
                # Sort numerically when labels look like '720P', else lexically
                for url, lbl, typ in sorted(matches,
                                            key=lambda x: int(x[1][:-1])
                                            if x[1].endswith('P') else x[1]):
                    itemlist.append(['%s [%s]' % (lbl, typ), url])

    elif 'tutumeme.net' in url:
        data = do_downloadpage(url, raise_weberror=False)
        f = scrapertools.find_single_match(data, '"file"\s*:\s*"([^"]+)')
        if f:
            itemlist.append(
                item.clone(url='https://tutumeme.net/embed/' + f,
                           server='m3u8hls'))

    elif url:
        itemlist.append(
            item.clone(url=url, server=servertools.get_server_from_url(url)))

    return itemlist
예제 #32
0
def search_links_filmaff(item):
    """Scrape trailer links from a Filmaffinity page and list them."""
    logger.info("streamondemand.channels.trailertools search_links_filmaff")

    itemlist = []
    page = scrapertools.downloadpage(item.url)

    if '<a class="lnkvvid"' not in page:
        itemlist.append(item.clone(title="Nessun video disponibile",
                                   action="",
                                   text_color=""))
    else:
        video_re = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
        for scrapedtitle, scrapedurl in scrapertools.find_multiple_matches(page, video_re):
            if not scrapedurl.startswith("http:"):
                scrapedurl = urlparse.urljoin("http:", scrapedurl)
            # Undo the privacy-enhanced youtube embed form
            trailer_url = scrapedurl.replace("-nocookie.com/embed/", ".com/watch?v=")
            if "youtube" in trailer_url:
                server = "youtube"
                code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
            else:
                server = servertools.get_server_from_url(trailer_url)
                thumbnail = item.thumbnail
            clean_title = scrapertools.htmlclean(
                unicode(scrapedtitle, encoding="utf-8", errors="ignore"))
            clean_title += "  [" + server + "]"
            if item.contextual:
                clean_title = "[COLOR white]%s[/COLOR]" % clean_title
            itemlist.append(item.clone(title=clean_title,
                                       url=trailer_url,
                                       server=server,
                                       action="play",
                                       thumbnail=thumbnail,
                                       text_color="white"))

    if keyboard:
        # Always offer a manual-search fallback entry
        template = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
        itemlist.append(item.clone(title=template % "Ricerca manuale su Filmaffinity",
                                   action="manual_search",
                                   thumbnail="",
                                   text_color="green",
                                   extra="filmaffinity"))

    return itemlist
예제 #33
0
def play(item):
    """Follow the cinefox redirect endpoint to obtain the real player URL."""
    logger.info("pelisalacarta.channels.cinefox play")

    headers["Referer"] = item.url
    page = scrapertools.downloadpage("http://www.cinefox.cc/goto/",
                                     post="id=%s" % item.extra,
                                     headers=headers.items())

    target = scrapertools.find_single_match(page, 'document.location\s*=\s*"([^"]+)"')
    # miracine links are just a wrapper around the hqq player
    target = target.replace("http://miracine.tv/n/?etu=",
                            "http://hqq.tv/player/embed_player.php?vid=")

    return [item.clone(url=target, server=servertools.get_server_from_url(target))]
예제 #34
0
def findvideos(item):
    """List playable mirrors for a movie page.

    Each player button carries an opaque source token that is exchanged for
    the real URL via the site's admin-ajax endpoint. desmix/metiscs sources
    are played directly; openload needs a Referer appended to the URL.
    """
    logger.info()
    itemlist = []

    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data

    # Fill in missing artwork/plot from the page itself
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>')
    fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"')
    if not item.fanart and fanart:
        item.fanart = fanart

    patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tipo, source, title in matches:
        if tipo == "trailer":
            continue
        # Exchange the source token for the playable URL
        post = "source=%s&action=obtenerurl" % urllib.quote(source)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url}
        data_url = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post, headers=headers).data
        url = jsontools.load_json(data_url).get("url")
        if "online.desmix" in url or "metiscs" in url:
            server = "directo"
        elif "openload" in url:
            server = "openload"
            url += "|Referer=" + item.url
        else:
            server = servertools.get_server_from_url(url)
            if server == "directo":
                continue
        title = "%s - %s" % (unicode(server, "utf8").capitalize().encode("utf8"), title)
        itemlist.append(item.clone(action="play", url=url, title=title, server=server, text_color=color3))

    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(item.clone(title="Añadir película a la biblioteca", action="add_pelicula_to_library",
                                   extra="findvideos", text_color="green"))

    return itemlist
def search_links_filmaff(item):
    """Look up trailers on a Filmaffinity page and return playable entries."""
    logger.info("streamondemand.channels.trailertools search_links_filmaff")

    itemlist = []
    data = scrapertools.downloadpage(item.url)

    if '<a class="lnkvvid"' not in data:
        itemlist.append(item.clone(title="Nessun video disponibile", action="", text_color=""))
    else:
        link_pattern = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
        for raw_title, raw_url in scrapertools.find_multiple_matches(data, link_pattern):
            if not raw_url.startswith("http:"):
                raw_url = urlparse.urljoin("http:", raw_url)
            # Map the nocookie embed form back to a regular watch URL
            trailer_url = raw_url.replace("-nocookie.com/embed/", ".com/watch?v=")
            if "youtube" in trailer_url:
                server = "youtube"
                code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
            else:
                server = servertools.get_server_from_url(trailer_url)
                thumbnail = item.thumbnail
            display = scrapertools.htmlclean(unicode(raw_title, encoding="utf-8", errors="ignore"))
            display += "  [" + server + "]"
            if item.contextual:
                display = "[COLOR white]%s[/COLOR]" % display
            itemlist.append(item.clone(title=display, url=trailer_url, server=server,
                                       action="play", thumbnail=thumbnail, text_color="white"))

    if keyboard:
        # Offer a manual search fallback at the end of the list
        wrapper = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
        itemlist.append(item.clone(title=wrapper % "Ricerca manuale su Filmaffinity",
                                   action="manual_search", thumbnail="", text_color="green",
                                   extra="filmaffinity"))

    return itemlist
예제 #36
0
def findvideos(item):
    """Parse the server/language/quality table and build playable items.

    Scraped labels are translated through the channel's taudio / tcalidad
    lookup dictionaries.

    Fixes: removed the duplicated ``matches = matches = ...`` assignment,
    normalized the mixed tab/space indentation (a TabError under Python 3),
    and dropped a redundant second ``.lower()`` call.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"'|\n|\r|\t|&nbsp;|<br>", "", data)

    patron = 'class="servidor" alt=""> ([^<]+)<\/span><span style="width: 40px;">([^<]+)<\/span><a class="verLink" rel="nofollow" href="([^"]+)" target="_blank"> <img title="Ver online gratis"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedidioma, scrapedcalidad, scrapedurl in matches:
        scrapedidioma = scrapertools.decodeHtmlentities(scrapedidioma).lower()
        scrapedcalidad = scrapertools.decodeHtmlentities(scrapedcalidad)
        # The site labels Spanish as "español"; the lookup table uses "castellano"
        if scrapedidioma == 'español':
            scrapedidioma = 'castellano'
        idioma = taudio[scrapedidioma]
        calidad = tcalidad[scrapedcalidad.lower()]
        itemlist.append(Item(channel=item.channel, action='play', idioma=idioma,
                             calidad=calidad, url=scrapedurl))

    # Decorate every link item with metadata derived from its URL
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.folder = False
        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.url)
        videoitem.fulltitle = item.title
        videoitem.server = servertools.get_server_from_url(videoitem.url)
        videoitem.title = item.contentTitle + ' | ' + videoitem.calidad + ' | ' + videoitem.idioma + ' (' + videoitem.server + ')'

    if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url,
                             action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))
    return itemlist
예제 #37
0
def findvideos(item):
    """List online and download links for a movie page.

    Server ids come from the SERVERS table (falling back to URL detection);
    tusfiles/stormo and vimeo links get special URL rewriting. Results are
    sorted by language then server, and library/trailer entries are appended.
    """
    logger.info()
    itemlist = []
    item.text_color = color3

    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    if item.extra != "library":
        # Best-effort metadata enrichment; never fatal
        try:
            from core import tmdb
            tmdb.set_infoLabels(item, __modo_grafico__)
        except:
            pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                # Probe that a matching server module exists
                servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                # vimeo needs the page as Referer appended to the URL
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+"  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
            itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
             '"([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                # Respect the user's premium-server visibility setting
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "["+server.capitalize()+"]  ["+idioma+"] ["+calidad_videos.get(calidad)+"]"
                itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma, server=server))

    itemlist.sort(key=lambda item: (item.extra, item.server))
    if itemlist:
        if not "trailer" in item.infoLabels:
            trailer_url = scrapertools.find_single_match(data, 'class="n-movie-trailer">([^<]+)</span>')
            item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=")

        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                   text_color="magenta", context=""))
        if item.extra != "library":
            if config.get_library_support():
                itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca",
                                     action="add_pelicula_to_library", url=item.url, text_color="green",
                                     infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                     extra="library"))

    return itemlist
예제 #38
0
def findvideostv(item):
    """Build the list of playable links (streaming + download) for one TV episode.

    Scrapes the page at ``item.url`` for ``movie-online-list`` and
    ``movie-downloadlink-list`` spans matching the current episode/season
    (taken from ``item.infoLabels``), resolves the server name through the
    SERVERS map (falling back to URL-based detection) and labels each link
    with its language and quality.

    Returns a list of cloned play items sorted by episode number and title,
    optionally enriched with TMDB metadata (best-effort).
    """
    logger.info()
    itemlist = []

    # Fill the language and quality lookup dictionaries.
    idiomas_videos, calidad_videos = dict_videos()

    data = httptools.downloadpage(item.url).data
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # Streaming links block. episode/season are interpolated into the pattern,
    # so only 4 groups are captured: quality, server id, language id, url.
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            # Some stormo.tv links are mislabelled as tusfiles in the page data.
            if server == "tusfiles" and "stormo.tv" in url:
                server = "stormo"
            if server != "tusfiles":
                servers_module = __import__("servers."+server)
        except Exception:
            # Unknown server id or missing connector module: detect from the URL.
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                # The vimeo connector expects the referer appended after "|".
                url += "|" + item.url
            elif server == "tusfiles":
                url = "http://tusfiles.org/?%s" % url
                server = "directo"
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"

            itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))

    # Download links block (same 4 capture groups as above).
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
             '" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)"' \
             % (str(item.infoLabels['episode']), str(item.infoLabels['season']))
    matches = scrapertools.find_multiple_matches(data, patron)
    # FIX: the pattern yields 4-tuples; the original unpacked 5 values
    # (quality, servidor_num, episode, language, url) and raised ValueError
    # on the first match.
    for quality, servidor_num, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except Exception:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if server == "vimeo":
                url += "|" + item.url
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = server.capitalize()+" ["+idioma+"] ("+calidad_videos.get(quality)+")"
                itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))

    itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))
    try:
        # Enrich items with TMDB artwork/metadata; purely best-effort.
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except Exception:
        pass

    return itemlist
예제 #39
0
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    """Extract mirror links from a movie/episode page and apply the language filter.

    data          -- full page HTML (whitespace already normalised upstream)
    filtro_idioma -- language filter index; 3 means "show all languages"
    dict_idiomas  -- maps a language label to its filter index
    type          -- id of the link table to scrape ("online" or a download id)
    item          -- base Item cloned for every mirror found

    Returns the list of play items. When the language filter hid some links,
    a trailing "Mostrar enlaces filtrados..." item is appended so the user can
    re-list them unfiltered.
    """
    logger.info()
    lista_enlaces = []

    matches = []
    if type == "online":
        # Tabbed iframes: one (anchor-id, language) pair per tab; the empty
        # second field means "server unknown, detect from URL later".
        patron = '<a href="#([^"]+)" data-toggle="tab">([^<]+)</a>'
        bloques = scrapertools.find_multiple_matches(data, patron)
        for id, language in bloques:
            patron = 'id="' + id + '">.*?<iframe src="([^"]+)"'
            url = scrapertools.find_single_match(data, patron)
            matches.append([url, "", language])

    bloque2 = scrapertools.find_single_match(data, '<div class="table-link" id="%s">(.*?)</table>' % type)
    patron = 'tr>[^<]+<td>.*?href="([^"]+)".*?src.*?title="([^"]+)"' \
             '.*?src.*?title="([^"]+)".*?src.*?title="(.*?)"'
    matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
    filtrados = []  # languages hidden by the filter, reported at the end
    for match in matches:
        scrapedurl = match[0]
        language = match[2].strip()
        if not match[1]:
            server = servertools.get_server_from_url(scrapedurl)
            title = "   Mirror en " + server + " (" + language + ")"
        else:
            server = match[1].lower()
            # Site labels differ from connector names; normalise them.
            if server == "uploaded":
                server = "uploadedto"
            elif server == "streamin":
                server = "streaminto"
            elif server == "netu":
                server = "netutv"
            if config.get_setting("hidepremium") == "true":
                # FIX: skip disabled premium servers. Previously this flag only
                # gated the connector import and the link was listed anyway,
                # so the "hidepremium" setting had no effect.
                if not servertools.is_server_enabled(server):
                    continue
            try:
                servers_module = __import__("servers." + server)
            except Exception:
                pass
            title = "   Mirror en " + server + " (" + language + ") (Calidad " + match[3].strip() + ")"

        if filtro_idioma == 3 or item.filtro:
            # No language filter (or user asked for the filtered list).
            lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
                                            url=scrapedurl, idioma=language, extra=item.url))
        else:
            idioma = dict_idiomas[language]
            if idioma == filtro_idioma:
                lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",  url=scrapedurl,
                                                server=server, extra=item.url))
            else:
                if language not in filtrados:
                    filtrados.append(language)

    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))

    return lista_enlaces