示例#1
0
文件: dospelis.py 项目: Jaloga/xiaomi
def play(item):
    """Resolve the final playable URL for *item*.

    Two paths: if ``item.url`` is set, the page is scraped for the
    ``<a id="link">`` target; otherwise a doo_player AJAX request is made
    using the data-* fields stored on the item (dpost/dnume/dtype).

    Returns a list with at most one cloned Item carrying the resolved
    url/server, or an empty list when nothing could be resolved.
    """
    logger.info()
    itemlist = []

    if item.url:
        data = do_downloadpage(item.url)
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, '<a id="link" href="([^"]+)')
        if url: 
            itemlist.append(item.clone( url=servertools.normalize_url(item.server, url) ))

    else:
        # No direct url: query the doo_player AJAX endpoint with the stored fields.
        post = urllib.urlencode( {'action': 'doo_player_ajax', 'post': item.dpost, 'nume': item.dnume, 'type': item.dtype} )
        data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.referer}).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, "src='([^']+)")
        if not url: url = scrapertools.find_single_match(data, 'src="([^"]+)')
        if url: 
            if 'jwplayer' in url and 'source=' in url: # E.g.: https://www.dospelis.online/jwplayer-2/?source=https%3A%2F%2Fyoutu.be%2Fzcn89lxhEWk&id=71977&type=mp4
                url = urllib.unquote(scrapertools.find_single_match(url, "source=([^&']+)"))
            elif 'streamcrypt.net/' in url: # E.g.: https://streamcrypt.net/embed/streamz.cc/...
                url = scrapertools.decode_streamcrypt(url)

            if not url: return itemlist
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone( url=url, server=servidor ))

    return itemlist
示例#2
0
def findvideos(item):
    """List playable links for *item*: embedded players plus download-table rows.

    Returns a list of Item objects with action 'play'.
    """
    logger.info()
    itemlist = []

    data = do_downloadpage(item.url)

    # Video sources (embedded iframes)
    matches = scrapertools.find_multiple_matches(data, "(?i)<div class='pframe'><iframe.*?src=(?:'|\")([^'\"]+)")
    for url in matches:
        if 'youtube.com' in url: continue # trailers
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':
            url = servertools.normalize_url(servidor, url)
            itemlist.append(Item( channel = item.channel, action = 'play', server = servidor, 
                                  title = '', url = url, language = 'Esp' ))

    # Download links table
    bloque = scrapertools.find_single_match(data, "<div id='download'(.*?)</table></div></div></div>")

    matches = scrapertools.find_multiple_matches(bloque, "<tr id='link-[^']+'>(.*?)</tr>")
    for enlace in matches:
        url = scrapertools.find_single_match(enlace, " href='([^']+)")
        servidor = corregir_servidor(scrapertools.find_single_match(enlace, "domain=(?:www.|dl.|)([^'.]+)"))
        # ~ logger.info('url: %s Servidor: %s' % (url,servidor))
        if not url or not servidor: continue
        quality = 'HD'; lang = 'Esp' # the site always reports the same quality/language !?
        
        itemlist.append(Item( channel = item.channel, action = 'play', server = servidor, 
                              title = '', url = url,
                              language = lang, quality = quality , other = 'd'
                       ))

    return itemlist
示例#3
0
def findvideos(item):
    """Collect play links from both the 'Ver' (watch) and 'Descargar' (download) tables."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # ~ logger.debug(data)

    for tipo in ['Ver', 'Descargar']:
        bloque = scrapertools.find_single_match(data, '<div class="titles4 font4 bold">%s.*?<tbody>(.*?)</table>' % tipo)
        # ~ logger.debug(bloque)

        matches = scrapertools.find_multiple_matches(bloque, '<tr>(.*?)</tr>')
        for data_epi in matches:
            url = scrapertools.find_single_match(data_epi, ' data-enlace="([^"]+)')
            if url:
                # Direct embed url: identify the server from the url itself.
                server = servertools.get_server_from_url(url)
                if not server or server == 'directo': continue
                url = servertools.normalize_url(server, url)
            else:
                # Fallback: redirect link; server name comes from the ?domain= parameter.
                url = scrapertools.find_single_match(data_epi, ' href="([^"]+)')
                if url.startswith('/'): url = host + url[1:]
                server = scrapertools.find_single_match(data_epi, '\?domain=([^".]+)')
                server = normalize_server(server)

            # ~ logger.info('%s %s' % (server, url))
            if not url or not server: continue

            lang = scrapertools.find_single_match(data_epi, 'img/language/([^\.]+)')
            
            itemlist.append(Item( channel = item.channel, action = 'play', server = server,
                                  title = '', url = url, 
                                  language = IDIOMAS.get(lang, lang) #, other = tipo
                           ))

    return itemlist
示例#4
0
def play(item):
    """Resolve the final video url; special-cases the site's own ok.ru wrapper player."""
    logger.info()
    itemlist = []

    if host in item.url:
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(
            data, '<a id="link" rel="nofollow" href="([^"]+)')
        if url:
            if 'ok.cinetux.me/player/ok/?v=' in url:
                # Wrapper page: the real ok.ru video id comes after '#' in the iframe src.
                data = httptools.downloadpage(url).data
                vid = scrapertools.find_single_match(data,
                                                     ' src=".*?\#([^"]+)')
                if vid:
                    itemlist.append(
                        item.clone(server='okru',
                                   url='https://ok.ru/videoembed/' + vid))
            else:
                itemlist.append(
                    item.clone(
                        url=servertools.normalize_url(item.server, url)))
    else:
        # External url: nothing to resolve here.
        itemlist.append(item.clone())

    return itemlist
示例#5
0
文件: zoowoman.py 项目: Jaloga/xiaomi
def extraer_embeds(data):
    """Collect playable embed links found in iframe/source tags of *data*."""
    found = []

    for tag in ('iframe', 'source'):
        pattern = '<%s.*? src="([^"]+)' % tag
        for url in scrapertools.find_multiple_matches(data, pattern):
            # Skip social-network widgets; they are never video servers.
            if any(dom in url for dom in ('facebook.com', 'twitter.com', 'google.com')):
                continue
            if url.startswith('//'):
                url = 'https:' + url

            if '.us.archive.org' in url:
                servidor = 'directo'
            else:
                servidor = servertools.get_server_from_url(url)
                if not servidor or servidor == 'directo':
                    continue
                url = servertools.normalize_url(servidor, url)

            found.append(
                Item(channel='zoowoman',
                     action='play',
                     server=servidor,
                     language='?',
                     title='',
                     url=url,
                     other='iframe/source'))

    return found
示例#6
0
def findvideos(item):
    """List playable links: base64-obfuscated embeds plus the download table.

    Embed urls are base64-encoded with 'R' used in place of '=' padding;
    download rows are parsed from the '#dlnmt' table cells
    (url, server, language, quality).
    """
    logger.info()
    itemlist = []
    
    IDIOMAS = {'Español': 'Esp', 'Latino': 'Lat', 'Subtitulado': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Embedded links
    patron = '<a href="#embed\d+" data-src="([^"]+)" class="([^"]+)"(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for urlcod, lang, resto in matches:
        if urlcod.startswith('//'): urlcod = 'https:' + urlcod
        elif urlcod.startswith('/'): urlcod = HOST + urlcod[1:]
        cod = urlcod.replace(HOST + 'replayer/', '').split('RRRRR')[0]
        # ~ logger.info('%s %s' % (cod, urlcod))
        # Restore base64 padding: the site strips '=' and pads with 'R'.
        numpad = len(cod) % 4
        if numpad > 0: cod += 'R' * (4 - numpad)
        try:
            url = base64.b64decode(cod)
            if numpad > 0: url = url[:-(4 - numpad)]  # drop bytes decoded from the fake padding
        except:
            url = None
        if not url: 
            logger.info('No detectada url. %s %s' % (cod, urlcod))
            continue
        
        servidor = servertools.get_server_from_url(url)
        # 'directo' is only accepted for Google storage links.
        if not servidor or (servidor == 'directo' and 'storage.googleapis.com/' not in url): 
            logger.info('No detectado servidor, url: %s' % url)
            continue
        url = servertools.normalize_url(servidor, url)

        qlty = scrapertools.find_single_match(resto, '([^>]+)</div>$')

        itemlist.append(Item( channel = item.channel, action = 'play', server = servidor,
                              title = '', url = url, 
                              language = IDIOMAS.get(lang, lang), quality = qlty, quality_num = puntuar_calidad(qlty), other='e'
                       ))

    # Download links
    bloque = scrapertools.find_single_match(data, 'id="dlnmt"(.*?)</table>')
    matches = re.compile('<tr>(.*?)</tr>', re.DOTALL).findall(bloque)
    for lin in matches:
        if '<th' in lin: continue  # skip the table header row
        tds = scrapertools.find_multiple_matches(lin, '<td[^>]*>(.*?)</td>')
        url = scrapertools.find_single_match(tds[0], ' href="([^"]+)')
        servidor = scrapertools.find_single_match(tds[1], '<span>(.*?)</span>')
        lang = tds[2]
        qlty = tds[3]
        if '/link/?go=' in url: url = url.split('/link/?go=')[1]
        if not url or not servidor: continue

        itemlist.append(Item( channel = item.channel, action = 'play', server = servertools.corregir_servidor(servidor),
                              title = '', url = url, 
                              language = IDIOMAS.get(lang, lang), quality = qlty, quality_num = puntuar_calidad(qlty), other='d'
                       ))

    return itemlist
示例#7
0
def get_video_url(page_url, url_referer=''):
    """Return [label, url] pairs for an embedy.cc video.

    First tries the video.get API with the embed id; if that yields
    nothing, falls back to scraping the page for an iframe and
    delegating to that server's own resolver module.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    
    vid = scrapertools.find_single_match(page_url, "embedy.cc/embed/([A-z0-9=]+)")
    if vid:
        data = httptools.downloadpage('https://embedy.cc/video.get/', post={'video':vid}, headers={'Referer': page_url}).data
        # ~ logger.debug(data)
        try:
            data_json = jsontools.load(data)
            for n in data_json['response']:
                for f in data_json['response'][n]['files']:
                    video_urls.append([f, data_json['response'][n]['files'][f]])
        except:
            # Malformed/unexpected JSON: fall through to the iframe fallback below.
            pass

    if len(video_urls) == 0:
        data = httptools.downloadpage(page_url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, '<iframe.*? src="([^"]+)')
        
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo' and servidor != 'embedy': 
            url = servertools.normalize_url(servidor, url)
            server_module = __import__('servers.%s' % servidor, None, None, ["servers.%s" % servidor])
            return server_module.get_video_url(url)

    return video_urls
def play(item):
    """Resolve the playable url, handling flixplayer wrappers and host '?h=' redirects."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    url = scrapertools.find_single_match(data, 'src="([^"]+)"')

    if '/flixplayer.' in url:
        # flixplayer wrapper: the real link is in the page's "link" JSON field.
        data = httptools.downloadpage(url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, 'link":"([^"]+)"')

    elif host in url and '?h=' in url:
        # Host redirector: post the hash to r.php and read the Location header.
        fid = scrapertools.find_single_match(url, "h=([^&]+)")
        url2 = url.split('?h=')[0] + 'r.php'
        resp = httptools.downloadpage(url2,
                                      post='h=' + fid,
                                      headers={'Referer': url},
                                      follow_redirects=False)
        if 'location' in resp.headers: url = resp.headers['location']
        else: url = None

    if url:
        servidor = servertools.get_server_from_url(url)
        # ~ if servidor and servidor != 'directo': # dropped: server may legitimately be 'directo' when it comes from flixplayer
        url = servertools.normalize_url(servidor, url)
        itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
示例#9
0
def play(item):
    """Resolve the real video url behind a host page; external urls pass through.

    Tries, in order: a window.open variable, the 'enlaceeee' variable,
    and the last '<a id="link-redirect">' anchor on the page.

    Returns a list with at most one cloned Item, or an empty list.
    """
    logger.info()
    itemlist = []

    if item.url.startswith(host):
        headers = {'Referer': item.referer}
        data = httptools.downloadpage(item.url, headers=headers).data
        # ~ logger.debug(data)

        wopen = scrapertools.find_single_match(
            data, 'onclick="window\.open\(([^\)]+)\);"')
        if wopen:
            url = scrapertools.find_single_match(data,
                                                 "%s\s*=\s*'([^']+)" % wopen)
        else:
            url = scrapertools.find_single_match(data,
                                                 "enlaceeee\s*=\s*'([^']+)")
            if not url:
                # Fix: guard against an empty match list — the previous
                # [-1] indexing raised IndexError when no anchor matched.
                links = scrapertools.find_multiple_matches(
                    data, '<a id="link-redirect".*? href="([^"]+)')
                url = links[-1] if links else ''
        # ~ logger.debug(url)
        if url:
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))
    else:
        # External url: nothing to resolve here.
        itemlist.append(item.clone())

    return itemlist
def findvideos(item):
    """Build play Items from the player-options list of a dooplay-style page.

    Each <li> in #playeroptionsul carries a flag image (language) and
    either an inline iframe in its matching source box or data-* fields
    for the doo_player AJAX endpoint (resolved via get_url).
    """
    logger.info()
    itemlist = []

    IDIOMAS = {'es': 'Esp', 'mx': 'Lat', 'en': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Video sources
    bloque = scrapertools.find_single_match(
        data, "<ul id='playeroptionsul'(.*?)</ul>")

    matches = scrapertools.find_multiple_matches(
        bloque, "<li id='player-option-(\d+)'(.*?)</li>")
    for optnum, enlace in matches:
        # ~ logger.debug(enlace)

        lang = scrapertools.find_single_match(enlace,
                                              "/img/flags/([^.']+)").lower()

        bloque = scrapertools.find_single_match(
            data,
            "<div id='source-player-%s' class='source-box'><div class='pframe'>(.*?)</div></div>"
            % optnum)
        # ~ logger.debug(bloque)

        urls = scrapertools.find_multiple_matches(
            bloque, '(?i)<iframe.*? src=(?:"|\')([^"\']+)')
        if not urls:
            # No inline iframe: resolve through the AJAX player endpoint.
            dtype = scrapertools.find_single_match(enlace,
                                                   "data-type='([^']+)")
            dpost = scrapertools.find_single_match(enlace,
                                                   "data-post='([^']+)")
            dnume = scrapertools.find_single_match(enlace,
                                                   "data-nume='([^']+)")
            if not dtype or not dpost or not dnume or dnume == 'trailer':
                continue
            urls = [get_url(dpost, dnume, dtype, item.url)]

        for url in urls:
            if not url: continue
            # ~ logger.info(url)
            servidor = servertools.get_server_from_url(url)
            if not servidor or servidor == 'directo': continue
            url = servertools.normalize_url(servidor, url)

            itemlist.append(
                Item(channel=item.channel,
                     action='play',
                     server=servidor,
                     title='',
                     url=url,
                     language=IDIOMAS.get(lang, lang)))

    return itemlist
def play(item):
    """Resolve the playable url for *item*.

    Handles three link shapes: nested-base64 '/o.php?l=' links, plain
    redirect links (Location header), and pages scraped for a src/embed
    url.  stream-mx.com links go through their player API and may yield
    [label, url] pairs instead of Items.
    """
    logger.info()
    itemlist = []

    if '/o.php?l=' in item.url:
        url = scrapertools.find_single_match(item.url, "/o\.php\?l=(.*)")
        # The payload is base64-encoded several times over; peel layers
        # until an http url appears (bounded to avoid infinite loops).
        for i in range(9):  # range(5)
            url = base64.b64decode(url)
            if url.startswith('http'): break
        if not url.startswith('http'): url = None
    else:
        item.url = item.url.replace('&#038;', '&')
        resp = httptools.downloadpage(item.url,
                                      headers={'Referer': item.referer},
                                      follow_redirects=False)
        if 'location' in resp.headers:
            url = resp.headers['location']
        else:
            # ~ logger.debug(resp.data)
            # Try progressively looser src patterns, then the embed_url JSON field.
            url = scrapertools.find_single_match(resp.data, "src='([^']+)")
            if not url:
                url = scrapertools.find_single_match(resp.data, 'src="([^"]+)')
            if not url:
                url = scrapertools.find_single_match(resp.data, 'src=([^ >]+)')
            if not url:
                url = scrapertools.find_single_match(resp.data,
                                                     '"embed_url":"([^"]+)')

    # Fix: url can be None here (failed base64 unwrapping); the previous
    # code crashed with TypeError on "'stream-mx.com/' in None".
    if url and 'stream-mx.com/' in url:
        fid = scrapertools.find_single_match(url, "id=([^&]+)")
        if not fid: return itemlist
        url = 'https://stream-mx.com/player.php?id=%s&v=2&ver=si' % fid
        data = httptools.downloadpage(url, headers={'Referer': item.url}).data
        # ~ logger.debug(data)
        bloque = scrapertools.find_single_match(data, '"sources":\s*\[(.*?)\]')
        for enlace in scrapertools.find_multiple_matches(bloque, "\{(.*?)\}"):
            v_url = scrapertools.find_single_match(enlace,
                                                   '"file":\s*"([^"]+)')
            if not v_url: continue
            v_type = scrapertools.find_single_match(enlace,
                                                    '"type":\s*"([^"]+)')
            if v_type == 'hls':
                itemlist.append(item.clone(url=v_url, server='m3u8hls'))
            else:
                v_lbl = scrapertools.find_single_match(enlace,
                                                       '"label":\s*"([^"]+)')
                itemlist.append([v_lbl, v_url])

    elif url:
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':
            url = servertools.normalize_url(servidor, url)
            itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
示例#12
0
def play(item):
    """Resolve the final playable url, following cinetux intermediate pages.

    With item.url set, scrapes the '<a id="link">' target (optionally
    going through a cinetux.me hop); otherwise queries the doo_player
    AJAX endpoint with the item's stored data-* fields.
    """
    logger.info("play: %s" % item.url)
    itemlist = []

    if item.url:
        data = do_downloadpage(item.url)
        # ~ logger.debug(data)
        new_url = scrapertools.find_single_match(
            data, '<a id="link"[^>]* href="([^"]+)')
        if new_url:
            if '&url=' in new_url: new_url = new_url.split('&url=')[1]
            if 'cinetux.me' in new_url:
                # Intermediate page: follow it and look for the CTA link.
                data = do_downloadpage(new_url)
                # ~ logger.debug(data)
                new_url = scrapertools.find_single_match(
                    data, "<a class='cta' href='([^']+)")
                if new_url:
                    itemlist.append(item.clone(server='', url=new_url))
                    itemlist = servertools.get_servers_itemlist(
                        itemlist
                    )  # fixes the url using each server's own patterns
                else:
                    itemlist = extraer_video(item, data)
            else:
                new_url = servertools.normalize_url(item.server, new_url)
                itemlist.append(item.clone(url=new_url))

    else:
        # No direct url: query the doo_player AJAX endpoint.
        post = urllib.urlencode({
            'action': 'doo_player_ajax',
            'post': item.dpost,
            'nume': item.dnume,
            'type': item.dtype
        })
        data = do_downloadpage(CHANNEL_HOST + 'wp-admin/admin-ajax.php',
                               post=post,
                               headers={'Referer': item.referer})
        # ~ logger.debug(data)
        new_url = scrapertools.find_single_match(data, "src='([^']+)'")
        if not new_url:
            new_url = scrapertools.find_single_match(data, 'src="([^"]+)"')
        if new_url:
            if 'cinetux.me' in new_url:
                data = do_downloadpage(new_url)
                # ~ logger.debug(data)
                itemlist = extraer_video(item, data)
            else:
                itemlist.append(
                    item.clone(
                        url=new_url,
                        server=servertools.get_server_from_url(new_url)))

    return itemlist
示例#13
0
def play(item):
    """Resolve wmovies links: safelink pages, get-player redirects and inline sources.

    May return a list of Items, a list of [label, url] pairs (direct
    streams) or an error message string when the video is unavailable.
    """
    logger.info()
    itemlist = []
    
    headers = {'Referer': item.referer}

    if not 'get-player' in item.url:
        data = do_downloadpage(item.url, headers = headers)
        # ~ logger.debug(data)

    if item.url.startswith('https://wmovies.co/u/'):
        url = scrapertools.find_single_match(data,'<a class="Go_V2" href="([^"]+)')

    else:
        if not 'get-player' in item.url:
            if not '"status":200' in data or not '"data":"' in data: return 'El vídeo no está disponible'
            item.url = 'https://wmovies.co/get-player/' + scrapertools.find_single_match(data,'"data":"([^"]+)')
        
        resp = httptools.downloadpage(item.url, headers = headers, follow_redirects = False)
        
        if 'refresh' in resp.headers:
            url = scrapertools.find_single_match(resp.headers['refresh'], ';\s*(.*)')
        elif 'location' in resp.headers:
            url = resp.headers['location']
        else: # direct streams embedded in the page
            # ~ logger.debug(resp.data)
            url = None
            bloque = scrapertools.find_single_match(resp.data, '"sources":\s*\[(.*?)\]')
            if not bloque: return 'No se encuentran fuentes para este vídeo'
            for enlace in scrapertools.find_multiple_matches(bloque, "\{(.*?)\}"):
                v_url = scrapertools.find_single_match(enlace, '"file":\s*"([^"]+)')
                if not v_url: continue
                v_lbl = scrapertools.find_single_match(enlace, '"label":\s*"([^"]+)')
                if not v_lbl: v_lbl = scrapertools.find_single_match(enlace, '"type":\s*"([^"]+)')
                if not v_lbl: v_lbl = 'mp4'
                itemlist.append([v_lbl, v_url])

            # Several qualities found: return them sorted by numeric label.
            if len(itemlist) > 1:
                return sorted(itemlist, key=lambda x: int(x[0]) if x[0].isdigit() else 0)

    if url:
       url = url.replace('www.privatecrypt.me', 'www.fembed.com') #.replace('isthebest.rest', '.com')
       servidor = servertools.get_server_from_url(url)
       if servidor != 'directo':
           url = servertools.normalize_url(servidor, url)
           itemlist.append(item.clone( url = url, server = servidor ))

    return itemlist
示例#14
0
def play(item):
    """Resolve streamcrypt wrapper links; other urls pass through untouched."""
    logger.info()
    resolved = []

    if 'streamcrypt.net/' not in item.url:
        # Already an external server url: keep it as-is.
        resolved.append(item.clone())
        return resolved

    # E.g.: https://streamcrypt.net/[embed/]flashx.tv/...
    real_url = scrapertools.decode_streamcrypt(item.url)
    if real_url:
        server = servertools.get_server_from_url(real_url)
        if server and server != 'directo':
            resolved.append(item.clone(url=servertools.normalize_url(server, real_url),
                                       server=server))

    return resolved
示例#15
0
def play(item):
    """Resolve the final link when the url points at this channel's host."""
    logger.info()
    resolved = []

    if host not in item.url:
        # Already an external server url: keep it as-is.
        resolved.append(item.clone())
        return resolved

    page = do_downloadpage(item.url)
    link = scrapertools.find_single_match(page, '<a id="link".*?href="([^"]+)')
    if link:
        server = servertools.get_server_from_url(link)
        if server and server != 'directo':
            resolved.append(item.clone(url=servertools.normalize_url(server, link),
                                       server=server))

    return resolved
示例#16
0
def get_video_url(page_url, url_referer=''):
    """Return the playable video urls for a youtube page or bare video id."""
    logger.info("(page_url='%s')" % page_url)

    # Accept a bare video id as well as a full url.
    if not page_url.startswith("http"):
        page_url = "http://www.youtube.com/watch?v=%s" % page_url
        logger.info(" page_url->'%s'" % page_url)

    page_url = servertools.normalize_url('youtube', page_url)

    page = httptools.downloadpage(page_url).data
    if "File was deleted" in page:
        return 'El archivo no existe o ha sido borrado'

    vid = scrapertools.find_single_match(page_url, 'v=([A-z0-9_-]{11})')
    return extract_videos(vid)
def play(item):
    """Decode the obfuscated link in the page and attach its server."""
    logger.info()
    results = []

    page = httptools.downloadpage(item.url).data
    # ~ logger.debug(page)

    from lib import serieslanresolver
    link = serieslanresolver.decode_url(page)
    # ~ logger.info(link)
    if link:
        server = servertools.get_server_from_url(link)
        # 'directo' is only accepted for googleusercontent links.
        if server and (server != 'directo' or 'googleusercontent' in link):
            results.append(item.clone(url=servertools.normalize_url(server, link),
                                      server=server))

    return results
示例#18
0
文件: tupelihd.py 项目: Jaloga/xiaomi
def play(item):
    """Resolve '/safelink/' wait pages: submit the hidden form (after the
    required countdown) and extract the real url from the JSON response.
    External urls pass through unchanged.
    """
    logger.info()
    itemlist = []

    if item.url.startswith(host) and '/safelink/' in item.url:
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)

        url = scrapertools.find_single_match(data, ' action="([^"]+)')
        if not url: return itemlist
        if url.startswith('/'): url = host + url[1:]

        # Collect the hidden form fields that must be posted back.
        post = {}
        inputs = scrapertools.find_multiple_matches(
            data,
            '<input type="hidden" name="([^"]+)"(?: autocomplete="off"|) value="([^"]+)"'
        )
        for nom, val in inputs:
            post[nom] = val

        # The page enforces a countdown before the form may be submitted.
        espera = scrapertools.find_single_match(
            data, '<span id="timer" class="timer">\s*(\d+)')
        if espera:
            import time
            from platformcode import platformtools
            platformtools.dialog_notification(
                'Cargando', 'Espera de %s segundos requerida' % espera)
            time.sleep(int(espera))

        headers = {'Referer': item.url, 'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage(url, post=post, headers=headers).data
        # ~ logger.debug(data)

        url = scrapertools.find_single_match(data.replace('\\/', '/'),
                                             '"url":"([^"]+)')
        if url:
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))

    else:
        # External url: nothing to resolve here.
        itemlist.append(item.clone())

    return itemlist
def findvideos(item):
    """List play links per language ('la', 'es', 'sub') from the source list."""
    logger.info()
    itemlist = []

    data = do_downloadpage(item.url)
    # ~ logger.debug(data)

    for lang in ['la', 'es', 'sub']:
        bloque = scrapertools.find_single_match(
            data,
            '<ul id="source-list">\s*(<li><a id="op1_%s".*?)</ul>' % lang)
        matches = scrapertools.find_multiple_matches(bloque, '<li(.*?)</li>')
        for match in matches:
            url = scrapertools.find_single_match(match, ' rel="([^"]+)')
            if not url: continue

            # These mirror domains were rebranded to fembed.
            url = url.replace('www.pelisup.com/v/', 'www.fembed.com/v/')
            url = url.replace('www.pelispp.com/v/', 'www.fembed.com/v/')
            if HOST in url:
                servidor = 'directo'  # '' (undetermined) would be more accurate, but these all look like googleapis links so 'directo' is used
            else:
                servidor = servertools.get_server_from_url(
                    url, disabled_servers=True)
                if servidor:
                    url = servertools.normalize_url(servidor, url)
                else:
                    servidor = scrapertools.find_single_match(
                        match, ' title="([^"]+)')

            if url in [it.url for it in itemlist]:
                continue  # avoid duplicates
            qlty = scrapertools.find_single_match(
                match, '<span class="q">([^<]+)').strip()

            itemlist.append(
                Item(channel=item.channel,
                     action='play',
                     server=servidor,
                     title='',
                     url=url,
                     language=IDIOMAS.get(lang, lang),
                     quality=qlty))

    return itemlist
def play(item):
    """Resolve three link flavours: nested-base64 '/o.php?l=' links,
    'trdownload=' redirects, and pages scraped for their first iframe."""
    logger.info()
    itemlist = []
    item.url = item.url.replace('&#038;', '&')

    if '/o.php?l=' in item.url:
        # Payload is base64-encoded multiple times; unwrap until an http url shows up.
        url = scrapertools.find_single_match(item.url, '/o\.php\?l=(.*)$')
        for i in range(9):
            url = base64.b64decode(url)
            if url.startswith('http'): break
        if not url.startswith('http'): return itemlist

    elif 'trdownload=' in item.url:
        # Download link redirect: take the Location header without following it.
        url = httptools.downloadpage(item.url,
                                     headers={
                                         'Referer': item.referer
                                     },
                                     follow_redirects=False,
                                     only_headers=True).headers.get(
                                         'location', '')

    else:
        data = httptools.downloadpage(item.url,
                                      headers={
                                          'Referer': item.referer
                                      }).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data, '<iframe.*? src="([^"]+)')

    if url:
        if url.startswith('//'): url = 'https:' + url
        url = url.replace(
            'https://uptostream/',
            'https://uptostream.com/')  # fix malformed url present in some links
        url = url.replace('https://www.seriemega.site/',
                          'https://www.fembed.com/')

        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':
            url = servertools.normalize_url(servidor, url)
            itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
示例#21
0
def play(item):
    """Resolve a playview link: post the option, try several url patterns,
    and follow host-relative redirects (special-casing jetload ids)."""
    logger.info()
    itemlist = []

    # Build the playview POST: episodes carry an extra 'episode' field.
    if item.linkepi == -1:
        post = 'set=LoadOptions&action=Step3&id=%s&type=%s' % (item.linkid,
                                                               item.linktype)
    else:
        post = 'set=LoadOptionsEpisode&action=Step3&id=%s&type=%s&episode=%d' % (
            item.linkid, item.linktype, item.linkepi)

    data = httptools.downloadpage(host + 'playview', post=post).data
    # ~ logger.debug(data)

    # Try the direct-link button first, then progressively looser patterns.
    url = scrapertools.find_single_match(
        data, 'data-url="([^"]+)"><span class="pull-left">Link directo')
    if 'http' not in url: url = None
    if not url:
        url = scrapertools.find_single_match(
            data, '<iframe class="[^"]*" src="([^"]+)')
    if not url:
        url = scrapertools.find_single_match(data, '<iframe src="([^"]+)')
    if not url: url = scrapertools.find_single_match(data, 'data-url="([^"]+)')

    if url.startswith(host):
        # Host link is a redirector: grab only the Location header.
        url = httptools.downloadpage(url,
                                     follow_redirects=False,
                                     only_headers=True).headers.get(
                                         'location', '')
        if url and 'http' not in url:
            # A bare id is returned for jetload; anything else is unusable.
            if item.server == 'jetload': url = 'https://jetload.net/e/' + url
            else: url = None

    if url:
        # NOTE(review): this checks item.servidor while the branch above uses
        # item.server — possibly a typo for item.server; confirm before changing.
        if not item.servidor:
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))
        else:
            itemlist.append(item.clone(url=url))

    return itemlist
示例#22
0
def play(item):
    """Resolve the jwplayer 'sources' file url from a host page.

    'directo' servers are only accepted for Google storage links; other
    urls are kept as-is when they already carry a server.
    """
    logger.info()
    itemlist = []

    if HOST in item.url:
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)
        url = scrapertools.find_single_match(data,
                                             "sources:.*?'file':\s*'([^']+)")
        if url:
            servidor = servertools.get_server_from_url(url)
            # Fix: the previous "A and B or C" precedence appended items with
            # no detected server whenever the url pointed at googleapis
            # storage; sibling channels use this parenthesization.
            if servidor and (servidor != 'directo'
                             or 'storage.googleapis.com/' in url):
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))

    elif item.server and item.url:
        itemlist.append(item.clone())

    return itemlist
def play(item):
    """Follow the host redirect (if any) and resolve the target server."""
    logger.info()
    results = []

    url = item.url.replace('&amp;', '&')

    # Host links are redirectors: grab the Location header without following it.
    if url.startswith(host):
        resp = httptools.downloadpage(url,
                                      follow_redirects=False,
                                      only_headers=True)
        url = resp.headers.get('location', '')

    if url:
        server = servertools.get_server_from_url(url)
        if server and server != 'directo':
            results.append(item.clone(url=servertools.normalize_url(server, url),
                                      server=server))

    return results
示例#24
0
def play(item):
    """Extract the outbound link from a HOST page, or pass the item through."""
    logger.info()
    itemlist = []

    if not item.url.startswith(HOST):
        # External url: already resolved upstream, nothing to do.
        itemlist.append(item.clone())
        return itemlist

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)
    link = scrapertools.find_single_match(data,
                                          '<a id="link"[^>]*href="([^"]+)')
    if link:
        server = servertools.get_server_from_url(link)
        if server and server != 'directo':
            itemlist.append(item.clone(
                url=servertools.normalize_url(server, link),
                server=server))

    return itemlist
示例#25
0
def findvideos(item):
    """List the playable sources advertised in the page's player options."""
    logger.info()
    itemlist = []

    # Flag code -> display language label.
    lang_labels = {'es': 'Esp', 'mx': 'Lat', 'en': 'VOSE'}

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Video sources: one <li> per player tab.
    options_block = scrapertools.find_single_match(
        data, "<ul id='playeroptionsul'>(.*?)</ul>")

    for option in scrapertools.find_multiple_matches(
            options_block, "<li id='player-option-\d+'(.*?)</li>"):
        # ~ logger.debug(option)

        dtype = scrapertools.find_single_match(option, "data-type='([^']+)")
        dpost = scrapertools.find_single_match(option, "data-post='([^']+)")
        dnume = scrapertools.find_single_match(option, "data-nume='([^']+)")
        # Skip incomplete entries and trailers.
        if not dtype or not dpost or not dnume or dnume == 'trailer':
            continue

        url = get_url(dpost, dnume, dtype, item.url)
        if not url:
            continue
        server = servertools.get_server_from_url(url)
        if not server or server == 'directo':
            continue
        url = servertools.normalize_url(server, url)

        lang = scrapertools.find_single_match(option,
                                              "/img/flags/([^.']+)").lower()

        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 server=server,
                 title='',
                 url=url,
                 language=lang_labels.get(lang, lang)))

    return itemlist
示例#26
0
文件: zoowoman.py 项目: Jaloga/xiaomi
def play(item):
    """Resolve the site's javascript redirect page into a playable item."""
    logger.info()
    itemlist = []

    if host not in item.url:
        # Already an external url: pass through unchanged.
        itemlist.append(item.clone())
        return itemlist

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)
    url = scrapertools.find_single_match(data,
                                         "window.location.href='([^']+)")
    if not url:
        return itemlist
    # ~ logger.info(url)

    if item.server == 'torrent':
        servidor = 'torrent'
    elif '.us.archive.org' in url:
        # archive.org files play directly.
        servidor = 'directo'
    else:
        servidor = servertools.get_server_from_url(url)
        if not servidor or servidor == 'directo':
            return itemlist
        url = servertools.normalize_url(servidor, url)

    itemlist.append(item.clone(url=url, server=servidor))
    return itemlist
示例#27
0
def play(item):
    """Query the doo_player ajax endpoint for the embed url and resolve it."""
    logger.info()
    itemlist = []

    payload = urllib.urlencode({
        'action': 'doo_player_ajax',
        'post': item.dpost,
        'nume': item.dnume,
        'type': item.dtype
    })
    data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php',
                                  post=payload,
                                  headers={'Referer': item.url}).data

    url = scrapertools.find_single_match(data, "src='([^']+)'")

    if url.startswith(host):
        # The endpoint answered with an internal redirector; read Location.
        location = httptools.downloadpage(url,
                                          follow_redirects=False,
                                          only_headers=True).headers.get(
                                              'location', '')
        if location != '':
            try:
                # The real target may be base64-wrapped in a "y=" parameter.
                encoded = scrapertools.find_single_match(location, "y=(.*)")
                if encoded != '':
                    url = base64.b64decode(encoded)
                else:
                    url = location
            except:
                url = location

    if url:
        servidor = servertools.get_server_from_url(url)
        if servidor and servidor != 'directo':  # server unknown or disabled
            url = servertools.normalize_url(servidor, url)
            itemlist.append(item.clone(url=url, server=servidor))

    return itemlist
示例#28
0
def play(item):
    """Resolve *item*: site page, stream-mx JSON sources, or pass-through."""
    logger.info()
    itemlist = []

    if item.url.startswith(host):
        # A page on this site hides the real link in <a id="link">.
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)
        link = scrapertools.find_single_match(data,
                                              '<a id="link"[^>]*href="([^"]+)')
        if link:
            server = servertools.get_server_from_url(link)
            if server and server != 'directo':
                itemlist.append(item.clone(
                    url=servertools.normalize_url(server, link),
                    server=server))

    elif 'stream-mx.com/' in item.url:
        # stream-mx exposes a JSON "sources" list.
        data = httptools.downloadpage(item.url).data
        # ~ logger.debug(data)
        sources = scrapertools.find_single_match(data,
                                                 '"sources":\s*\[(.*?)\]')
        for source in scrapertools.find_multiple_matches(sources, "\{(.*?)\}"):
            file_url = scrapertools.find_single_match(source,
                                                      '"file":\s*"([^"]+)')
            if not file_url:
                continue
            kind = scrapertools.find_single_match(source,
                                                  '"type":\s*"([^"]+)')
            if kind == 'hls':
                itemlist.append(item.clone(url=file_url, server='m3u8hls'))
            else:
                # Non-hls entries become [label, url] pairs for selection.
                label = scrapertools.find_single_match(source,
                                                       '"label":\s*"([^"]+)')
                itemlist.append([label, file_url])

    else:
        itemlist.append(item.clone())

    return itemlist
示例#29
0
def play(item):
    """Resolve *item* into playable entries.

    Three-stage resolution:
      1. If doo_player ajax data is present (dpost/dnume/dtype), query the
         admin-ajax endpoint and try several src/embed_url formats. A known
         external server is appended directly; a 'directo' stream-mx url is
         stashed in item.url so the stream-mx branch below handles it.
      2. A url starting with this site's host hides the real link inside
         an <a id="link"> anchor (quoted or unquoted variants).
      3. A stream-mx.com url exposes a JSON "sources" list: hls entries
         become m3u8hls items, others become [label, url] pairs.
    Anything else is passed through unchanged.
    """
    logger.info()
    itemlist = []

    if item.dpost and item.dnume and item.dtype:
        post = {
            'action': 'doo_player_ajax',
            'post': item.dpost,
            'nume': item.dnume,
            'type': item.dtype
        }
        data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php',
                                      post=post,
                                      headers={
                                          'Referer': item.referer
                                      }).data
        # Unescape JSON slashes before pattern matching.
        data = data.replace('\\/', '/')
        # Try the several src formats the endpoint is known to return.
        url = scrapertools.find_single_match(data, "src='([^']+)")
        if not url: url = scrapertools.find_single_match(data, 'src="([^"]+)')
        if not url: url = scrapertools.find_single_match(data, 'src=([^ >]+)')
        if not url:
            url = scrapertools.find_single_match(data, '"embed_url":"([^"]+)')
        if url:
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))
            elif servidor == 'directo' and 'stream-mx.com/' in url:
                # Defer: the stream-mx branch below will process item.url.
                item.url = url
        if not item.url: return itemlist

    if item.url.startswith(host):
        data = httptools.downloadpage(item.url).data
        # The real link is wrapped in an anchor; try quoted then unquoted.
        url = scrapertools.find_single_match(data,
                                             '<a id="link"[^>]*href="([^"]+)')
        if not url:
            url = scrapertools.find_single_match(
                data, '<a id=link[^>]*href=([^ >]+)')
        if url:
            servidor = servertools.get_server_from_url(url)
            if servidor and servidor != 'directo':
                url = servertools.normalize_url(servidor, url)
                itemlist.append(item.clone(url=url, server=servidor))

    elif 'stream-mx.com/' in item.url:
        # Switch to the API variant that includes subs/version parameters.
        item.url = item.url.replace('v=2&', '') + '&sub=&ver=si'
        data = httptools.downloadpage(item.url,
                                      headers={
                                          'Referer': item.referer
                                      }).data
        # ~ logger.debug(data)
        bloque = scrapertools.find_single_match(data, '"sources":\s*\[(.*?)\]')
        for enlace in scrapertools.find_multiple_matches(bloque, "\{(.*?)\}"):
            v_url = scrapertools.find_single_match(enlace,
                                                   '"file":\s*"([^"]+)')
            if not v_url: continue
            v_type = scrapertools.find_single_match(enlace,
                                                    '"type":\s*"([^"]+)')
            if v_type == 'hls':
                itemlist.append(item.clone(url=v_url, server='m3u8hls'))
            else:
                # Non-hls entries are returned as [label, url] pairs for a
                # quality-selection step downstream.
                v_lbl = scrapertools.find_single_match(enlace,
                                                       '"label":\s*"([^"]+)')
                itemlist.append([v_lbl, v_url])

    else:
        itemlist.append(item.clone())

    return itemlist
示例#30
0
def findvideos(item):
    """Scrape the page of *item* and return playable Items.

    Two sections are parsed: the 'playeroptionsul' player tabs (whose
    iframes live in matching <div id='source-player-N'> blocks) and the
    'videos' table of user-submitted online links. The download table is
    intentionally left unparsed (see trailing comment).
    """
    logger.info()
    itemlist = []

    # Flag code / language word -> display language label.
    IDIOMAS = {
        'es': 'Esp',
        'mx': 'Lat',
        'en': 'VOSE',
        'castellano': 'Esp',
        'latino': 'Lat',
        'subtitulado': 'VOSE'
    }

    data = httptools.downloadpage(item.url).data
    # ~ logger.debug(data)

    # Fuentes de vídeo (player tabs)
    bloque = scrapertools.find_single_match(
        data, "<ul id='playeroptionsul'[^>]*>(.*?)</ul>")

    matches = scrapertools.find_multiple_matches(
        bloque, "<li id='player-option-(\d+)'(.*?)</li>")
    for numero, enlace in matches:
        # ~ logger.debug(enlace)

        # The iframe for tab N lives elsewhere in the page:
        # <div id='source-player-N'...><div class='pframe'><iframe ... src='...'
        url = scrapertools.find_single_match(
            data,
            "<div id='source-player-%s[^>]*><div class='pframe'><iframe[^>]* src='([^']+)"
            % numero)
        if not url: continue
        servidor = servertools.get_server_from_url(url)
        if not servidor or servidor == 'directo': continue
        url = servertools.normalize_url(servidor, url)
        lang = scrapertools.find_single_match(enlace,
                                              "/img/flags/([^.']+)").lower()

        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 server=servidor,
                 title='',
                 url=url,
                 language=IDIOMAS.get(lang, lang)))

    # Ver en línea (user-submitted links table)
    bloque = scrapertools.find_single_match(
        data, "<div id='videos'(.*?)</table></div></div></div>")

    matches = scrapertools.find_multiple_matches(
        bloque, "<tr id='link-[^']+'>(.*?)</tr>")
    for enlace in matches:
        # ~ logger.debug(enlace)

        url = scrapertools.find_single_match(enlace, " href='([^']+)")
        servidor = corregir_servidor(
            scrapertools.find_single_match(enlace, "domain='([^'.]+)"))
        if not url or not servidor: continue
        uploader = scrapertools.find_single_match(
            enlace, "author/[^/]+/'>([^<]+)</a>")
        tds = scrapertools.find_multiple_matches(enlace, '<td>(.*?)</td>')
        # FIX: quality lives in the 2nd cell and language in the 3rd;
        # a malformed row previously raised IndexError — skip it instead.
        if len(tds) < 3: continue
        quality = scrapertools.find_single_match(
            tds[1], "<strong class='quality'>([^<]+)")
        lang = tds[2].lower()

        itemlist.append(
            Item(channel=item.channel,
                 action='play',
                 server=servidor,
                 title='',
                 url=url,
                 language=IDIOMAS.get(lang, lang),
                 quality=quality,
                 quality_num=puntuar_calidad(quality),
                 other=uploader))

    # Descarga ?
    # ~ bloque = scrapertools.find_single_match(data, "<div id='download'(.*?)</table></div></div></div>")

    return itemlist