Example #1
def descargas(args):
   
    url = args.get('url_', None)
    html = tools.getUrl(url[0])
    pagedata = BeautifulSoup(html, "html.parser")
    ldesc = pagedata.find_all("article")
    for link in ldesc:
        try:
            ldiv = link.findAll("div")
            href = ldiv[0].find("a").get("href")
            # strip tabs and newlines from the title; str.translate(None, ...) only worked on Python 2
            title = link.find("h3").text.replace("\t", "").replace("\n", "")
            thumbnail = ldiv[0].find("img").get("src")
            action = canalAction + "descargaspage"
            url = tools.build_url({'action': action, 'url_': href})
            tools.addItemMenu(label=title, thumbnail=thumbnail, url=url, IsPlayable='false', isFolder=True)
        except Exception as e:
            logger.debug(str(e))      
            
    nextpage = pagedata.find_all("a", class_="next page-numbers")
    try:
        href = nextpage[0].get("href")
        title = config.get_localized_string(30012)
        thumbnail = ''
        action = canalAction + "descargas"
        url = tools.build_url({'action': action, 'url_': href})
        tools.addItemMenu(label=title, thumbnail=thumbnail, url=url, IsPlayable='false', isFolder=True)
    except Exception as e:
        logger.debug(str(e))      
    logger.info("")
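A note on the url[0] indexing above: in a Kodi plugin the args dict is typically built with urllib.parse.parse_qs, which maps every query parameter to a list of values. A minimal dispatch sketch under that assumption (this boilerplate is not part of the example itself):

import sys
from urllib.parse import parse_qs

# sys.argv[2] holds the plugin query string, e.g. "?action=descargas&url_=http%3A%2F%2F..."
args = parse_qs(sys.argv[2][1:])
# parse_qs yields lists: {'action': ['descargas'], 'url_': ['http://...']},
# which is why the handlers index with url[0]
if args.get('action', [''])[0] == 'descargas':
    descargas(args)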
Example #2
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info()
    itemlist = []

    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}

    data = tools.getUrl(page_url)

    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')

    try:
        code = scrapertools.find_single_match(data, '<p style="" id="[^"]+">(.*?)</p>')
        # the page obfuscates the URL with JavaScript parseInt() arithmetic;
        # swapping parseInt for int lets eval() run the same expressions in Python
        _0x59ce16 = eval(scrapertools.find_single_match(data, '_0x59ce16=([^;]+)').replace('parseInt', 'int'))
        _1x4bfb36 = eval(scrapertools.find_single_match(data, '_1x4bfb36=([^;]+)').replace('parseInt', 'int'))
        parseInt = eval(scrapertools.find_single_match(data, r'_0x30725e,(\(parseInt.*?)\),').replace('parseInt', 'int'))
        url = decode(code, parseInt, _0x59ce16, _1x4bfb36)
        url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get('location')
        extension = scrapertools.find_single_match(url, r'(\..{,3})\?')
        itemlist.append([extension, url, 0, subtitle])

    except Exception:
        logger.info()
        if config.get_setting('api', __file__):
            url = get_link_api(page_url)
            if url:
                extension = scrapertools.find_single_match(url, r'(\..{,3})\?')
                itemlist.append([extension, url, 0, subtitle])
    logger.debug(itemlist)

    return itemlist
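The page_url.split("|", 1) branch implies a caller-side convention: a referer may be appended to the URL behind a pipe. A hypothetical call (both URLs invented for illustration):

video_page = "https://hostname.example/embed/abc123"   # hypothetical embed URL
referer = "https://hostname.example/player"            # hypothetical referring page
itemlist = get_video_url(video_page + "|" + referer)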
Example #3
def tvshow_tmdb(idShow):
    urltvshow = "https://api.themoviedb.org/3/tv/" + str(
        idShow) + "?api_key=" + key_tmdb + "&language=es-ES"
    logger.debug(urltvshow)
    datatvshow = tools.getUrl(urltvshow)
    dict_datatvshow = jsontools.load(datatvshow)
    return dict_datatvshow
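The returned dict mirrors TMDB's /tv/{id} payload, so standard fields such as name, overview and number_of_seasons are available. A usage sketch, assuming key_tmdb holds a valid API key:

show = tvshow_tmdb(1396)  # 1396 is Breaking Bad's TMDB id
logger.debug(show.get('name'))
logger.debug(str(show.get('number_of_seasons')) + ' seasons')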
Example #4
def menu():
    domain = "http://latelete.tv/"
    html = tools.getUrl(domain)
    pattern = '<center>(.*?)</center>'
    canalesTabla = tools.findall(pattern, html, re.DOTALL)[0]
    logger.debug(canalesTabla)
    pattern = '<td>(.*?)</td>'
    canallista = tools.findall(pattern, canalesTabla, re.DOTALL)
    for canal in canallista:
        try:
            pattern = '<a href.*title="(.*?)"><.*></a>'
            label = tools.findall(pattern, canal, re.DOTALL)[0]
            pattern = '<img src="(.*?)" height'
            thumbnail = domain + tools.findall(pattern, canal, re.DOTALL)[0]
            pattern = '<a href="(.*?)" title=".*</a>'
            url_ = tools.findall(pattern, canal, re.DOTALL)[0]
            url = tools.build_url({'action': 'lateletetvAction', 'url_': url_})
            tools.addItemMenu(label=label,
                              thumbnail=thumbnail,
                              url=url,
                              IsPlayable='false',
                              isFolder=True)
        except Exception as e:
            logger.debug(str(e))
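tools.findall itself is not shown in these examples; judging from its call sites it behaves like a thin wrapper over re.findall. A sketch of that assumed helper:

import re

def findall(pattern, text, flags=0):
    # assumed behaviour: plain delegation to re.findall,
    # returning the list of captured groups
    return re.findall(pattern, text, flags)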
Example #5
def mainlistotro(args):
    html = tools.getUrl(host)
    logger.debug(html)
    pagedata = BeautifulSoup(html, "html.parser")
    table = pagedata.find_all("table", class_="wdn_responsive_table flush-left")

    # a bs4 Tag already supports find_all; no need to re-parse str(table[0])
    links = table[0].find_all("td")
    for link in links:
        href = link.find("a").get("href")
        title = href  # the original reuses the link URL as the label
        thumbnail = link.find("img").get("src")
        url = tools.build_url({'action': 'eventoshq', 'url_': href})
        tools.addItemMenu(label=title, thumbnail=thumbnail, url=url, IsPlayable='false', isFolder=True)
Example #6
def f12018(item):
    logger.info("")
    itemlist = []
    data = tools.getUrl(item.url)
    patron = '<tr>(.*?)</tr>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for linea in matches:
        patron2 = '<td.*;">.*<span.*>(.*?)</span>.*</td>.*<td.*>.*<a href="(.*)">.*<span.*>(.*?)</span>.*</td>.*<td.*>.*<a href="(.*?)">.*</td>'
        matches2 = re.compile(patron2, re.DOTALL).findall(linea)
        if matches2:
            logger.info()
            itemlist.append(Item(channel=item.channel, title=matches2[0][0] + " " + matches2[0][2],
                                 action="findvideos", url=matches2[0][1], thumbnail='', fanart=''))
            itemlist.append(Item(channel=item.channel, title=matches2[0][0],
                                 action="findvideos", url=matches2[0][3], thumbnail='', fanart=''))

    return itemlist
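Both patterns rely on re.DOTALL so that '.' also matches newlines, which matters because each <tr> block spans several lines of HTML. A toy demonstration:

import re

html = '<tr>\n<td>row</td>\n</tr>'
print(re.compile('<tr>(.*?)</tr>').findall(html))             # [] - '.' stops at newlines
print(re.compile('<tr>(.*?)</tr>', re.DOTALL).findall(html))  # ['\n<td>row</td>\n']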
Example #7
def envivo(args):
    logger.info("")
    url = args.get('url_', None)
    html = tools.getUrl(url[0])
    pagedata = BeautifulSoup(html, "html.parser")
    lcat = pagedata.find_all("td")
    for link in lcat:
        try:
            if "t535230_row_0" in link.get("id"):
                href = link.find("a").get("href")
                title = link.find("a").get("href")
                thumbnail = link.find("img").get("src")
                action=canalAction+"envivov2"
                url = tools.build_url({'action':action,'url_':href})
                tools.addItemMenu(label = title,thumbnail= thumbnail, url= url,IsPlayable = 'false', isFolder= True)     
        except Exception as e:
            logger.debug(str(e))       
    logger.info("")
Example #8
def todas(item):
    logger.info()
    itemlist = []
    data = tools.getUrl(item.url)
    patron = '<div id="video.*".*><div.*thumb.*<a href="(.*)"><.*data-src="(.*)" data-idcdn.*<p>.*title="(.*)".*</p></div>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    # the pattern captures (href, data-src, title) in that order
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        fanart = ''
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=fanart))

    # Pagination
    siguiente = scrapertools.find_single_match(
        data, r'<a rel="nofollow" class="next page-numbers" href="([^"]+)">Siguiente &raquo;</a></div>')
    title = 'Pagina Siguiente >>> '
    fanart = ''
    itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))
    return itemlist
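scrapertools.find_single_match conventionally returns an empty string when the pattern misses, so on the last page siguiente can be ''. A hedged refinement, not in the original, would guard the append:

if siguiente:  # only emit the 'next page' item when a next page actually exists
    itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart))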
Example #9
def descargaspage(args):
   
    url = args.get('url_', None)
    html = tools.getUrl(url[0])
    pagedata = BeautifulSoup(html, "html.parser")
    ldesc = pagedata.find_all("iframe")
    for link in ldesc:
        try:
            href = link.get("src")
            patron = 'http.*\/(.*)'
            matches = re.compile(patron, re.DOTALL).findall(href)
            texto=matches[0]
            logger.debug(texto)
            title = config.get_localized_string(30013) +" "+texto
#             title = ""
            thumbnail = ""
            action=canalAction+"descargasplay"
            url = tools.build_url({'action':action,'url_':href})
            tools.addItemMenu(label = title,thumbnail= thumbnail, url= url,IsPlayable = 'true', isFolder= False)     
        except Exception as e:
            logger.debug(str(e))      
            
    logger.info("")
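The greedy http.*/ consumes everything up to the last slash, so texto ends up as the final path segment of the iframe src. For illustration, with a hypothetical URL:

import re

matches = re.compile(r'http.*/(.*)', re.DOTALL).findall('https://hostname.example/e/abc123')
# matches == ['abc123']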
Example #10
def getvideo(url_):
    logger.debug(url_)
    domain = url_
    html = tools.getUrl(domain)
    label = 'prueba'
    thumbnail = ''
    url = 'rtmp://31.220.0.187/privatestream/ playpath=partidos965?keys=WVTsGhsO-0Apepn4-vkzWg&keyt=1448255210'
    # xlistitem and xbmc_player are assumed to be defined at module level
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    playlist.add(url, xlistitem)
    xbmc_player.play(playlist, xlistitem)
    tools.addItemMenu(label=label,
                      thumbnail=thumbnail,
                      url=url,
                      IsPlayable='true',
                      isFolder=False)
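xlistitem and xbmc_player are defined elsewhere in the module; a minimal sketch of how they are typically created with Kodi's API (an assumption, they are not shown in the source):

import xbmc
import xbmcgui

xlistitem = xbmcgui.ListItem(label='prueba')   # assumed list item for the stream
xlistitem.setInfo('video', {'title': 'prueba'})
xbmc_player = xbmc.Player()                    # assumed module-level player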
Example #11
def search_themoviedb(ShowName):
    # quote_plus URL-encodes spaces and non-ASCII characters in the query;
    # the old str.encode(...).replace(" ", "+") raises TypeError on Python 3
    from urllib.parse import quote_plus
    scrapedurl = ("https://api.themoviedb.org/3/search/tv?api_key=" + key_tmdb +
                  "&language=es-ES&query=" + quote_plus(ShowName))
    data = tools.getUrl(scrapedurl)
    dict_data = jsontools.load(data)
    return dict_data
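Combined with tvshow_tmdb from Example #3, a hedged end-to-end lookup might read:

results = search_themoviedb("Breaking Bad")
if results.get('results'):
    idShow = results['results'][0]['id']  # first search hit
    show = tvshow_tmdb(idShow)            # full /tv/{id} details
    logger.debug(show.get('name'))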