Example #1
def drawBbcCoUkNew(url):
    htmlContent = Downloader.getContentFromUrl(url=url)
    title = Decoder.extract('<p class="story-body__introduction">', "</p><div", htmlContent)
    body = ""  # fallback so the cleanup below never references an undefined name
    if 'property="articleBody"' in htmlContent:
        body = Decoder.extract(
            'property="articleBody"',
            "                                                                                                </div>",
            htmlContent,
        )
        body = body.replace('<span class="off-screen">Image copyright</span>', "")
        body = body.replace('<span class="story-image-copyright">AFP</span>', "")
        body = body.replace('<span class="story-image-copyright">Reuters</span>', "")
        body = body.replace('<span class="off-screen">Image caption</span>', "")
        body = body.replace('<span class="off-screen">Media caption</span>', "")
        while '<span class="media-caption__text">' in body:
            line = Decoder.extractWithRegex('<span class="media-caption__text">', "</span>", body)
            body = body.replace(line, "")
    elif 'class="text-wrapper"' in htmlContent:
        # special content
        body = Decoder.extract('class="text-wrapper"', "</p>\n", htmlContent)
        dates = Decoder.extractWithRegex('<div class="date', "</div>", body)
        lastUpdate = Decoder.extractWithRegex('<p class="date ', "</p>", body)
        body = body.replace(dates, "")
        body = body.replace(lastUpdate, "")
    elif '<figcaption class="sp-media-asset' in htmlContent:
        body = Decoder.extract('<figcaption class="sp-media-asset', "</p><div ", htmlContent)
        if ">" in body:
            body = body[body.find(">") + 1 :]
    body = Decoder.removeHTML(body).replace(".", ".\n").replace(">", "")
    logger.debug("body is: " + body)
    drawNew(textContent=(body))
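Every example on this page leans on the same small string-slicing helpers: Decoder.extract(start, end, text) appears to return whatever sits between the first occurrence of the two markers, while Decoder.extractWithRegex(start, end, text) appears to return the matched span with both markers included, which is why callers strip the tail with .replace(end, "") afterwards. A few examples also use Decoder.rExtract, which seems to anchor the search from the right-hand end. The real Decoder class ships with these add-ons and may differ; the sketch below is only inferred from how the calls are used here, not the actual implementation.

import re

class Decoder(object):
    # Minimal sketch inferred from usage in the examples; an assumption, not the real code.

    @staticmethod
    def extract(start, end, text):
        # Text between the first `start` and the following `end`, markers excluded;
        # empty string when either marker is missing.
        i = text.find(start)
        if i == -1:
            return ""
        i += len(start)
        j = text.find(end, i)
        return text[i:j] if j != -1 else ""

    @staticmethod
    def extractWithRegex(start, end, text):
        # Like extract(), but the returned span keeps both markers.
        match = re.search(re.escape(start) + ".*?" + re.escape(end), text, re.DOTALL)
        return match.group(0) if match else ""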
Example #2
 def getChannels(page):
     x = []
     if str(page) == "0":
         page = Live9net.MAIN_URL
     html = Live9net.getContentFromUrl(page, "", Live9net.cookie, "")
     # print html
     if html.find("ESPN</") > -1:  # it's a list, needs decode
         table = Decoder.extract("ESPN</", "<div>", html)
         x = Live9net.extractElements(table)
         logger.debug("live9 channels logic done!")
     else:
         iframeUrl = Decoder.extract('src="', '"></iframe>', html)
         html2 = Live9net.getContentFromUrl(iframeUrl, "", Live9net.cookie, page)
         # print html2
         if html2.find('src="http://sawlive.tv/') > -1 or html2.find('src="http://www3.sawlive') > -1:
             if html2.find('src="http://sawlive.tv/') > -1:
                 scriptSrc = Decoder.extractWithRegex("http://sawlive", '"></script>', html2).replace(
                     '"></script>', ""
                 )
             else:
                 scriptSrc = Decoder.extractWithRegex("http://www3.sawlive", '"></script>', html2).replace(
                     '"></script>', ""
                 )
             finalRtmpUrl = Decoder.extractSawlive(scriptSrc, iframeUrl)
             element = {}
             element["link"] = finalRtmpUrl
             element["title"] = "Watch channel"
             element["permalink"] = True
             logger.debug("finished append element!")
             x.append(element)
     return x
Example #3
 def getChannels(page='0'):
     x = []
     if str(page) == '0':
         page = Yomvies.CHANNELS_PAGE
         logger.debug("loading json data from: "+page)
         bruteJSON = Yomvies.getContentFromUrl(page,"",Yomvies.cookie,Yomvies.MAIN_URL)
         logger.debug("parsing string to json...")
         i = 0
         for jsonChannel in bruteJSON.split('{"CodCadenaTv":'):
             if i>0:
                 element = {}
                 codTv = Decoder.extract('"','"',jsonChannel)
                 element["title"] = Decoder.extract('"Nombre":"','"',jsonChannel)
                 element["thumbnail"] = Decoder.extract('"Logo":"','"',jsonChannel).replace("\\","")
                 m3u8Url = Decoder.extract('"PuntoReproduccion":"','"',jsonChannel).replace("{network}",Yomvies.NETWORK).replace("\\","")
                 logger.debug("Appending channel: "+element["title"]+", with url: "+m3u8Url+", img: "+element["thumbnail"])
                 headers = 'Referer='+codTv
                 element["link"] = m3u8Url+"|"+headers
                 x.append(element)
             i+=1
     else:
         link = Yomvies.extractTargetVideo(page)
         element = {}
         element["title"] = page
         element["link"] = link
         element["finalLink"] = True
         x.append(element)
     return x
Example #4
    def searchLists(param):
        url = (
            "https://www.googleapis.com/customsearch/v1element?"
            "key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY"
            "&rsz=filtered_cse"
            "&num=20"
            "&hl=en"
            "&prettyPrint=false"
            "&source=gcsc"
            "&gss=.com"
            "&sig=8bdfc79787aa2b2b1ac464140255872c"
            "&cx=013305635491195529773:0ufpuq-fpt0"
        )
        url += "&q=" + param + "&sort=date&googlehost=www.google.com&callback=google.search.Search.apiary846"

        results = Pastebin.getContentFromUrl(url)
        x = []
        jsonString = Decoder.extract(',"results":', "]});", results)
        logger.debug(jsonString)
        for jsonResult in results.split('{"GsearchResultClass"'):
            element = {}
            link = Decoder.extract('"url":"', '","', jsonResult)
            if "pastebin.com" in link and "/raw/" not in link:
                link = link[: link.rfind("/")] + "/raw/" + link[link.rfind("/") + 1 :]
            title = Decoder.extract('"title":"', '","titleNoFormatting"', jsonResult)
            if "http" in link:
                logger.debug("appending result: " + title + ", url: " + link)
                element["title"] = title
                element["link"] = link
                x.append(element)
        return x
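Example #4 walks the raw callback response by splitting on '{"GsearchResultClass"' and slicing fields out by hand, even though jsonString already holds the results array (minus the closing bracket that the "]});" end marker consumed). A hedged alternative, assuming the extracted fragment is valid JSON carrying the same "url" and "title" fields the example reads; parseResultsJson is an illustrative name, not part of the add-on:

import json

def parseResultsJson(jsonString):
    # Sketch only: restore the ']' eaten by the end marker and let the json
    # module do the field extraction instead of string splitting.
    x = []
    for result in json.loads(jsonString + "]"):
        link = result.get("url", "")
        if "pastebin.com" in link and "/raw/" not in link:
            link = link[: link.rfind("/")] + "/raw/" + link[link.rfind("/") + 1 :]
        if "http" in link:
            x.append({"title": result.get("title", ""), "link": link})
    return x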
Example #5
 def extractTargetVideoJSON(page):
     title = ''
     link = ''
     thumbnail = ''
     headers = Youtube.buildHeaders()
     response = Youtube.getContentFromUrl(url=str(page + "?pbj=1"), headers=headers)
     logger.debug("response is: "+response)
     try:
         responseJ = Decoder.extract('ytplayer.config = ','};',response)+"}"
         #logger.debug("json extracted is: " + responseJ)
         jsonResponse = json.loads(responseJ)
         logger.debug("json loaded")
         bruteVideoInfo = jsonResponse["args"]
         logger.debug("obtained brute video info...")
         title = bruteVideoInfo["title"]
         url = bruteVideoInfo["adaptive_fmts"]
         url = Decoder.extract('url=',",",url)
         url = urllib.unquote(url)
         #url = url[:-1]
         link = url  # keep the decoded stream url; otherwise element["link"] below stays empty
         thumbnail = bruteVideoInfo["thumbnail_url"]
         logger.debug("extracted final url: "+url)
     except:
         logger.error("error parsing video info")
         pass
     element = {}
     element["title"] = title
     element["link"] = link
     element["thumbnail"] = thumbnail
     element["finalLink"] = True
     return element
Example #6
 def extractChannel(html,referer):
     element = {}
     logger.debug('processing html...')
     if html.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="')>-1:
         element = Cineestrenostv.extractIframeChannel(html,referer)
     elif html.find('.php')>-1 and referer.find(".php")==-1:
         logger.debug("proccessing level 1, cookie: "+Cineestrenostv.cookie)
         iframeUrl = Decoder.extractWithRegex('http://','.php',html)
         if iframeUrl.find('"')>-1:
             iframeUrl = iframeUrl[0:iframeUrl.find('"')]
         html2 = Cineestrenostv.getContentFromUrl(iframeUrl,"",Cineestrenostv.cookie,referer)
         if html2.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="')>-1 or '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" allowfullscreen width="653" height="403" src="' in html2:
             element = Cineestrenostv.extractIframeChannel(html2,iframeUrl)
     elif html.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="500" src="')>-1:
         iframeUrl = Decoder.extract('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="500" src="','"></iframe>',html) #same case with different width and height: TODO: change to regex!!
         html2 = Cineestrenostv.getContentFromUrl(iframeUrl,"","",referer)
         if html2.find('<th scope="col"><a href="/')>-1:
             partialLink = Decoder.extract('<th scope="col"><a href="/','"><font color="ffffff">',html2)
             completeLink = Cineestrenostv.MAIN_URL+"/"+partialLink
             html3 = Cineestrenostv.getContentFromUrl(completeLink,"",Cineestrenostv.cookie,iframeUrl)
             if html3.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="')>-1:
                 element = Cineestrenostv.extractIframeChannel(html3,completeLink)
     elif referer.find("php")!=-1:
         referer = referer.replace("ñ","%C3%B1")
         html2 = Cineestrenostv.getContentFromUrl(referer,"",Cineestrenostv.cookie,referer)
         element = Cineestrenostv.extractIframeChannel(html2,referer)
     return element
Example #7
 def extractElements(table):
     x = []
     i = 0
     for value in table.split('<a '):
         logger.debug("loop: "+str(i))
         if i>0:
             element = {}
             logger.debug("processing html: "+value)
             if value.find('<img ')==-1:
                 title = Decoder.rExtract(">",'</a></li>',value)
                 link = Decoder.extract("href=\"/",'"',value)
                 if title == '</a':
                     title = Decoder.extract('class="menu-item">','<',value).replace("&nbsp;","")
                 element["title"] = title
                 element["link"] = Skylinewebcamscom.MAIN_URL+link
                 if len(title)>0 and link.find("#")==-1 and len(element["link"])>len(Skylinewebcamscom.MAIN_URL) and (title.find("<")==-1 and title.find(">")==-1):
                     logger.debug("append: "+title+", link: "+element["link"])
                     x.append(element)
                 else:
                     logger.debug("discarted: "+title+", link: "+element["link"])
             else:
                 img = "http://"+Decoder.extract("data-original=\"//",'" ',value)
                 title = Decoder.extract("class=\"title\">",'</span>',value)
                 link = Decoder.extract("href=\"/",'"',value)
                 element["title"] = title
                 element["link"] = Skylinewebcamscom.MAIN_URL+link
                 element["thumbnail"] = img
                 element["permaLink"] = True
                 if link.find(".html")>-1 and (title.find("<")==-1 and title.find(">")==-1):
                     logger.debug("append: "+title+", link: "+element["link"]+", img: "+element["thumbnail"])
                     x.append(element)
                 else:
                     logger.debug("discarted: "+title+", link: "+element["link"]+", img: "+element["thumbnail"])
         i+=1
     return x
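Most extractElements variants on this page share the same shape: split the HTML on a per-item marker, skip the first chunk (everything before the first item), pull each field with Decoder.extract, and append only the entries that pass a sanity check. A generic sketch of that pattern; the function name, the marker pairs and baseUrl are placeholders, not values from any particular add-on:

def extractElementsGeneric(table, itemMarker, titleMarkers, linkMarkers, baseUrl=""):
    # Sketch of the shared split/extract/filter pattern; all markers are placeholders.
    x = []
    for i, value in enumerate(table.split(itemMarker)):
        if i == 0:
            continue  # the text before the first item carries no entry
        element = {}
        element["title"] = Decoder.extract(titleMarkers[0], titleMarkers[1], value)
        element["link"] = baseUrl + Decoder.extract(linkMarkers[0], linkMarkers[1], value)
        if len(element["title"]) > 0 and "<" not in element["title"]:
            x.append(element)
    return x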
Example #8
 def extractElementsAlbum(table):
     x = []
     i = 0
     for value in table.split(
             '<li class="topListenedBox click js-lateral-info" onclick=\'albumClick('
     ):
         if i > 0:
             element = {}
             title = Decoder.extract('<div class="topListenedBoxDiv">',
                                     '</div>',
                                     value).replace('<br>', " - ")
             link = Decoder.extract('","', '");\'>', value)
             img = Decoder.extract(' data-src="', '" ', value)
             element["title"] = title
             if link.find(".mp3") == -1:
                 element["link"] = base64.standard_b64encode(
                     Fildonet.ALBUM + link)
             else:
                 element["link"] = link
             element["thumbnail"] = img
             logger.info("append3: " + title + ", link: " +
                         element["link"] + ", thumbnail: " +
                         element["thumbnail"])
             x.append(element)
         i += 1
     return x
Example #9
 def getChannels(page):
     x = []
     start = False
     if str(page) == '0':
         start = True
         page=Sports4u.MAIN_URL
     html = Sports4u.getContentFromUrl(page,"",Sports4u.cookie,"")
     #print html
     if start and 'live-channels-list">' in html: #it's a list, needs decode
         table = Decoder.extract('live-channels-list">','</li><br>',html)
         logger.debug("using menu table: "+table)
         x = Sports4u.extractElements(table)
         logger.debug("channel list logic done!")
     else:
         iframeUrl = Decoder.extract('<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" ','"></iframe>',html)
         iframeUrl = Decoder.extract('src="','"',iframeUrl)
         logger.debug("iframeUrl is: "+iframeUrl)
         html2 = Sports4u.getContentFromUrl(url=iframeUrl,referer=page)
         logger.debug("html is: "+html2)
         file = Cricfreetv.seekIframeScript(html2,page,iframeUrl)
         logger.debug("Finished file logic, obtained file: "+file)
         element = {}
         element["link"] = file
         element["title"] = "Watch streaming"
         x.append(element)
     return x
Example #10
 def extractFinalRtmpUrl(url,referer):
     rtmpUrl = ""
     html = Mamahdcom.getContentFromUrl(url,"",Mamahdcom.cookie,referer)
     if 'file:\'' in html:
         file = Decoder.extract("file:'",'\',',html)
         rtmp = file[0:file.rfind("/") + 1]
         playpath = file[file.rfind("/") + 1:]
         swfUrl = ""
         secureToken = "SECURET0KEN#yw%.?()@W!"
         if url.find("hdcast.org") > -1:
             swfUrl = "http://player.hdcast.org/jws/jwplayer.flash.swf"
         rtmpUrl = rtmp + " playPath=" + playpath + " swfUrl=" + swfUrl + " pageUrl=" + url + " flashver=WIN/2019,0,0,226 live=true timeout=14 token=" + secureToken
         logger.debug("built final rtmp link: " + rtmpUrl)
     elif 'allowtransparency="true" src=' in html:
             logger.debug("using second way...")
             secondIframe = Decoder.extract('allowtransparency="true" src=', ' ', html).replace("&amp;","&")
             logger.debug("found second way url: " + secondIframe+", referer: "+url)
             headers = {
                 "User-Agent": Downloader.USER_AGENT,
                 "Accept-Language": "en-US,en;q=0.8,es-ES;q=0.5,es;q=0.3",
                 "Upgrade-Insecure-Requests" : "1",
                 "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                 "Referer": url
             }
             html2 = Mamahdcom.getContentFromUrl(url=secondIframe,headers=headers)
             logger.debug("html2 is: "+html2)
             if 'file:"' in html2:
                 rtmpUrl = Decoder.extract('file:"', '",', html2)
                 logger.debug("using m3u8 for: "+rtmpUrl)
     return rtmpUrl
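The link assembled in extractFinalRtmpUrl follows the space-separated key=value option style (playPath, swfUrl, pageUrl, flashver, live, timeout, token) that librtmp-style players accept when the options are appended to the rtmp URL. A small illustrative helper, not part of the add-on, that composes the same kind of string from named parts:

def buildRtmpLink(rtmp, **options):
    # Illustrative only: append librtmp-style " key=value" options to an rtmp URL, e.g.
    # buildRtmpLink("rtmp://host/app/", playPath="stream", pageUrl=referer, live="true").
    return rtmp + "".join(" %s=%s" % (key, value) for key, value in sorted(options.items()))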
Example #11
 def getChannels(page):
     x = []
     logger.debug("using tvshowme...")
     if str(page) == '0':
         page=Tvshowme.MAIN_URL
         html = Tvshowme.getContentFromUrl(page,"",Tvshowme.cookie,"")
         table = Decoder.extract('<span class="yawp_wim_title">Latest 150 Posts</span> <ul>','</ul>',html)
         x = Tvshowme.extractElements(table)
     elif page=="search":
         #display keyboard, it will wait for result
         keyboard = XBMCUtils.getKeyboard()
         keyboard.doModal()
         text = ""
         if (keyboard.isConfirmed()):
             text = keyboard.getText()
             page = "http://www.tvshow.me/?s="+urllib.quote(text)
         html = Tvshowme.getContentFromUrl(url=page)
         logger.debug(html)
         table = Decoder.extract('<div id="content"', '<h3 class="assistive-text">', html)
         logger.debug("table is: "+table)
         x = Tvshowme.extractLinks2(table)
     else:
         html = Tvshowme.getContentFromUrl(page, "", Tvshowme.cookie, Tvshowme.MAIN_URL)
         logger.debug(html)
         table = Decoder.extract('<div id="content"', '</article>', html)
         x = Tvshowme.extractLinks(table)
     return x
Example #12
 def getChannels(page):
     x = []
     if str(page) == '0':
         html = Mamahdcom.getContentFromUrl(Mamahdcom.MAIN_URL,"",Mamahdcom.cookie,"")
     else:
         html = Mamahdcom.getContentFromUrl(page,"",Mamahdcom.cookie,"")
     #print html
     if page=='0': #menu
         table = Decoder.extract('<div class="standard row channels">','</div>',html)
         logger.debug("table is: "+table)
         x = Mamahdcom.extractElements(table)
         logger.debug("mamahd channels logic done!")
     else:
         iframeHtml = Decoder.extract("<iframe ","</iframe>",html)
         iframeUrl = Decoder.extract('src="','"',iframeHtml)
         html2 = Mamahdcom.getContentFromUrl(url=iframeUrl,referer=page)
         logger.debug("obtained html from iframe: "+iframeUrl+"; html: "+html2)
         if 'src="http://hdcast.org' in html2:
             logger.debug("found script, launching logic...")
             scriptUrl = Decoder.extract('<script type="text/javascript" src="','"></script>',html2)
             logger.debug("extracting script url... from: "+scriptUrl)
             iframeUrl2 = Mamahdcom.extractScriptIframeUrl(html2,scriptUrl,iframeUrl)
             logger.debug("script url extracted: "+iframeUrl2)
             finalRtmpUrl = Mamahdcom.extractFinalRtmpUrl(iframeUrl2,iframeUrl)
             logger.debug("rtmp extracted is: "+finalRtmpUrl)
             element = {}
             element["link"] = finalRtmpUrl
             element["title"] = "Watch channel"
             element["permaLink"] = True
             logger.debug("finished append element!")
             x.append(element)
     return x
Example #13
 def extractAllVideosFromHtml(html):
     x = []
     tableHtml = Decoder.extract('class="item-section">','<div class="branded-page-box search-pager',html)
     i=0
     for rowHtml in tableHtml.split('<div class="yt-lockup-dismissable yt-uix-tile">'):
         if i>0:
             logger.debug("row html is: "+rowHtml)
             element = {}
             link = "/watch?"+Decoder.extract('href="/watch?', '"', rowHtml)
             title = Decoder.extract('  title="','"', rowHtml)
             if 'youtube.com' not in link:
                 link = Youtube.MAIN_URL+link
             logger.debug("link: " + link + ", title is: " + title)
             image = Decoder.extractWithRegex('https://i.ytimg.com/','"',rowHtml).replace('"','')
             element["title"] = title
             element["page"] = link
             if '&amp;list=' not in link:
                 element["finalLink"] = True
             element["thumbnail"] = image
             x.append(element)
         i+=1
     #add next if pagination exists
     if '<div class="branded-page-box search-pager  spf-link ">' in html:
         bruteHtmlPaginate = Decoder.rExtract('<div class="branded-page-box search-pager  spf-link ">','<div class="branded-page-v2-secondary-col">',html)
         title = Decoder.rExtract(">","</span></a>",bruteHtmlPaginate)
         title = title[:len(title)-2]
         link = Decoder.rExtract('href="','" class="yt-uix-button', bruteHtmlPaginate)
         if 'youtube.com' not in link:
             link = Youtube.MAIN_URL + link
         element = {}
         element["title"] = title
         element["page"] = link
         logger.debug("link: " + link + ", title is: " + title)
         x.append(element)
     return x
Example #14
 def getWidthAndHeightParams(html):
     subUrl = ""
     if html.find("; v_width=")>-1:
         width = Decoder.extract("; v_width=",";",html)
         height = Decoder.extract("; v_height=",";",html)
         subUrl = "&vw="+width+"&vh="+height
         logger.debug("width-height subUrl now is: "+subUrl)
     return subUrl
Example #15
 def getWidthAndHeightParams(html):
     subUrl = ""
     if html.find("; width='")>-1:
         width = Decoder.extract("; width='","'",html)
         height = Decoder.extract("; height='","'",html)
         subUrl = "&width="+width+"&height="+height
     elif html.find("; v_height=")>-1:
         width = Decoder.extract("; v_width=",";",html)
         height = Decoder.extract("; v_height=",";",html)
         subUrl = "&vw="+width+"&vh="+height
     return subUrl
Example #16
 def extractElements(table):
     x = []
     for fieldHtml in table.split('<li>'):
         if fieldHtml.find("<a href=")>-1:
             element = {}
             element["link"] = Decoder.extract('<a href="','"',fieldHtml)
             element["title"] = Decoder.extract('alt="','">',fieldHtml)
             element["thumbnail"] = Decoder.extract('src="','" ',fieldHtml)
             logger.debug("found title: "+element["title"]+", link: "+element["link"]+", thumbnail: "+element["thumbnail"])
             if len(element["title"])>0:
                 x.append(element)
     return x
Example #17
 def extractScriptIframeUrl(html,scriptUrl,referer):
     iframeUrl = ""
     logger.debug("extracting script iframe... url: "+scriptUrl)
     scriptContent = Mamahdcom.getContentFromUrl(scriptUrl,"",Mamahdcom.cookie,referer)
     #print scriptContent
     iframeUrl = Decoder.extract('src="',"'",scriptContent)
     logger.debug("brute iframeUrl is: "+iframeUrl)
     if iframeUrl.find("?u=")>-1:
         if '<script type="text/javascript"> fid="' in html:
             id = Decoder.extract('<script type="text/javascript"> fid="','"; ',html)
         iframeUrl = iframeUrl+id+Mamahdcom.getWidthAndHeightParams(html)
     return iframeUrl
Example #18
 def extractElements(table):
     x = []
     for fieldHtml in table.split('</a>'):
         logger.debug("using html: "+fieldHtml)
         element = {}
         element["link"] = Decoder.extract('href="','"',fieldHtml)
         element["title"] = Decoder.extract("<span>","</span>",fieldHtml)
         element["thumbnail"] = Decoder.extract('<img src="','"',fieldHtml)
         element["permaLink"] = True
         logger.debug("found title: "+element["title"]+", link: "+element["link"]+", thumb: "+element["thumbnail"])
         if "http" in element["link"]:
             x.append(element)
     return x
Example #19
 def extractLinks2(html):
     x = []
     i=0
     for value in html.split('<h2 class="entry-title ">'):
         if i>1:
             logger.debug("partial html is: "+value)
             element = {}
             title = Decoder.extract(' title="','"', value)
             link = Decoder.extract('href="','"', value)
             element["title"] = title.replace("Permalink to ","").replace("&#8211;","-")
             element["link"] = link
             x.append(element)
         i+=1
     return x
Example #20
 def extractIframeChannel(contentHtml,referer):
     logger.debug("proccessing level 2, cookie: "+Cineestrenostv.cookie)
     iframeUrl2 = "dummy url"
     if contentHtml.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="653" height="403" src="')>-1:
         iframeUrl2 = Decoder.extract('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="653" height="403" src="','"></iframe>',contentHtml).replace("ñ","%C3%B1") #same case with different width and height: TODO: change to regex!!
     elif contentHtml.find('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="')>-1:
         iframeUrl2 = Decoder.extract('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" width="650" height="400" src="','"></iframe>',contentHtml).replace("ñ","%C3%B1") #normal cases, standard width and height
     elif '<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" allowfullscreen width="653" height="403" src="' in contentHtml:
         iframeUrl2 = Decoder.extract('<iframe scrolling="no" marginwidth="0" marginheight="0" frameborder="0" allowfullscreen width="653" height="403" src="','"></iframe>', contentHtml).replace("ñ", "%C3%B1")  # normal cases, standard width and height
     if iframeUrl2!="dummy url":
         html3 = Cineestrenostv.getContentFromUrl(iframeUrl2,"","",referer)
         return Cineestrenostv.mainLogicExtractIframeChannel(html3,iframeUrl2)
     else:
         return Cineestrenostv.mainLogicExtractIframeChannel(contentHtml,referer)
Example #21
 def extractElements(table,domain):
     x = []
     i = 0
     for value in table.split('"name"'):
         if i>0:
             element = {}
             title = Decoder.extract('"','"',value).replace('- ','')
             link = Decoder.extract('shortcut":"','"',value)
             element["title"] = title
             element["link"] = domain+"/channel/"+link+"/frame"
             logger.debug("append: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #22
 def extractElements(table):
     x = []
     i = 0
     for value in table.split('<li>'):
         logger.debug("loop: "+str(i))
         if i>0:
             element = {}
             title = Decoder.extract(">",'</a></li>',value)
             link = Decoder.extract("href=\"",'"',value)
             element["title"] = title
             element["link"] = base64.b64encode(str(Zonasportsme.MAIN_URL+link))
             logger.debug("append: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #23
 def extractElementsArtist(html):
     x = []
     i=0
     for value in html.split('<li class="topListenedBox click js-lateral-info " >'):
         if i>0:
             element = {}
             title = Decoder.extract('<div class="topListenedBoxDiv click js-lateral-info ">','</div>',value)
             element["title"] = title
             element["link"] = base64.standard_b64encode(Fildonet.ARTIST+title)
             if value.find('data-src="')!=-1:
                 element["thumbnail"] = Fildonet.MAIN_URL+Decoder.extract('data-src="','" ',value)
             logger.info("append1: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #24
 def extractElements(page,tableHtml):
     x = []
     i = 0
     for value in tableHtml.split('<a '):
         if i>0:
             element = {}
             title = Decoder.extract(">","</a>",value)
             link = Decoder.extract("href=\"","\"",value)
             element["title"] = page+" ver. "+title
             element["link"] = page[0:1]+"00/"+link
             if "://" not in element["link"]:
                 element["link"] = RTVE.MAIN_URL+element["link"]
             logger.debug("append: "+element["title"]+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #25
    def parseListSongs(songs,html):
        x = []
        #jsonSongs = json.load(songs)
        for elementHtml in html.split("<Channel "):
            element = {}
            title = Decoder.extract('name="','"',elementHtml)
            channel = Decoder.extract('<RTMP>','</RTMP>',elementHtml)
            url = Decoder.extract("<URL>","</URL>",elementHtml)
            element["title"] = title
            element["link"] = channel+" playpath="+(url.replace(' ','%20'))+" pageUrl="+R977Musiccom.MAIN_URL+" swfUrl="+R977Musiccom.REFERER
            element["finalLink"] = True
            if element["link"].find("rtmp")==0:
                logger.info("found element: "+element["title"]+", with link: "+element["link"])

                x.append(element)
        return x
Example #26
 def extractElementsAlbum(table):
     x = []
     i = 0
     for value in table.split('<div class="album">'):
         if i>0:
             element = {}
             title = Decoder.extract(' alt="','"/>',value)
             link = Decoder.extract('<a href="','" ',value)
             img = Decoder.extract('src="','" ',value)
             element["title"] = title
             element["link"] = Redmp3cc.MAIN_URL+link
             element["thumbnail"] = Redmp3cc.MAIN_URL+img
             logger.info("append: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #27
 def extractLinks(html):
     x = []
     i=0
     for value in html.split(' href='):
         if i>1:
             element = {}
             title = Decoder.extract('>','</a>', value).replace("&#8211;","-")
             link = Decoder.extract('"','"', value)
             element["title"] = title
             element["link"] = link
             element["finalLink"] = True
             if "<img" not in title and "tvshow.me" not in link:
                 logger.debug("append: " + title + ", link: " + element["link"])
                 x.append(element)
         i+=1
     return x
Example #28
 def extractIframe(html,referer):
     iframeUrl = ''
     if '<iframe frameborder="0" marginheight="0" allowfullscreen="true" marginwidth="0" height="555" src="' in html:
         iframeUrl = Decoder.extract('<iframe frameborder="0" marginheight="0" allowfullscreen="true" marginwidth="0" height="555" src="','"',html)
     elif '<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" src="' in html:
         iframeUrl = Decoder.extract('<iframe frameborder="0" marginheight="0" marginwidth="0" height="490" src="','"',html)
     if "'" in iframeUrl:
         iframeUrl = iframeUrl[0:iframeUrl.find("'")]
     logger.debug("level 1, iframeUrl: "+iframeUrl+", cookie: "+Cricfreetv.cookie)
     if iframeUrl!='':
         html = Cricfreetv.getContentFromUrl(iframeUrl,"",Cricfreetv.cookie,referer)
     file = Cricfreetv.seekIframeScript(html,referer,iframeUrl)
     item = {}
     item["title"] = referer
     item["link"] = file
     return item
Example #29
 def getChannels(page):
     x = []
     if str(page) == '0':
         page=Acetvru.MAIN_URL
         html = Acetvru.getContentFromUrl(page,"",Acetvru.cookie,"")
         html = Decoder.extract('var playlist = [',']',html)
         x = Acetvru.extractElements(html)
     return x
Example #30
    def extractElements(table):
        x = []
        for fieldHtml in table.split(' href="'):
            if fieldHtml.find("<li>")>-1 or fieldHtml.find(' <img src="')>-1:
                element = {}
                element["link"] = Zoptvcom.MAIN_URL+fieldHtml[0:fieldHtml.find('"')]
                if fieldHtml.find(' <img src="')>-1:
                    element["title"] = Decoder.extract("<span>","</span>",fieldHtml)
                    element["thumbnail"] = Zoptvcom.MAIN_URL+Decoder.extract('<img src="','"> <span>',fieldHtml)
                    logger.debug("found thumbnail: "+element["thumbnail"])
                else:
                    element["title"] = fieldHtml[fieldHtml.find('">')+2:].replace("<li>","").replace("</li>","").replace("</a>","").replace("<a","").rstrip(os.linesep)
                logger.debug("found title: "+element["title"]+", link: "+element["link"])
                if len(element["title"])>0:
                    x.append(element)

        return x
Example #31
 def getChannels(page):
     x = []
     if page == '0':
         page = RedeneobuxCom.LIST_PAGE
         results = RedeneobuxCom.getContentFromUrl(page)
         i=0
         for result in results.split('<div class="media">'):
             if i>0:
                 element = {}
                 img = Decoder.extract('<img src=\'',"'",result)
                 link = Decoder.extract('location.href=\'', "'", result)
                 title = Decoder.extract('\' alt=\'', "'", result)
                 if "http" in link:
                     logger.debug("appending result: "+title+", url: "+link)
                     element["title"] = title
                     element["link"] = link
                     element["thumbnail"] = img
                     x.append(element)
             i+=1
     else:
         content = RedeneobuxCom.getContentFromUrl(url=page,referer=RedeneobuxCom.LIST_PAGE)
         logger.debug("list content is: " + content)
         url = Decoder.extractWithRegex('http'," ",content).replace(" ","")
         logger.debug("url is: " + url)
         if 'adf' in url:
             listUrl = Decoder.decodeAdfly(url)
             logger.debug("list obtained is: "+listUrl)
             m3uContent = Downloader.getSimpleDownload(listUrl) #simple urllib2 download
             logger.debug("content: "+m3uContent)
             i=0
             for lineContent in m3uContent.split('#EXTINF:'):
                 if i>0:
                     title = Decoder.extract(',','\n',lineContent)
                     lineContent = lineContent[lineContent.find("\n"):]
                     urlContent = Decoder.extractWithRegex('http://',"\n",lineContent).replace('\n','')
                     element = {}
                     element["title"] = title
                     element["link"] = urlContent#+"|"+Downloader.getHeaders(listUrl)
                     element["thumbnail"] = ''
                     element["finalLink"] = True
                     if "://" in urlContent:
                         logger.debug("added: " + title + ", content: " + urlContent)
                         x.append(element)
                 i+=1
     return x
Example #32
 def extractElements(table):
     x = []
     element = {}
     element["title"] = "Search"
     element["link"] = "search"
     x.append(element)
     i=0
     for value in table.split('<li>'):
         if i>0:
             element = {}
             title = Decoder.extract('/">','</a>',value).replace("&#8211;","-")
             link = Decoder.extract('<a href="','"',value)
             element["title"] = title
             element["link"] = link
             logger.debug("append: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     return x
Example #33
 def extractMainChannels(html):
     x = []
     i = 0
     for value in html.split('guide-item yt-uix-sessionlink yt-valign spf-link'):
         if i>0 and value.find("href=\"")>-1 and value.find('title="')>-1:
             element = {}
             title = Decoder.extract('title="','"',value)
             link = Youtube.MAIN_URL+Decoder.extract('href="','"',value)
             element["title"] = title
             element["page"] = link
             if value.find('<img src="')>-1:
                 element["thumbnail"] = Decoder.extract('<img src="','"',value)
                 logger.debug("thumbnail: "+element["thumbnail"])
             logger.debug("append: "+title+", link: "+element["page"])
             if "Home" not in title and "Movies" not in title:
                 x.append(element)
         i+=1
     return x
Example #34
 def extractElementsArtist(html):
     x = []
     i = 0
     for value in html.split(
             '<li class="topListenedBox click js-lateral-info " >'):
         if i > 0:
             element = {}
             title = Decoder.extract(
                 '<div class="topListenedBoxDiv click js-lateral-info ">',
                 '</div>', value)
             element["title"] = title
             element["link"] = base64.standard_b64encode(Fildonet.ARTIST +
                                                         title)
             if value.find('data-src="') != -1:
                 element["thumbnail"] = Fildonet.MAIN_URL + Decoder.extract(
                     'data-src="', '" ', value)
             logger.info("append1: " + title + ", link: " + element["link"])
             x.append(element)
         i += 1
     return x
Example #35
 def getChannels(page, cookie=''):
     x = []
     print page
     if str(page) == '0':
         page = Radionet.MAIN_URL
         stations_lists = Radionet.getContentFromUrl(
             "http://www.radio.net/stations/", "", Radionet.cookie,
             Radionet.REFERER)
         x = Radionet.parseListStations(stations_lists)
     else:
         html = Radionet.getContentFromUrl(page, "", cookie,
                                           Radionet.REFERER)
         element = {}
         element["title"] = Decoder.extract('"seoTitle":"', '",', html)
         element["link"] = Decoder.extract('"streamUrl":"', '",', html)
         element["thumbnail"] = Decoder.extract('"logo100x100":"', '",',
                                                html)
         element["finalLink"] = True
         x.append(element)
     return x
Example #36
def getCatalogSearchMenu(params=[]):
    text = ""
    page = 1
    if type(params) is list:
        logger.debug("text")
        for element in params:
            logger.debug("text %s" % str(element))
            if "text" in element:
                text = element["text"]
            if "page" in element:
                page = element["page"]
    url = 'https://www.lexaloffle.com/bbs/lister.php?use_hurl=1&cat=7&sub=2&page=%s&sub=2&mode=carts&orderby=featured&search=%s' % (
        page, text)
    http = urllib3.PoolManager()
    logger.debug("lexaoffle url is: %s" % url)
    r = http.request('GET', url, preload_content=False)
    html = r.data.decode()
    bruteCards = Decoder.extract('pdat=[', '];', html)
    cards = []
    for bruteCardLine in bruteCards.split(",``],"):
        logger.debug("line is %s" % bruteCardLine)
        if '[' in bruteCardLine:
            element = {}
            bruteCardLine = bruteCardLine[:bruteCardLine.find('],')]
            name = Decoder.extract(', `', '`,', bruteCardLine)
            id = Decoder.extract("['", "'", bruteCardLine)
            link = 'https://www.lexaloffle.com/bbs/get_cart.php?cat=7&play_src=0&lid=%s' % id
            out = re.sub('[^A-Za-z0-9.\-,\ ]+', '', name) + ".p8.png"
            out = ROMS_PATH + "/pico8/" + out
            command = 'curl "%s" --create-dirs --output "%s" \n' % (link, out)
            element["external"] = command
            element["action"] = 'command-exit'
            element["title"] = name
            cards.append(element)
    #back
    element = {}
    element["title"] = "Back"
    element["action"] = 'function'
    element["external"] = 'getCatalogMenu'
    cards.append(element)
    return cards
Example #37
 def extractElementsPlayer(html):
     x = []
     i = 0
     for value in html.split("<li class='hotSongsItem' songid="):
         if i > 0:
             element = {}
             title = Decoder.extract("songname='", "'",
                                     value) + " - " + Decoder.extract(
                                         "songartist='", "'", value)
             link = Decoder.extract("songmp3='", "'", value)
             element["title"] = title
             if link.find(".mp3") == -1:
                 element["link"] = base64.standard_b64encode(
                     Fildonet.MAIN_URL + link)
             else:
                 element["link"] = link
             element["thumbnail"] = Fildonet.PLAY
             logger.info("append2: " + title + ", link: " + element["link"])
             x.append(element)
         i += 1
     return x
Example #38
    def extractElements(table,html=""):
        x = []
        splitter = 'data-playlist-id="'
        splitter2 = 'Flash Player">'
        splitter3 = "new_player_block.nss_load_playlist('"
        for fieldHtml in table.split('<li '):
            if fieldHtml.find(splitter)>-1:
                element = {}
                playlistId = Decoder.extract(splitter,'" >',fieldHtml)
                title = Decoder.extract(splitter2,'</a>',fieldHtml).strip()
                url = Decoder.extract(splitter3,"' + jQuery(",fieldHtml).replace("&amp;","&")
                rel = Decoder.rExtract('" rel="','">'+title+"</a>",html)
                url = R977Musiccom.MAIN_URL+url+rel+"&userId=0"
                element["title"] = title
                element["link"] = base64.standard_b64encode(url)
                logger.info("found title: "+element["title"]+", link: "+element["link"])
                if len(element["title"])>0:
                    #TODO: now we try to extract the content from the json 'html'
                    x.append(element)

        return x
Example #39
 def parseListStations(html):
     x = []
     html = Decoder.extract('<div class="col-sm-7 col-md-8 station-list">',
                            'aside class="col-sm-5 col-md-4">', html)
     for elementHtml in html.split(
             '<a class="stationinfo-info-toggle" href="" ng-click="toggle()"></a>'
     ):
         element = {}
         link = Decoder.extract('<a href="//',
                                '" class="stationinfo-link">', elementHtml)
         title = Decoder.extract('<strong>', '</strong>', elementHtml)
         img = Decoder.extract('<img src="', '"', elementHtml)
         element["title"] = title
         element["link"] = link
         element["thumbnail"] = img
         if element["link"].find("http") != 0:
             element["link"] = "http://" + element["link"]
         if element["title"].find('</div>') == -1:
             logger.info("found element: " + element["title"] +
                         ", with link: " + element["link"])
             x.append(element)
     return x
Example #40
 def getChannels(page,cookie=''):
     x = []
     referer = R977Musiccom.REFERER
     #print page
     if str(page) == '0':
         page=R977Musiccom.MAIN_URL
         html = R977Musiccom.getContentFromUrl(page,"",R977Musiccom.cookie,referer)
         stations_lists = R977Musiccom.getContentFromUrl("http://www.977music.com/ajax/stations_list_xml.php","",R977Musiccom.cookie,referer)
         #print "station list: "+stations_lists
     else:
         page = base64.standard_b64decode(page)
         logger.info("launching petition from page: "+page)
         #extract id
         id = Decoder.extract("?id=","&",page)
         songId = Decoder.extract("song_id=","&",page)
         if cookie=='':
             logger.info("launching to get an virgin cookie...")
             R977Musiccom.getContentFromUrl(R977Musiccom.MAIN_URL,"","","") #get a right cookie
             cookie = R977Musiccom.cookie
         else:
             logger.info("Detected a cookie: "+cookie)
         #logger.info("using always the same cookie: "+cookie)
         logger.info("simulating get referer (swf)... cookie: "+cookie)
         flashPlayer = R977Musiccom.getContentFromUrl(referer,"",cookie,R977Musiccom.MAIN_URL)
         logger.info("obtaining songs...")
         songs = R977Musiccom.getContentFromUrl("http://www.977music.com/ajax/rest/playlist.php/"+id+"/","",cookie,R977Musiccom.MAIN_URL,True)
         #print "songs: "+str(songs)
         logger.info("now is the vital petition, launching... ")
         html = R977Musiccom.getContentFromUrl(page,'',cookie,referer)
         x = R977Musiccom.parseListSongs(songs,html)
     #print "html: "+html
     if html.find('<div class="list_box">')>-1: #it's a list, needs decode
         table = Decoder.extract('<div class="list_box">','</ul>',html)
         x = R977Musiccom.extractElements(table,html)
         #print "done!"
     return x
Example #41
 def extractElementsPlayer(html):
     x = []
     i = 0
     for value in html.split('<div class="player"'):
         if i>0:
             element = {}
             title = Decoder.extract('data-title="','">',value)
             link = Decoder.extract('data-mp3url="','" ',value)
             element["title"] = title
             element["link"] = Redmp3cc.MAIN_URL+link
             if value.find('<img src="')!=-1:
                 element["thumbnail"] = Redmp3cc.MAIN_URL+Decoder.extract('<img src="','" ',value)
             logger.info("append: "+title+", link: "+element["link"])
             x.append(element)
         i+=1
     if len(x)>0 and html.find(' class="button">Next page')!=-1:
         nextLink = "search.html/"+Decoder.rExtract('/mp3-','" class="button">Next page',html)
         nextText = "Next page"
         element = {}
         element["link"] = nextLink
         element["title"] = nextText
         logger.info("append next search with link: "+nextLink)
         x.append(element)
     return x
Example #42
 def search(text):
     start = 'window["ytInitialData"] = '
     end = ";"
     page = Youtube.SEARCH_URL % urllib.parse.quote_plus(text)
     logger.debug("search url is: %s" % page)
     html = Youtube.getContentFromUrl(page)
     logger.debug("now extracting json search results...")
     jsonString = Decoder.extract(start,end,html)
     logger.debug("done, loading json...")
     jsonList = json.loads(jsonString)
     with open('/tmp/data.json', 'w') as outfile:
         json.dump(jsonList, outfile)
     logger.debug("parsed and saved!")
     listed = Youtube.extractVideosSearchFromJSON(jsonList)
     logger.debug("json to lists done")
     return listed
Example #43
 def getChannels(page,cookie='',referer=''):
     x = []
     html = ""
     if str(page) == '0':
         x = Redmp3cc.getMainSections()
     elif str(page) == 'songs.html':
         page=Redmp3cc.MAIN_URL+"/"
         html = Downloader.getContentFromUrl(page,"",cookie,"")
         x = Redmp3cc.extractElementsPlayer(html)
     elif str(page).find('search.html')!=-1:
         if str(page).find('search.html/')==-1:
             keyboard = xbmc.Keyboard("")
             keyboard.doModal()
             text = ""
             if (keyboard.isConfirmed()):
                 text = keyboard.getText()
                 x = Redmp3cc.search(text)
         else:
             text = Decoder.rExtract('search.html/','/',page)
             page = int(page[page.rfind('/')+1:])
             x = Redmp3cc.search(text,page)
     elif str(page).find(".html")!=-1:
         if str(page) == 'albums.html':
             page = Redmp3cc.MAIN_URL
             html = Downloader.getContentFromUrl(page,"",cookie,"")
             x = Redmp3cc.extractElementsAlbum(html)
         else:
             html = Downloader.getContentFromUrl(page,"",cookie,"")
             x = Redmp3cc.extractElementsPlayer(html)
     else:
         logger.info("page is: "+page)
         response = Redmp3cc.getContentFromUrl(page,"",cookie,Redmp3cc.MAIN_URL,True)
         #logger.info("will be used a mp3 url: "+Decoder.extract('<a href="','">here',response))
         host = response[response.find("://")+len("://"):]
         if host.find("/")>-1:
             host = host[0:host.find("/")]
         cookie = Redmp3cc.cookie
         referer = page
         logger.info("cookie is: "+cookie+", referer is: "+referer)
         headers = downloadtools.buildMusicDownloadHeaders(host,cookie,referer)
         filename= Decoder.extract('filename=','&',response)
         #ROOT_DIR = xbmcaddon.Addon(id='org.harddevelop.kodi.juke').getAddonInfo('path')
         ROOT_DIR = xbmc.translatePath('special://temp/')
         logger.info("using special root folder: "+ROOT_DIR)
         downloadtools.downloadfile(response,ROOT_DIR+"/"+filename,headers,False,True)
         x.append(Redmp3cc.buildDownloadedFile(xbmc.makeLegalFilename(ROOT_DIR+"/"+filename)))
     return x
Example #44
def getCatalogMenu(params=[]):
    page = 1
    if type(params) is list:
        logger.debug("page")
        for element in params:
            logger.debug("page %s" % str(element))
            if "page" in element:
                page = element["page"]
    #/bbs/cposts/sc/
    http = urllib3.PoolManager()
    url = "https://www.lexaloffle.com/bbs/lister.php?use_hurl=1&cat=7&sub=2&page=%s&mode=carts&orderby=featured" % page
    logger.debug("lexaoffle url is: %s" % url)
    r = http.request('GET', url, preload_content=False)
    html = r.data.decode()
    bruteCards = Decoder.extract('pdat=[', '];', html)
    cards = []
    if page > 1:
        element = {}
        element["title"] = "next p. %s" % (int(page) - 1)
        element["action"] = "function"
        element["external"] = 'getCatalogMenu'
        element["params"] = [{'page': int(page) - 1}]
        cards.append(element)
    for bruteCardLine in bruteCards.split(",``],"):
        logger.debug("line is %s" % bruteCardLine)
        if '[' in bruteCardLine:
            element = {}
            bruteCardLine = bruteCardLine[:bruteCardLine.find('],')]
            name = Decoder.extract(', `', '`,', bruteCardLine)
            id = Decoder.extract("['", "'", bruteCardLine)
            link = 'https://www.lexaloffle.com/bbs/get_cart.php?cat=7&play_src=0&lid=%s' % id
            out = re.sub('[^A-Za-z0-9.\-,\ ]+', '', name) + ".p8.png"
            out = ROMS_PATH + "/pico8/" + out
            command = 'curl "%s" --create-dirs --output "%s" \n' % (link, out)
            element["external"] = command
            element["action"] = 'command-exit'
            element["title"] = name
            cards.append(element)
    #next page
    if len(cards) > 0:
        element = {}
        element["title"] = "next p. %s" % (int(page) + 1)
        element["action"] = "function"
        element["external"] = 'getCatalogMenu'
        element["params"] = [{'page': int(page) + 1}]
        cards.append(element)
    element = {}
    element["title"] = "Search"
    element["action"] = 'function-text'
    element["external"] = 'getCatalogSearchMenu'
    element["params"] = [{'webpage': link, 'final': False}]
    #TODO append getCatalogSearchMenu search
    cards.append(element)
    #back
    element = {}
    element["title"] = "Back"
    if int(page) != 1:
        element["action"] = 'function'
        element["external"] = 'getCatalogMenu'
    else:
        element["action"] = 'menu'
        element["external"] = 'webpages'
    cards.append(element)
    return cards