Example #1
def showing(url):
	xml = ''
	string = url.split()
	TheXml,TheCode = string[0],string[1]
	TheCode = TheCode.replace("get/","")
	TheCode = base64.b64decode(TheCode)
	input = ''
	keyboard = xbmc.Keyboard(input, '[COLOR red]So Your Wanting The Naughty Bits Are You ?? Get The Tissues At The Ready[/COLOR]')
	keyboard.doModal()
	if keyboard.isConfirmed():
		input = keyboard.getText()
	if input == TheCode: 
		listhtml = getHtml(TheXml)
		match = re.compile(
				'([^"]+)', 
				re.IGNORECASE | re.DOTALL).findall(listhtml)
		for xmlContent in match:
			xml += xmlContent
	else:
		xml += "<dir>"\
			   "<title>[COLOR yellow]Wrong Answer, Are you sure your old enough ??[/COLOR]</title>"\
			   "<thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
			   "</dir>"
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #2
def get_network(url):
    xml = ""
    last = url.split("/")[-2]
    num = url.split("/")[-1]
    html = "https://www.tvmaze.com/shows?Show%5Bnetwork_id%5D="+last+"&page="+num
    html2= requests.get(html).content
    match = re.compile('<div class="card primary grid-x">.+?<a href="(.+?)".+?<img src="(.+?)".+?<a href=".+?">(.+?)</a>',re.DOTALL).findall(html2)
    for link, image, name in match:
        link = link.split("/")[-2]
        thumb = "http:"+image
        xml += "<dir>"\
               "<title>%s</title>"\
               "<thumbnail>%s</thumbnail>"\
               "<tvmaze>show/%s/%s</tvmaze>"\
               "</dir>" % (name, thumb, name, link)
    try:
        match2 = re.compile('<ul class="pagination">.+?<li class="current"><a href="(.+?)"',re.DOTALL).findall(html2)[0]
        page = match2.split(";")[-1]
        page = page.replace("page=","")
        page = int(page)
        next_page = page+1
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<tvmaze>network/%s/%s</tvmaze>"\
               "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
               "</dir>" % (last, next_page)
    except:
        pass                           
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type()) 
Example #3
def imdbactors(url):
	xml = ""
	url = url.replace("http://www.imdb.com","").replace("actors","list").replace("actor","")
	link = 'http://www.imdb.com/' + url
	listhtml = getHtml(link)
	match = re.compile(
			'<img alt=".+?"\nheight="209"\nsrc="(.+?)"\nwidth="140" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n.+?<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n<a href="/name/(.+?)"\n>(.+?)\n</a>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for thumbnail, imdb, name in match:
		thumbnail = thumbnail.replace("@._V1_UY209_CR10,0,140,209_AL_.jpg","@._V1_SY1000_SX800_AL_.jpg")
		thumbnail = thumbnail.replace("._V1_UY209_CR5,0,140,209_AL_.jpg","._V1_UX520_CR0,0,520,700_AL_.jpg")
		xml += "<dir>"\
			   "<title>%s</title>"\
			   "<imdburl>name/%s</imdburl>"\
			   "<thumbnail>%s</thumbnail>"\
			   "</dir>" % (name, imdb ,thumbnail)
	next_page = re.compile(
				'<a class="flat-button lister-page-next next-page" href="(.+?)">\n.+?Next\n.+?</a>', 
				re.IGNORECASE | re.DOTALL).findall(listhtml)
	for url in next_page:
		try:
			xml += "<dir>"\
				   "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
				   "<imdburl>actor%s</imdburl>"\
				   "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
				   "</dir>" % (url)
		except:
			pass
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #4
def imdbactorspage(url):
	xml = ""
	link = 'http://www.imdb.com/' + url
	listhtml = getHtml(link)
	match = re.compile(
			'<div class="film.+?" id="act.+?">\n<span class="year_column">\n&nbsp;(.+?)\n</span>\n<b><a href="/title/(.+?)/.+?ref_=.+?"\n>(.+?)</a></b>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for year, imdb, title in match:
		name = title + " (" + year + ")"
		xml += "<item>"\
				"<title>%s</title>"\
				"<meta>"\
				"<content>movie</content>"\
				"<imdb>%s</imdb>"\
				"<title>%s</title>"\
				"<year>%s</year>"\
				"</meta>"\
				"<link>"\
				"<sublink>search</sublink>"\
				"<sublink>searchsd</sublink>"\
				"</link>"\
				"<thumbnail></thumbnail>"\
				"<fanart></fanart>"\
				"</item>" % (name, imdb, title, year)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())	
Example #5
def open_table():
    xml = ""
    at = Airtable('appJ1nGNe5G1za9fg', 'NHL', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=700, view='Grid view') 
    for field in match:
        try:
            res = field['fields']   
            name = res['Name']
            name = remove_non_ascii(name)
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            time = res['Time']
            dsp = time + "  -  " + name                                     
            xml += "<item>"\
                   "<title>[COLOR darkmagenta]%s[/COLOR]</title>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (dsp,thumbnail,fanart,link1,link2,link3)
        except:
            pass                                                                     
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())    
Example #6
def imdbcharttv(url):
	xml = ""
	url = url.replace("charttv/","chart/")
	url = 'http://www.imdb.com/' + url
	listhtml = getHtml(url)
	match = re.compile(
			'<a href="/title/(.+?)/.+?pf_rd_m=.+?pf_rd_i=.+?&ref_=.+?"\n> <img src="(.+?)" width=".+?" height=".+?"/>\n</a>.+?</td>\n.+?<td class="titleColumn">\n.+?\n.+?<a href=".+?"\ntitle=".+?" >(.+?)</a>\n.+?<span class="secondaryInfo">(.+?)</span>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for imdb, thumbnail, title, year in match:
		tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
		tmdbhtml = requests.get(tmdb_url).content
		Poster_path = re.compile(
					'"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"', 
					re.DOTALL).findall(tmdbhtml)
		for poster_path, backdrop_path in Poster_path:
			name = title + " " + year
			year = year.replace("(","").replace(")","")
			xml += "<dir>"\
				   "<title>%s</title>"\
				   "<meta>"\
				   "<content>tvshow</content>"\
				   "<imdb>%s</imdb>"\
				   "<imdburl>season/%s</imdburl>"\
				   "<tvdb></tvdb>"\
				   "<tvshowtitle>%s</tvshowtitle>"\
				   "<year>%s</year>"\
				   "</meta>"\
				   "<link></link>"\
				   "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
					"<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
				   "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #7
def imdbNextPage(url):
	xml = ""
	listhtml = getHtml(url)
	match = re.compile(
			'<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for thumbnail, imdb, title, year in match:
		name = title + " " + year
		year = year.replace("(","").replace(")","")
		thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg","@._V1_SY1000_SX800_AL_.jpg")
		xml += "<item>"\
				"<title>%s</title>"\
				"<meta>"\
				"<content>movie</content>"\
				"<imdb>%s</imdb>"\
				"<title>%s</title>"\
				"<year>%s</year>"\
				"</meta>"\
				"<link>"\
				"<sublink>search</sublink>"\
				"<sublink>searchsd</sublink>"\
				"</link>"\
				"<thumbnail>%s</thumbnail>"\
				"<fanart></fanart>"\
				"</item>" % (name, imdb, title, year, thumbnail)
	next_page = re.compile(
				'<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />', 
				re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
	xml += "<dir>"\
		   "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
		   "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
		   "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
		   "</dir>" % (next_page)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #8
def get_MRUPlayMedia(url):
    xml = ""
    url = url.replace('mru_play/', '')
    try:
        import cookielib, urllib2
        cookieJar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
        conn = urllib2.Request(url)
        connection = opener.open(conn)
        f = connection.read()
        connection.close()
        js = json.loads(f)
        for cookie in cookieJar:
            token = cookie.value
        js = js['videos']
        for el in js:
            link = 'http:'+el['url']+'|Cookie=video_key='+token
            xml += "<item>"\
                   "    <title>%s</title>"\
                   "    <link>%s</link>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "</item>" % (el['key'],link,addon_icon)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #9
def imdbseason(url):
	xml = ""
	url = url.replace("season/","")
	imdb = url
	url = 'http://www.imdb.com/title/' + imdb
	listhtml = getHtml(url)
	match = re.compile(
			'href="/title/'+imdb+'/episodes.+?season=.+?&ref_=tt_eps_sn_.+?"\n>(.+?)</a>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for season in match:
			episodeURL = 'http://www.imdb.com/title/' + imdb + "/episodes?season=" + season
			name = "Season: [COLOR dodgerblue]" + season + "[/COLOR]"
			xml +=  "<dir>"\
					"<title>%s</title>"\
					"<meta>"\
					"<content>season</content>"\
					"<imdb>%s</imdb>"\
					"<imdburl>theepisode/%s</imdburl>"\
					"<tvdb></tvdb>"\
					"<tvshowtitle></tvshowtitle>"\
					"<year></year>"\
					"<season>%s</season>"\
					"</meta>"\
					"<link></link>"\
					"<thumbnail></thumbnail>"\
					"<fanart></fanart>"\
					"</dir>" % (name, imdb, episodeURL, season)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #10
def trakt_season(slug):
    __builtin__.content_type = "episodes"
    splitted = slug.replace("trakt_id", "").split(",")
    trakt_id = splitted[0]
    season = splitted[1]
    year = splitted[2]
    tvtitle = ",".join(splitted[3:-2])
    tmdb = splitted[-2]
    imdb = splitted[-1]
    url = "https://api.trakt.tv/shows/%s/seasons/%s?extended=full"
    url = url % (trakt_id, season)
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': TRAKT_API_KEY
    }
    xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    if not xml:
        __builtin__.content_type = "episodes"
        xml = ""
        response = requests.get(url, headers=headers).json()

        if type(response) == list:
            for item in response:
                xml += get_episode_xml(item, trakt_id, year, tvtitle, tmdb,
                                       imdb)
            xml = remove_non_ascii(xml)
            save_to_db((xml, __builtin__.content_type), url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
Example #11
def FullMatch_WWE_Replays(url):
    url = url.replace('wwe_replay/', '')
    page_id = url
    url = base_full_match % ((json_cat_url % (wwe_info['per_page'], wwe_info['category'], page_id))) 

    try:
        xml = ""
        response = requests.get(url, headers=headers).json()  # headers must be passed as the headers kwarg, not positionally (params)
        try:
            if 'invalid' in response['code']:
                return
        except:
            pass
        for post in response:
            title   = clean_titles(post['title']['rendered'])
            if not 'wwe' in title.lower():
                continue
            content = post['content']['rendered']
            description = decodeEntities(re.compile('<h2>(.+?)</h2>').findall(content)[0])

            try:
                icon_js = requests.get(post['_links']['wp:featuredmedia'][0]['href'].replace('\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except:
                icon = addon_icon

            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
            if len(sources) > 0:
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <link>" % (title,description)

                for source in sources:
                    if not 'http' in source:
                        source = 'http:%s' % source
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += "        <sublink>%s(%s)</sublink>" % (source,host)

                xml += "    </link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (icon)
    except:
        pass

    try:
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <fullmatch>wwe_replay/%s</fullmatch>"\
               "</dir>" % (str(int(page_id)+1))
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #12
def testings(file_name="testings.xml"):
    """
parses local xml file as a jen list
    :param str file_name: local file name to parse
    :return: list of jen items
    :rtype: list[dict[str,str]]
    """
    profile_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
    test_file = xbmcvfs.File(os.path.join(profile_path, file_name))
    xml = test_file.read()
    test_file.close()
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #13
def get_shows(url):
    xml = ""
    try:    
        url = "https://www.arconaitv.us/"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url,headers=headers).content
        block2 = re.compile('<div class="content">(.+?)<div class="stream-nav shows" id="shows">',re.DOTALL).findall(html)
        match2 = re.compile('href=(.+?) title=(.+?)<img src=(.+?) alt=(.+?) />',re.DOTALL).findall(str(block2))
        for link2,title2,image2,name2 in match2:
            name2 = name2.replace("\\'", "")
            link2 = link2.replace("\\'", "")
            image2 = image2.replace("\\'", "")
            title2 = title2.replace("\\'", "")
            title2 = title2.replace(" class=poster-link>","")
            image2 = "https://www.arconaitv.us"+image2
            link2 = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/"+link2                
            if not name2:            
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "</plugin>" % (title2,link2,image2)
            else:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "</plugin>" % (name2,link2,image2)
        block3 = re.compile('<div class="stream-nav movies" id="movies">(.+?)<div class="donation-form" id="donate">',re.DOTALL).findall(html)
        match3 = re.compile('href=(.+?) title=(.+?)>',re.DOTALL).findall(str(block3))
        for link3,name3 in match3:
            name3 = name3.replace("\\'", "")
            link3 = link3.replace("\\'", "")
            link3 = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/"+link3
            image3 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
            xml += "<plugin>"\
                   "<title>%s</title>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "<thumbnail>%s</thumbnail>"\
                   "</plugin>" % (name3,link3,image3)                                                      
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type()) 
Example #14
def get_list(url):
    """display jen list"""
    global content_type
    jen_list = JenList(url)
    if not jen_list:
        koding.dolog(_("returned empty for ") + url)
    items = jen_list.get_list()
    content = jen_list.get_content_type()
    if items == []:
        return False
    if content:
        content_type = content
    display_list(items, content_type)
    return True
Example #15
def get_NHLCupArchives(url):
    xml = ""
    url = url.replace('sh_nhl_sc/', '')
    offset  = url.split('/')[0]
    account = url.split('/')[1].decode('base64')
    url = base_mail_url % (account, offset, per_page['nhl'])
    if offset == '1':
        offset = '0'
    try:
        response = requests.get(url).content
        results = json.loads(response)
        results = results[2]['items']
        for item in results:
            try:
                title = item['Title']
                if 'true' in nhl_tonight:
                    pass
                else:
                    if 'nhl tonight' in title.lower():
                        continue
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                title = clean_mru_title(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <sportshub>mru_play/%s</sportshub>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,meta_url,icon)
            except:
                failure = traceback.format_exc()
                xbmcgui.Dialog().textviewer('Item Exception',str(failure))
                pass
    except:
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a',str(failure))
        pass

    try:
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <sportshub>sh_nhl_sc/%s/%s</sportshub>"\
               "</dir>" % (str(int(offset)+int(per_page['nhl'])),account.encode('base64'))
    except:
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a',str(failure))
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #16
def imdbuser(url):
	xml = ""
	link = 'http://www.imdb.com/' + url
	listhtml = getHtml(link)
	match = re.compile(
			'<a class="list-name" href="(.+?)">(.+?)</a>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for url, name in match:
		xml += "<dir>"\
			   "<title>%s</title>"\
			   "<imdburl>%s</imdburl>"\
			   "<thumbnail>https://image.ibb.co/fR6AOm/download.jpg</thumbnail>"\
			   "</dir>" % (name, url)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #17
def imdbKeywords(url):
	xml = ""
	listhtml = getHtml(url)
	match = re.compile(
			'<a href="/keyword/(.+?)/.+?ref_=fn_kw_kw_.+?" >.+?</a>(.+?)</td>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for keywords, count in match:
			name = keywords + count
			xml += "<dir>"\
				   "<title>%s</title>"\
				   "<imdburl>keyword/%s</imdburl>"\
				   "<thumbnail></thumbnail>"\
				   "</dir>" % (name, keywords)	
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #18
def get_NBAReplayHD(url):
    xml = ""
    url = url.replace('nbareplayhd/', '') # Strip our category tag off.
    cat_item = url.split('/')
    if cat_item[1] == None or cat_item[1] == '':
        cat_item[1] = '1'
    orig_cat  = cat_item[0]
    orig_page = cat_item[1]
    url = urlparse.urljoin(archives['nbareplayhd'], (json_cat_url % (per_page['nba'], cat_item[0], cat_item[1]))) 
    count = 0  # initialise before the request so the checks below never hit an undefined name
    try:
        response = requests.get(url).content
        results = re.compile('"id":(.+?),',re.DOTALL).findall(response)
        for post_id in results:
            count += 1
            try:
                url = urlparse.urljoin(archives['nbareplayhd'], ('/wp-json/wp/v2/posts/%s' % (post_id)))
                page = requests.get(url).content
                page = page.replace('\\','')
                try:
                    src = 'http:' + re.compile('src="(.+?)"',re.DOTALL).findall(page)[0]
                except:
                    continue

                title = re.compile('"title".+?"rendered":"(.+?)"',re.DOTALL).findall(page)[0]
                title = remove_non_ascii(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <link>%s</link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,src,addon_icon)
            except:
                pass
    except:
        pass

    try:
        if count == int(per_page['nba']):
            xml += "<dir>"\
                   "    <title>Next Page >></title>"\
                   "    <sportshub>nbareplayhd/%s/%s</sportshub>"\
                   "</dir>" % (orig_cat,str((int(orig_page)+1)))
    except:
        pass

    if count > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
Example #19
def tmdb_tv_show(url):
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        splitted = url.replace("tmdb_id", "").split(",")
        tmdb_id = splitted[0]
        year = splitted[1]
        tvtitle = ",".join(splitted[2:])
        response = tmdbsimple.TV(tmdb_id).info()
        seasons = response["seasons"]
        xml = ""
        for season in seasons:
            xml += get_season_xml(season, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #20
def imdbBothSearch(url):
	xml = ""
	listhtml = getHtml(url)
	match = re.compile(
			'<img src="(.+?)" /></a> </td> <td class="result_text"> <a href="/title/(.+?)/.+?ref_=fn_al_tt_.+?" >(.+?)</a>(.+?)</td>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for thumbnail, imdb, title, year in match:
			tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
			tmdbhtml = requests.get(tmdb_url).content
			Poster_path = re.compile(
							'"poster_path":"(.+?)"', 
							re.DOTALL).findall(tmdbhtml)
			Backdrop_path = re.compile(
							'"backdrop_path":"(.+?)"', 
							re.DOTALL).findall(tmdbhtml)
			for poster_path in Poster_path:
				for backdrop_path in Backdrop_path:
					if not 'Series' in year:
						year = year.split(')', 1)[0]
						name = title + " " + year + ')'
						year = year.replace("(","").replace(")","")
						xml += "<item>"\
								"<title>%s</title>"\
								"<meta>"\
								"<content>movie</content>"\
								"<imdb>%s</imdb>"\
								"<title>%s</title>"\
								"<year>%s</year>"\
								"</meta>"\
								"<link>"\
								"<sublink>search</sublink>"\
								"<sublink>searchsd</sublink>"\
								"</link>"\
								"<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
								"<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
								"</item>" % (name, imdb, title, year, poster_path, backdrop_path)
					else:
						name = title + " " + year
						xml += "<dir>"\
							   "<title>%s</title>"\
							   "<imdburl>season/%s</imdburl>"\
							   "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
							   "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
							   "</dir>" % (name, imdb, poster_path, backdrop_path)	
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #21
def tmdb_season(url):
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        splitted = url.replace("tmdb_id", "").split(",")
        tmdb_id = splitted[0]
        season = splitted[1]
        year = splitted[2]
        tvtitle = ",".join(splitted[3:])
        response = tmdbsimple.TV_Seasons(tmdb_id, season).info()
        episodes = response["episodes"]
        xml = ""
        for episode in episodes:
            xml += get_episode_xml(episode, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #22
def m3u(url):
    # parse "#EXTINF:...,name" entries followed by their stream url
    xml = ""
    if '.m3u' in url:
        listhtml = getHtml(url)
        match = re.compile('#EXTINF:.+?,(.+?)\n([^"]+)\n',
                           re.IGNORECASE | re.DOTALL).findall(listhtml)
        for name, url in match:
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<link>%s</link>"\
                   "<thumbnail></thumbnail>"\
                   "</item>" % (name, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #23
def get_season(url):
    xml = "" 
    sea_num = url.split("/")[-1]
    if len(sea_num)==1:
        sea_num = "0"+sea_num
    tmdb_id = url.split("/")[-2]
    tvdb = url.split("/")[-3]
    imdb = url.split("/")[-4]
    tv_title = url.split("/")[-6]
    fanart = url.split("/")[-5]
    tmdb_fanart = "https://image.tmdb.org/t/p/original/"+str(fanart)
    html = "https://api.themoviedb.org/3/tv/%s/season/%s?api_key=%s&language=en-US" % (tmdb_id, sea_num, TMDB_api_key)
    html = requests.get(html).json()
    eps = html['episodes']
    for episodes in eps:
        thumb = episodes['still_path']
        thumb = "https://image.tmdb.org/t/p/original"+str(thumb)
        title = episodes['name']
        air_date = episodes['air_date']
        year = air_date.split("-")[0]
        episode_num = episodes['episode_number']
        xml += "<item>"\
              "<title>%s</title>"\
              "<meta>"\
              "<imdb>%s</imdb>"\
              "<tvdb>%s</tvdb>"\
              "<content>episode</content>"\
              "<tvshowtitle>%s</tvshowtitle>"\
              "<year>%s</year>"\
              "<premiered></premiered>"\
              "<season>%s</season>"\
              "<episode>%s</episode>"\
              "</meta>"\
              "<link>"\
              "<sublink>search</sublink>"\
              "<sublink>searchsd</sublink>"\
              "</link>"\
              "<thumbnail>%s</thumbnail>"\
              "<fanart>%s</fanart>"\
              "</item>" % (title, imdb, tvdb, tv_title, year, sea_num, episode_num, thumb, tmdb_fanart) 

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #24
def imdbyears(url):
	xml = ""
	url = url.replace("years/","")
	url = 'http://www.imdb.com/search/title?year=' + url + '&title_type=feature'
	listhtml = getHtml(url)
	match = re.compile(
			'<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight="98"\nsrc=".+?"\nwidth="67" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for thumbnail, imdb, title, year in match:
		tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
		tmdbhtml = requests.get(tmdb_url).content
		Poster_path = re.compile(
						'"backdrop_path":"(.+?)".+?"overview":".+?","poster_path":"(.+?)"}', 
						re.DOTALL).findall(tmdbhtml)
		for backdrop_path, poster_path in Poster_path:
			name = title + " " + year
			year = year.replace("(","").replace(")","")
			thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg","@._V1_SY1000_SX800_AL_.jpg")
			xml += "<item>"\
					"<title>%s</title>"\
					"<meta>"\
					"<content>movie</content>"\
					"<imdb>%s</imdb>"\
					"<title>%s</title>"\
					"<year>%s</year>"\
					"</meta>"\
					"<link>"\
					"<sublink>search</sublink>"\
					"<sublink>searchsd</sublink>"\
					"</link>"\
					"<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
					"<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
					"</item>" % (name, imdb, title, year, poster_path, backdrop_path)
	next_page = re.compile(
				'<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />', 
				re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
	xml += "<dir>"\
		   "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
		   "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
		   "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
		   "</dir>" % (next_page)
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #25
def new_releases():
    xml = ""
    at = Airtable('apppx7NENxSaqMkM5', 'Sports_channels', api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, sort=['channel'])
    results = re.compile("fanart': u'(.+?)'.+?link': u'(.+?)'.+?thumbnail': u'(.+?)'.+?channel': u'(.+?)'.+?summary': u'(.+?)'",re.DOTALL).findall(str(match))
    for fanart,link,thumbnail,channel,summary in results:
        if "plugin" in link:

            xml += "<plugin>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>%s</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</plugin>" % (channel,channel,thumbnail,fanart,summary,link)
                
        else:
            xml +=  "<item>"\
                    "<title>%s</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year></year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "</link>"\
                    "</item>" % (channel,channel,thumbnail,fanart,summary,link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #26
def imdbseries(url):
	xml = ""
	listhtml = getHtml(url)
	match = re.compile(
			'<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>', 
			re.IGNORECASE | re.DOTALL).findall(listhtml)
	for thumbnail, imdb, title, year in match:
		tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
		tmdbhtml = requests.get(tmdb_url).content
		Poster_path = re.compile(
					'"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"', 
					re.DOTALL).findall(tmdbhtml)
		for poster_path, backdrop_path in Poster_path:
			name = title + " " + year
			year = year.replace("(","").replace(")","")
			xml += "<dir>"\
				   "<title>%s</title>"\
				   "<meta>"\
				   "<content>tvshow</content>"\
				   "<imdb>%s</imdb>"\
				   "<imdburl>season/%s</imdburl>"\
				   "<tvdb></tvdb>"\
				   "<tvshowtitle>%s</tvshowtitle>"\
				   "<year>%s</year>"\
				   "</meta>"\
				   "<link></link>"\
				   "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
				   "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
				   "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
	try:
		next_page = re.compile(
					'<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />', 
					re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
		xml += "<dir>"\
			   "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
			   "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
			   "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
			   "</dir>" % (next_page)
	except:
		pass
	jenlist = JenList(xml)
	display_list(jenlist.get_list(), jenlist.get_content_type())
Example #27
def all_episodes(url):
    global content_type
    import pickle
    import xbmcgui
    season_urls = pickle.loads(url)
    result_items = []
    dialog = xbmcgui.DialogProgress()
    dialog.create(addon_name, _("Loading items"))
    num_urls = len(season_urls)
    for index, season_url in enumerate(season_urls):
        if dialog.iscanceled():
            break
        percent = ((index + 1) * 100) / num_urls
        dialog.update(percent, _("processing lists"), _("%s of %s") % (
            index + 1,
            num_urls))

        jen_list = JenList(season_url)
        result_items.extend(jen_list.get_list(skip_dialog=True))
    content_type = "episodes"
    display_list(result_items, "episodes")
Example #28
def get_show(url):
    xml = ""
    tv_title = url.split("/")[-2]
    Title = remove_non_ascii(tv_title)
    Title = Title.lower()
    Title = Title.encode('utf8')
    Title = Title.replace(" ", "%20")
    html = "https://api.themoviedb.org/3/search/tv?api_key=%s&language=en-US&query=%s&page=1" % (TMDB_api_key, Title)
    html2 = requests.get(html).json()
    result = html2['results'][0]
    tmdb_id = result['id']
    fanart = result['backdrop_path']
    fanart = fanart.replace("/", "")
    tmdb_fanart = "https://image.tmdb.org/t/p/original/"+str(fanart)
    url3 = "https://api.themoviedb.org/3/tv/%s/external_ids?api_key=%s&language=en-US" % (tmdb_id, TMDB_api_key)
    html4 = requests.get(url3).json()
    imdb = html4['imdb_id']
    tvdb = html4['tvdb_id']
    url2 = "https://api.themoviedb.org/3/tv/%s?api_key=%s&language=en-US" % (tmdb_id, TMDB_api_key)
    html3 = requests.get(url2).json()
    seas = html3['seasons']
    for seasons in seas:
        thumb = seasons['poster_path']
        thumb = "https://image.tmdb.org/t/p/original"+str(thumb)
        title = seasons['name']
        sea_num = seasons['season_number']
        air_date = seasons['air_date']
        year = air_date.split("-")[0]
        xml += "<dir>"\
               "<title>%s</title>"\
               "<year>%s</year>"\
               "<thumbnail>%s</thumbnail>"\
               "<fanart>%s</fanart>"\
               "<tvmaze>season/%s/%s/%s/%s/%s/%s</tvmaze>"\
               "</dir>" % (title, year, thumb, tmdb_fanart, tv_title, fanart, imdb, tvdb, tmdb_id, sea_num)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #29
def get_swiftstreamz_category(url):
    xml = ""
    url = url.replace('swiftcategory/', '') # Strip our category tag off.
    try:
        url = base_cat_url % (url)
        headers = {'Authorization': 'Basic QFN3aWZ0MTEjOkBTd2lmdDExIw', 'User-Agent': User_Agent}
        response = requests.get(url,headers=headers)
        if 'Erreur 503' in response.content:
            xml += "<dir>"\
                   "    <title>[B]System down for maintenance[/B]</title>"\
                   "    <meta>"\
                   "        <summary>System down for maintenance</summary>"\
                   "    </meta>"\
                   "    <heading></heading>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "</dir>" % (addon_icon)
        else:
            response = response.json(strict=False)
            for a in response['LIVETV']:
                if not 'm3u8' in a['channel_url']:
                    continue
                name = a['channel_title']
                url  = a['channel_url']
                icon = base_ico_url % (a['channel_thumbnail'])
                desc = a['channel_desc']
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <swift>swiftplay/%s/%s</swift>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (name,desc,name,url,icon)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #30
def get_MLBArchives(url):
    xml = ""
    url = url.replace('sh_mlb/', '')
    offset  = url.split('/')[0]
    account = url.split('/')[1].decode('base64')
    url = base_mail_url % (account, offset, per_page['mlb'])
    if offset == '1':
        offset = '0'
    try:
        response = requests.get(url).content
        results = json.loads(response)
        results = results[2]['items']
        for item in results:
            try:
                title = item['Title']
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                title = clean_mru_title(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <sportshub>mru_play/%s</sportshub>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,meta_url,icon)
            except:
                continue
    except:
        pass

    try:
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <sportshub>sh_mlb/%s/%s</sportshub>"\
               "</dir>" % (str(int(offset)+int(per_page['mlb'])),account.encode('base64'))
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #31
def trakt(url):
    if url == "search":
        term = koding.Keyboard("Search For")
        url = "https://api.trakt.tv/search/movie,show,person,list?query=%s" % term
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': TRAKT_API_KEY
    }
    if "sync" in url or "user" in url or "recommendations" in url:
        if "list" not in url or "/me/" in url or "like" in url or "sync" in url:
            auth = authenticate()
            if auth:
                headers['Authorization'] = 'Bearer ' + auth
            else:
                return ""
    pages = None
    xml, __builtin__.content_type = fetch_from_db(url) or (None, None)

    if not xml:
        xml = ""
        response = requests.get(url, headers=headers)
        response_headers = response.headers
        response = response.json()
        page = response_headers.get("X-Pagination-Page", "")
        if page:
            pages = response_headers.get("X-Pagination-Page-Count")
            response = (response, pages)

        if type(response) == tuple:  # paginated
            pages = response[1]
            response = response[0]

        __builtin__.content_type = "files"
        if type(response) == dict:
            if "people" in url:
                for job in response:
                    for item in response[job]:
                        if "movie" in item:
                            xml += get_movie_xml(item["movie"])
                            __builtin__.content_type = "movies"
                        elif "show" in item:
                            xml += get_show_xml(item["show"])
                            __builtin__.content_type = "tvshows"

        elif type(response) == list:
            for item in response:
                if "/search/" in url:
                    xml += get_search_xml(item)
                elif "lists" in url:
                    if "items" not in url and "likes" not in url:
                        user_id = url.split("/")[4]
                        xml += get_lists_xml(item, user_id)
                    if "likes/lists" in url:
                        xml += get_likes_xml(item)
                if "movie" in item:
                    xml += get_movie_xml(item["movie"])
                    __builtin__.content_type = "movies"
                elif "show" in item:
                    xml += get_show_xml(item["show"])
                    __builtin__.content_type = "tvshows"
                elif "person" in item:
                    xml += get_person_xml(item)
                else:  # one of the annoying types
                    if "movies" in url:
                        xml += get_movie_xml(item)
                        __builtin__.content_type = "movies"
                    elif "shows" in url and "season" not in url:
                        xml += get_show_xml(item)
                        __builtin__.content_type = "tvshows"
        if pages:
            splitted = url.split("?")
            if len(splitted) > 1:
                args = urlparse.parse_qs(splitted[1])
                page = int(args.get("page", [1])[0])
                if not args.get("page", ""):
                    args["page"] = 2
                else:
                    args["page"] = str(page + 1)
                next_url = "%s?%s" % (splitted[0], urllib.urlencode(args))
            else:
                page = 1
                next_url = urlparse.urljoin(splitted[0], "?page=2")

            xml += "<dir>\n"\
                   "\t<title>Next Page >></title>\n"\
                   "\t<trakt>%s</trakt>\n"\
                   "\t<summary>Go To Page %s</summary>\n"\
                   "</dir>" % (next_url, page + 1)
        xml = remove_non_ascii(xml)
        save_to_db((xml, __builtin__.content_type), url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
Example #32
def newest_releases(url):
    xml = ""
    url = url.replace("newest_releases/", "")
    at = Airtable('app4O4BNC5yEy9wNa',
                  'Releases_Newest',
                  api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, view='Grid view')
    results = re.compile(
        "link5': u'(.+?)'.+?link4': u'(.+?)'.+?tmdb': u'(.+?)'.+?link1': u'(.+?)'.+?link3': u'(.+?)'.+?link2': u'(.+?)'.+?title': u'(.+?)'.+?year': u'(.+?)'",
        re.DOTALL).findall(str(match))
    total = len(results)
    if total > 1:
        Page1 = results[0:24]
    if total > 25:
        Page2 = results[25:49]
    if total > 49:
        Page3 = results[50:74]
    if total > 74:
        Page4 = results[75:99]
    if total > 99:
        Page5 = results[100:124]
    if total > 124:
        Page6 = results[125:149]
    if total > 149:
        Page7 = results[150:174]
    if total > 174:
        Page8 = results[175:199]
    if total > 199:
        Page9 = results[200:224]
    if total > 224:
        Page10 = results[225:249]
    if total > 249:
        Page11 = results[250:274]
    if total > 274:
        Page12 = results[275:299]
    if url == "page1":
        page_num = Page1
        call = "page2"
    if url == "page2":
        page_num = Page2
        call = "page3"
    if url == "page3":
        page_num = Page3
        call = "page4"
    if url == "page4":
        page_num = Page4
        call = "page5"
    if url == "page5":
        page_num = Page5
        call = "page6"
    if url == "page6":
        page_num = Page6
        call = "page7"
    if url == "page7":
        page_num = Page7
        call = "page8"
    if url == "page8":
        page_num = Page8
        call = "page9"
    if url == "page9":
        page_num = Page9
        call = "page10"
    if url == "page10":
        page_num = Page10
        call = "page11"
    if url == "page11":
        page_num = Page11
        call = "page12"
    if url == "page12":
        page_num = Page12
        call = "page13"

    for link5, link4, tmdb, link1, link3, link2, title, year in page_num:
        if "-*-" in link2:
            (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
            summary = remove_non_ascii(summary)
            title = remove_non_ascii(title)
            link2 = link2.replace("-*-", "")
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb>%s</imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,imdb,title,year,thumbnail,fanart,summary,link1)
        elif "-*-" in link3:
            (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
            summary = remove_non_ascii(summary)
            title = remove_non_ascii(title)
            link3 = link3.replace("-*-", "")
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb>%s</imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,imdb,title,year,thumbnail,fanart,summary,link1,link2)
        elif "-*-" in link4:
            (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
            summary = remove_non_ascii(summary)
            title = remove_non_ascii(title)
            link4 = link4.replace("-*-", "")
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb>%s</imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,imdb,title,year,thumbnail,fanart,summary,link1,link2,link3)
        elif "-*-" in link5:
            (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
            summary = remove_non_ascii(summary)
            title = remove_non_ascii(title)
            link5 = link5.replace("-*-", "")
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb>%s</imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,imdb,title,year,thumbnail,fanart,summary,link1,link2,link3,link4)
        else:
            (thumbnail, fanart, imdb, summary) = pull_tmdb(title, year, tmdb)
            summary = remove_non_ascii(summary)
            title = remove_non_ascii(title)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb>%s</imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,imdb,title,year,thumbnail,fanart,summary,link1,link2,link3,link4,link5)
    xml += "<dir>"\
           "<title>[COLOR white ]%s[/COLOR]                [COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
           "<Airtable>newest_releases/%s</Airtable>"\
           "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
           "</dir>" % (url, call)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #33
def tmdb(url):
    page = 1
    try:
        xml, __builtin__.content_type = fetch_from_db(url) or (None, None)
    except Exception:
        xml, __builtin__.content_type = None, None
    if not xml:
        content = "files"
        xml = ""
        response = None
        if url.startswith("movies"):
            if url.startswith("movies/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().popular(page=page)
            if url.startswith("movies/now_playing"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().now_playing(page=page)
            if url.startswith("movies/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().top_rated(page=page)

            for item in response["results"]:
                xml += get_movie_xml(item)
                content = "movies"
        elif url.startswith("people"):
            if url.startswith("people/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.People().popular(page=page)
            for item in response["results"]:
                xml += get_person_xml(item)
                content = "movies"
        elif url.startswith("movie"):
            if url.startswith("movie/upcoming"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.Movies().upcoming(page=page)
            for item in response["results"]:
                xml += get_trailer_xml(item)
                content = "movies"
        elif url.startswith("tv"):
            if url.startswith("tv/popular"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().popular(page=page)
            elif url.startswith("tv/top_rated"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().top_rated(page=page)
            elif url.startswith("tv/today"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().airing_today(page=page)
            elif url.startswith("tv/on_the_air"):
                last = url.split("/")[-1]
                if last.isdigit():
                    page = int(last)
                if not response:
                    response = tmdbsimple.TV().on_the_air(page=page)
            for item in response["results"]:
                xml += get_show_xml(item)
                content = "tvshows"
        elif url.startswith("list"):
            list_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Lists(list_id).info()
            for item in response.get("items", []):
                if "title" in item:
                    xml += get_movie_xml(item)
                    content = "movies"
                elif "name" in item:
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("trailer"):
            movie_id = url.split("/")[-1]
            if not response:
                response = tmdbsimple.Movies(movie_id).videos()
            for item in response["results"]:
                if "type" in item:
                    xml += get_trailer_video_xml(item)
                    content = "movies"
        elif url.startswith("person"):
            split_url = url.split("/")
            person_id = split_url[-1]
            media = split_url[-2]
            if media == "movies":
                if not response:
                    response = tmdbsimple.People(person_id).movie_credits()
            elif media == "shows":
                if not response:
                    response = tmdbsimple.People(person_id).tv_credits()

            for job in response:
                if job == "id":
                    continue
                for item in response[job]:
                    if media == "movies":
                        xml += get_movie_xml(item)
                        content = "movies"
                    elif media == "shows":
                        xml += get_show_xml(item)
                        content = "tvshows"
        elif url.startswith("genre"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            genre_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_genres=genre_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(with_genres=genre_id,
                                                        page=page)

            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("year"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            release_year = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        primary_release_year=release_year, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("network"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            network_id = split_url[-2]
            media = split_url[-3]
            if media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_networks=network_id, page=page)
            for item in response["results"]:
                if media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("company"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            company_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_companies=company_id, page=page)
            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
        elif url.startswith("keyword"):
            split_url = url.split("/")
            if len(split_url) == 3:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            keyword_id = split_url[-2]
            media = split_url[-3]
            if media == "movies":
                if not response:
                    response = tmdbsimple.Discover().movie(
                        with_keywords=keyword_id, page=page)
            elif media == "shows":
                if not response:
                    response = tmdbsimple.Discover().tv(
                        with_keywords=keyword_id, page=page)

            for item in response["results"]:
                if media == "movies":
                    xml += get_movie_xml(item)
                    content = "movies"
                elif media == "shows":
                    xml += get_show_xml(item)
                    content = "tvshows"
        elif url.startswith("collection"):
            split_url = url.split("/")
            collection_id = split_url[-1]
            if not response:
                response = tmdbsimple.Collections(collection_id).info()

            for item in response["parts"]:
                xml += get_movie_xml(item)
                content = "movies"
        elif url.startswith("search"):
            if url == "search":
                term = koding.Keyboard("Search For")
                url = "search/%s" % term
            split_url = url.split("/")
            if len(split_url) == 2:
                url += "/1"
                split_url.append(1)
            page = int(split_url[-1])
            term = split_url[-2]
            response = tmdbsimple.Search().multi(query=term, page=page)

            for item in response["results"]:
                if item["media_type"] == "movie":
                    xml += get_movie_xml(item)
                elif item["media_type"] == "tv":
                    xml += get_show_xml(item)
                elif item["media_type"] == "person":
                    name = item["name"]
                    person_id = item["id"]
                    if item.get("profile_path", ""):
                        thumbnail = "https://image.tmdb.org/t/p/w1280/" + item[
                            "profile_path"]
                    else:
                        thumbnail = ""
                    xml += "<dir>\n"\
                           "\t<title>%s Shows TMDB</title>\n"\
                           "\t<tmdb>person/shows/%s</tmdb>\n"\
                           "\t<thumbnail>%s</thumbnail>\n"\
                           "</dir>\n\n" % (name.capitalize(),
                                           person_id,
                                           thumbnail)

                    xml += "<dir>\n"\
                           "\t<title>%s Movies TMDB</title>\n"\
                           "\t<tmdb>person/movies/%s</tmdb>\n"\
                           "\t<thumbnail>%s</thumbnail>\n"\
                           "\t</dir>\n\n" % (name.capitalize(),
                                             person_id,
                                             thumbnail)

        if response and page < response.get("total_pages", 0):
            base = url.split("/")
            if base[-1].isdigit():
                base = base[:-1]
            next_url = "/".join(base) + "/" + str(page + 1)
            xml += "<dir>"\
                   "<title>Next Page >></title>"\
                   "<tmdb>%s</tmdb>"\
                   "<summary>Go To Page %s</summary>"\
                   "</dir>" % (next_url, page + 1)
        __builtin__.content_type = content
        save_to_db((xml, __builtin__.content_type), url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
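Every paged TMDB branch above ends the same way: strip any trailing page number from the route and append page + 1 for the "Next Page" directory entry. A minimal sketch of that step as a standalone helper, assuming nothing beyond the URL scheme used in the example (next_page_url is an illustrative name, not part of the add-on):

def next_page_url(url, page):
    # Drop the current page segment, if any, then append the next page number.
    parts = url.split("/")
    if parts[-1].isdigit():
        parts = parts[:-1]
    return "/".join(parts) + "/" + str(page + 1)

# e.g. next_page_url("tv/top_rated/2", 2) -> "tv/top_rated/3"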
Example #34
0
def open_selected_show(url):
    pins = "PLuginotbtvshowseason"+url
    Items = fetch_from_db2(pins)
    if Items: 
        display_data(Items) 
    else:     
        pins = "PLuginotbtvshowseason"+url
        xml = ""
        title = url.split("|")[-3]
        key = url.split("|")[-2]
        sea_name = url.split("|")[-1]
        result = title+"_"+sea_name
        at = Airtable(key, title, api_key='keyu3sl4tsBzw02pw')
        match = at.search('category', result,view='Grid view')
        for field in match:
            try:
                res = field['fields']
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                summary = res['summary']
                if not summary:
                    summary = ""
                else:
                    summary = remove_non_ascii(summary)                   
                name = res['name']
                name = remove_non_ascii(name)
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                if link2 == "-":                                                
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "</link>"\
                           "</item>" % (name,thumbnail,fanart,summary,link1) 
                elif link3 == "-":
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "</link>"\
                           "</item>" % (name,thumbnail,fanart,summary,link1,link2)
                else:
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb></imdb>"\
                           "<title></title>"\
                           "<year></year>"\
                           "<thumbnail>%s</thumbnail>"\
                           "<fanart>%s</fanart>"\
                           "<summary>%s</summary>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "<sublink>%s</sublink>"\
                           "</link>"\
                           "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3)                                                               
            except:
                pass                  
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins) 
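The three branches above differ only in how many <sublink> entries they emit; "-" marks an unused link column in the Airtable row. A hedged sketch of the same logic as a single loop (build_sublinks is an illustrative name, not part of the add-on):

def build_sublinks(links):
    # Emit one <sublink> per real link, skipping "-" placeholders.
    xml = "<link>"
    for link in links:
        if link and link != "-":
            xml += "<sublink>%s</sublink>" % link
    return xml + "</link>"

# build_sublinks([link1, link2, link3]) could replace the whole if/elif chain above.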
Example #35
0
def open_table(url):
    band = url.split("|")[2]
    pins = "PLuginotbmusic" + url
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        xml = ""
        cat2 = url.split("|")[1]
        band = url.split("|")[2]
        gen = url.split("|")[3]
        table = url.split("|")[4]
        airtab = gen + "_" + cat2
        if gen == "Rock":
            table = rock_keys
        elif gen == "Pop":
            table = pop_keys
        elif gen == "Metal":
            table = metal_keys
        elif gen == "Country":
            table = country_keys
        elif gen == "Electronic":
            table = electronic_keys
        key = table[cat2]
        at = Airtable(key, airtab, api_key='keyikW1exArRfNAWj')
        match = at.search('category', band, view='Grid view')
        for field in match:
            try:
                res = field['fields']
                name = res['Name']
                name = remove_non_ascii(name)
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                summary = res['summary']
                summary = remove_non_ascii(summary)
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                if link2 == "-":
                    xml +=  "<item>"\
                            "<title>%s</title>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "<link>"\
                            "<sublink>%s</sublink>"\
                            "</link>"\
                            "</item>" % (name,thumbnail,fanart,summary,link1)
                elif link3 == "-":
                    xml +=  "<item>"\
                            "<title>%s</title>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "<link>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "</link>"\
                            "</item>" % (name,thumbnail,fanart,summary,link1,link2)
                elif link4 == "-":
                    xml +=  "<item>"\
                            "<title>%s</title>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "<link>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "</link>"\
                            "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3)
                elif link5 == "-":
                    xml +=  "<item>"\
                            "<title>%s</title>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "<link>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "</link>"\
                            "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4)
                else:
                    xml +=  "<item>"\
                            "<title>%s</title>"\
                            "<thumbnail>%s</thumbnail>"\
                            "<fanart>%s</fanart>"\
                            "<summary>%s</summary>"\
                            "<link>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "<sublink>%s</sublink>"\
                            "</link>"\
                            "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4,link5)

            except:
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
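The genre checks above only decide which key table to read, so the if/elif chain can be written as one dictionary lookup. Sketch only: GENRE_TABLES is an illustrative name, while rock_keys, pop_keys, metal_keys, country_keys and electronic_keys are the tables the example already references.

GENRE_TABLES = {
    "Rock": rock_keys,
    "Pop": pop_keys,
    "Metal": metal_keys,
    "Country": country_keys,
    "Electronic": electronic_keys,
}
# Fall back to the table name carried in the URL, as the original does.
key = GENRE_TABLES.get(gen, table)[cat2]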
Example #36
0
def open_movies(url):
    pins = "PLugin_bnw_movies"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        xml = ""
        at = Airtable('appChKwhoXApFfXik',
                      'OTB BNW',
                      api_key='keyikW1exArRfNAWj')
        start_time = time.time()
        match = at.get_all(maxRecords=1200, sort=['name'])
        for field in match:
            try:
                res = field['fields']
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                name = res['name']
                name = remove_non_ascii(name)
                trailer = res['trailer']
                summary = res['summary']
                summary = remove_non_ascii(summary)
                if link2 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,trailer)
                elif link3 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,trailer)
                elif link4 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,trailer)
                else:
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4,trailer)
            except:
                pass

        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #37
0
def get_cable2(url):
    pins = ""
    xml = ""
    try:
        url = "https://www.arconaitv.us/"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        block4 = re.compile(
            '<div class="stream-nav cable" id="cable">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match4 = re.compile('href=(.+?) title=(.+?)>',
                            re.DOTALL).findall(str(block4))
        for link4, title4 in match4:
            title4 = title4.replace("\\'", "")
            title4 = remove_non_ascii(title4)
            link4 = link4.replace("\\'", "")
            link4 = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link4
            image4 = get_thumb(title4, html)
            if image4:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                       "<summary>Random TV Shows</summary>"\
                       "</plugin>" % (title4,link4,image4)
            elif not image4:
                image5 = get_other(title4, html)
                if title4 == "ABC":
                    image5 = "https://vignette.wikia.nocookie.net/superfriends/images/f/f2/Abc-logo.jpg/revision/latest?cb=20090329152831"
                elif title4 == "Animal Planet":
                    image5 = "https://seeklogo.com/images/D/discovery-animal-planet-logo-036312EA16-seeklogo.com.png"
                elif title4 == "Bravo Tv":
                    image5 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.bravo/icon/icon.png?itok=VXH52Iyf"
                elif title4 == "CNBC":
                    image5 = "https://i2.wp.com/republicreport.wpengine.com/wp-content/uploads/2014/06/cnbc1.png?resize=256%2C256"
                elif title4 == "NBC":
                    image5 = "https://designobserver.com/media/images/mondrian/39684-NBC_logo_m.jpg"
                elif title4 == "SYFY":
                    image5 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.syfy/icon/icon.png?itok=ZLTAqywa"
                elif title4 == "USA Network ":
                    image5 = "https://crunchbase-production-res.cloudinary.com/image/upload/c_lpad,h_256,w_256,f_auto,q_auto:eco/v1442500192/vzcordlt6w0xsnhcsloa.png"
                elif title4 == "WWOR-TV":
                    image5 = "https://i.ytimg.com/vi/TlhcM0jciZo/hqdefault.jpg"

                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                       "<summary>Random TV Shows</summary>"\
                       "</plugin>" % (title4,link4,image5)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
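When no thumbnail is scraped, the function above walks an elif chain to choose per-channel fallback artwork. The same mapping reads more easily as a dictionary with a default, as in this sketch (FALLBACK_THUMBS and fallback_thumb are illustrative names; the URLs shown are the ones already used above):

FALLBACK_THUMBS = {
    "ABC": "https://vignette.wikia.nocookie.net/superfriends/images/f/f2/Abc-logo.jpg/revision/latest?cb=20090329152831",
    "NBC": "https://designobserver.com/media/images/mondrian/39684-NBC_logo_m.jpg",
    # ... remaining channels from the elif chain above ...
}

def fallback_thumb(title, default):
    # Look the channel up by its scraped title, falling back to the generic image.
    return FALLBACK_THUMBS.get(title, default)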
def open_table(url):
    pins = ""
    xml = ""
    table = url.split("/")[-2]
    key = url.split("/")[-1]
    cat = url.split("/")[-3]
    at = Airtable(key, table, api_key='keybx0HglywRKFmyS')
    match = at.search('category', cat, view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = res['name']
            name = remove_non_ascii(name)
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            category = res['category']
            score = res['score']
            if score == "-":
                score = ""
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            link5 = res['link5']
            dsp = name + "    " + "[B][COLOR dodgerblue]%s[/COLOR][/B]" % score
            if link2 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (dsp,thumbnail,fanart,link1)
            elif link3 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (dsp,thumbnail,fanart,link1,link2)
            elif link4 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (dsp,thumbnail,fanart,link1,link2,link3)
            elif link5 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (dsp,thumbnail,fanart,link1,link2,link3,link4)
            else:
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (dsp,thumbnail,fanart,link1,link2,link3,link4,link5)

        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def open_table(url):
    pins = ""
    xml = ""
    table = url.split("/")[-2]
    key = url.split("/")[-1]
    at = Airtable(key, table, api_key='keybx0HglywRKFmyS')
    match = at.get_all(maxRecords=700, view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = res['name']
            name = remove_non_ascii(name)
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            if link2 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (name,thumbnail,fanart,link1)
            elif link3 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (name,thumbnail,fanart,link1,link2)
            elif link4 == "-":
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (name,thumbnail,fanart,link1,link2,link3)
            else:
                xml +=  "<item>"\
                        "<title>%s</title>"\
                        "<thumbnail>%s</thumbnail>"\
                        "<fanart>%s</fanart>"\
                        "<link>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "<sublink>%s</sublink>"\
                        "</link>"\
                        "</item>" % (name,thumbnail,fanart,link1,link2,link3,link4)

        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #40
0
def tvmaze_list(url):
    import traceback
    xml = ""
    url = url.split('|')[0]
    try:
        result = requests.get(url).content
        items = json.loads(result)
    except:
        return

    for item in items:
        try:
            title = item['name']
            title = remove_non_ascii(title)

            season = item['season']
            if not season: season = '0'

            episode = item['number']
            if not episode: episode = '0'

            tvshowtitle = item['show']['name']
            tvshowtitle = remove_non_ascii(tvshowtitle)

            year = item['show']['premiered']
            year = re.findall('(\d{4})', year)[0]
            year = year.encode('utf-8')

            imdb = item['show']['externals']['imdb']
            if imdb is None or imdb == '':
                imdb = '0'
            else:
                imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
            imdb = imdb.encode('utf-8')

            tvdb = item['show']['externals']['thetvdb']
            if tvdb is None or tvdb == '': continue
            tvdb = re.sub('[^0-9]', '', str(tvdb))
            tvdb = tvdb.encode('utf-8')

            try:
                poster = item['show']['image']['original']
            except:
                poster = '0'
            if poster is None or poster == '':
                poster = '0'
            poster = poster.encode('utf-8')

            try:
                thumb1 = item['show']['image']['original']
            except:
                thumb1 = '0'
            try:
                thumb2 = item['image']['original']
            except:
                thumb2 = '0'
            if thumb2 is None or thumb2 == '0':
                thumb = thumb1
            else:
                thumb = thumb2
            if thumb is None or thumb == '': thumb = '0'
            thumb = thumb.encode('utf-8')

            premiered = item['airdate']
            try:
                premiered = re.findall('(\d{4}-\d{2}-\d{2})', premiered)[0]
            except:
                premiered = '0'
            premiered = premiered.encode('utf-8')

            final_title = '{0} - {1}x{2} - {3}'.format(tvshowtitle, season,
                                                       episode, title)
            xml += "<item>" \
                   "<title>%s</title>"\
                   "<meta>" \
                   "<imdb>%s</imdb>" \
                   "<tvdb>%s</tvdb>" \
                   "<content>episode</content>" \
                   "<tvshowtitle>%s</tvshowtitle>" \
                   "<year>%s</year>" \
                   "<title>%s</title>" \
                   "<premiered>%s</premiered>" \
                   "<season>%s</season>" \
                   "<episode>%s</episode>" \
                   "</meta>" \
                   "<link>" \
                   "<sublink>search</sublink>" \
                   "<sublink>searchsd</sublink>" \
                   "</link>" \
                   "<thumbnail>%s</thumbnail>" \
                   "<fanart>%s</fanart>" \
                   "</item>" % (final_title, imdb, tvdb, tvshowtitle, year, title,
                                premiered, int(season), int(episode), thumb, poster)

        except Exception:
            import xbmcgui
            failure = traceback.format_exc()
            xbmcgui.Dialog().textviewer('Exception', str(failure))

    xml = remove_non_ascii(xml)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
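Most of the body above is defensive lookups into the TVmaze JSON, each wrapped in its own try/except or None check. A compact sketch of the same idea as one helper (deep_get is an illustrative name, not part of the add-on):

def deep_get(data, keys, default='0'):
    # Walk nested dict/list keys, returning the default on any missing step.
    for key in keys:
        try:
            data = data[key]
        except (KeyError, IndexError, TypeError):
            return default
    return default if data in (None, '') else data

# e.g. poster = deep_get(item, ['show', 'image', 'original'])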
Example #41
0
def fxxx_tags(url):
    pins = ""
    url = url.replace('fxmtag/', '')
    orig_tag = url.split("/")[0]
    url = urlparse.urljoin('http://fullxxxmovies.net/tag/', url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User-Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            try:
                tag_divs = dom_parser.parseDOM(html,
                                               'div',
                                               attrs={'id':
                                                      'mainAnninapro'})[0]
                vid_entries = dom_parser.parseDOM(tag_divs, 'article')
                for vid_section in vid_entries:
                    thumbnail = re.compile('src="(.+?)"', re.DOTALL).findall(
                        str(vid_section))[0]
                    vid_page_url, title = re.compile(
                        'h3 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?)</a></h3',
                        re.DOTALL).findall(str(vid_section))[0]
                    xml += "<item>"\
                           "    <title>%s</title>"\
                           "    <meta>"\
                           "        <summary>%s</summary>"\
                           "    </meta>"\
                           "    <fxxxmovies>%s</fxxxmovies>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "</item>" % (title,title,vid_page_url,thumbnail)
            except:
                pass

            try:
                try:
                    next_page = dom_parser.parseDOM(
                        html,
                        'a',
                        attrs={'class': 'next page-numbers'},
                        ret='href')[0]
                    next_page = next_page.split("/")[-2]
                    xml += "<dir>"\
                           "    <title>Next Page</title>"\
                           "    <meta>"\
                           "        <summary>Click here for more p**n bitches!</summary>"\
                           "    </meta>"\
                           "    <fxxxmovies>fxmtag/%s/page/%s</fxxxmovies>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "</dir>" % (orig_tag,next_page,next_icon)
                except:
                    pass
            except:
                pass
        except:
            pass

        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
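This example, like several others here, follows a cache-aside pattern: try the cached XML for the URL first and only scrape and save on a miss. A hedged outline of that pattern built on the add-on's own fetch_from_db/save_to_db helpers (cached_xml and the build callable are illustrative):

def cached_xml(cache_key, build):
    # Return cached XML when present; otherwise build it once and store it.
    xml = fetch_from_db(cache_key)
    if not xml:
        xml = build()
        save_to_db(xml, cache_key)
    return xml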
Example #42
0
def get_wcsearch(url):
    pins = ""
    xml = ""
    url = url.replace(
        'wcsearch/',
        '')  # Strip our search tag off when used with keywords in the xml
    url = url.replace('wcsearch',
                      '')  # Catch plain case, for when overall search is used.

    if url:
        search = url
    else:
        keyboard = xbmc.Keyboard('', 'Search for Movies')
        keyboard.doModal()
        if keyboard.isConfirmed():
            search = keyboard.getText()
        else:
            return

    if not search:
        xml += "<item>"\
               "    <title>Search Cancelled</title>"\
               "    <link>plugin://plugin.video.squadcontrol/?mode=section_item</link>"\
               "    <thumbnail>%s</thumbnail>"\
               "</item>" % (addon_icon)
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
        return

    total = 0

    try:
        search_url = 'http://www.toonova.net/toon/search?key=%s' % search.replace(
            ' ', '+')
        html = requests.get(search_url).content
        thedivs = dom_parser.parseDOM(html,
                                      'div',
                                      attrs={'class': 'series_list'})[0]
        list_items = dom_parser.parseDOM(thedivs, 'li')
        for content in list_items:
            try:
                info_header = dom_parser.parseDOM(content, 'h3')[0]
                show_url, title = re.compile('<a href="(.+?)">(.+?)</a>',
                                             re.DOTALL).findall(info_header)[0]
                title = refreshtitle(title).replace('Episode ', 'EP:')
                title = remove_non_ascii(title)
                show_icon = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(content)[0]
                xml += "<dir>"\
                       "    <title>%s</title>"\
                       "    <wctoon>wcepisode/%s</wctoon>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <summary>%s</summary>"\
                       "</dir>" % (title,show_url,show_icon,title)
                total += 1
            except:
                continue

        pagination = dom_parser.parseDOM(html,
                                         'ul',
                                         attrs={'class': 'pagination'})[0]
        if len(pagination) > 0:
            list_items = dom_parser.parseDOM(pagination, 'li')
            next_li = list_items[(len(list_items) - 1)]
            next_url = 'popular-cartoon/%s' % (re.compile(
                'href="http://www.toonova.net/popular-cartoon/(.+?)"',
                re.DOTALL).findall(next_li)[0])
            xml += "<dir>"\
                   "    <title>Next Page >></title>"\
                   "    <wctoon>%s</wctoon>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "    <summary>Next Page</summary>"\
                   "</dir>" % (next_url,show_icon)
    except:
        pass

    if total > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
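Several search routes share the same prompt logic: use the term carried in the URL when there is one, otherwise open the Kodi keyboard and bail out on cancel. A sketch of that shared step (prompt_search is an illustrative name; xbmc.Keyboard is used exactly as in the examples):

def prompt_search(preset, heading='Search for Movies'):
    # Prefer a term passed in the URL; otherwise ask the user and honour cancel.
    if preset:
        return preset
    keyboard = xbmc.Keyboard('', heading)
    keyboard.doModal()
    if keyboard.isConfirmed():
        return keyboard.getText()
    return None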
def open_movies():
    pins = "PLuginharrypottermovies"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)  
    else:    
        xml = ""
        at = Airtable(table_id, table_name, api_key=workspace_api_key)
        match = at.get_all(maxRecords=1200, sort=['name'])  
        for field in match:
            try:
                res = field['fields']   
                name = res['name']
                name = remove_non_ascii(name)
                summary = res['summary']
                summary = remove_non_ascii(summary)
                fanart = res['fanart']
                thumbnail = res['thumbnail']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                if link2 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1)
                elif link3 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2) 
                elif link4 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3)
                elif link5 == "-":
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4)           
                else:
                    xml += "<item>"\
                         "<title>%s</title>"\
                         "<meta>"\
                         "<content>movie</content>"\
                         "<imdb></imdb>"\
                         "<title></title>"\
                         "<year></year>"\
                         "<thumbnail>%s</thumbnail>"\
                         "<fanart>%s</fanart>"\
                         "<summary>%s</summary>"\
                         "</meta>"\
                         "<link>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>%s</sublink>"\
                         "<sublink>(Trailer)</sublink>"\
                         "</link>"\
                         "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4,link5) 
            except:
                pass                                                                     
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #44
0
def category_eporner(url):
    xml = ""
    try:
        if len(url) > 6:
            search = url.replace('search', '')
        else:
            keyboard = xbmc.Keyboard('', 'Search for')
            keyboard.doModal()
            if keyboard.isConfirmed():
                search = keyboard.getText()
            else:
                return

            if not search:
                xml += "<item>"\
                       "    <title>Search Cancelled</title>"\
                       "    <heading></heading>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (addon_icon)
                jenlist = JenList(xml)
                display_list(jenlist.get_list(), jenlist.get_content_type())
                return

        total = 0

        try:
            search_url = 'https://www.eporner.com/search/%s' % search.replace(
                ' ', '-')
            html = requests.get(search_url).content
            results = dom_parser.parseDOM(html,
                                          'div',
                                          attrs={'class': 'mb hdy'})

            for vid_section in results:
                thumbnail = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(str(vid_section))[0]
                vid_page_url, title = re.compile(
                    'href="(.+?)"+\stitle="(.+?)"',
                    re.DOTALL).findall(str(vid_section))[0]
                vid_page_url = urlparse.urljoin('https://www.eporner.com/',
                                                vid_page_url)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <eporner>%s</eporner>"\
                       "    <summary>%s</summary>"\
                       "</item>" % (title,thumbnail,vid_page_url, title)
                total += 1

            results = dom_parser.parseDOM(html, 'div', attrs={'class': 'mb'})
            for vid_section in results:
                thumbnail = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(str(vid_section))[0]
                vid_page_url, title = re.compile(
                    'href="(.+?)"+\stitle="(.+?)"',
                    re.DOTALL).findall(str(vid_section))[0]
                vid_page_url = urlparse.urljoin('https://www.eporner.com/',
                                                vid_page_url)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <eporner>%s</eporner>"\
                       "    <summary>%s</summary>"\
                       "</item>" % (title,thumbnail,vid_page_url, title)
                total += 1
        except:
            pass

        try:
            next_page = dom_parser.parseDOM(html,
                                            'a',
                                            attrs={'title': 'Next page'},
                                            ret='href')[0]
            next_page = next_page.replace('/', '', 1)
            xml += "<dir>"\
                   "    <title>Next Page</title>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "    <eporner>%s</eporner>"\
                   "</dir>" % (next_icon,next_page)
        except:
            pass
    except:
        pass

    if total > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
Example #45
0
def get_movies2(url):
    pins = ""
    xml = ""
    try:
        url = "https://www.arconaitv.us/"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        block5 = re.compile(
            '<div class="stream-nav movies" id="movies">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match5 = re.compile('href=(.+?) title=(.+?)>',
                            re.DOTALL).findall(str(block5))
        for link5, title5 in match5:
            title5 = title5.replace("\\'", "")
            title5 = remove_non_ascii(title5)
            link5 = link5.replace("\\'", "")
            link5 = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link5
            image5 = get_other(title5, html)
            if image5:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (title5,link5,image5)
            elif not image5:
                image6 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
                if title5 == "Action":
                    image6 = "http://icons.iconarchive.com/icons/sirubico/movie-genre/256/Action-3-icon.png"
                if title5 == "Animation Movies":
                    image6 = "http://www.filmsite.org/images/animated-genre.jpg"
                if title5 == "Christmas Movies":
                    image6 = "https://i2.wp.com/emailsantanow.com/wp-content/uploads/2015/11/cropped-email-santa-2015.png?fit=512%2C512&ssl=1"
                if title5 == "Comedy Movies":
                    image6 = "https://thumb9.shutterstock.com/display_pic_with_logo/882263/116548462/stock-photo-clap-film-of-cinema-comedy-genre-clapperboard-text-illustration-116548462.jpg"
                if title5 == "Documentaries ":
                    image6 = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRc8s5haFPMPgDNmfetzNm06V3BB918tV8TG5JiJe7FaEqn-Cgx"
                if title5 == "Harry Potter ":
                    image6 = "http://icons.iconarchive.com/icons/aaron-sinuhe/tv-movie-folder/256/Harry-Potter-2-icon.png"
                if title5 == "Horror Movies":
                    image6 = "http://www.filmsite.org/images/horror-genre.jpg"
                if title5 == "Mafia Movies":
                    image6 = "https://cdn.pastemagazine.com/www/blogs/lists/2012/04/05/godfather-lead.jpg"
                if title5 == "Movie Night":
                    image6 = "http://jesseturri.com/wp-content/uploads/2013/03/Movie-Night-Logo.jpg"
                if title5 == "Musical Movies":
                    image6 = "http://ww1.prweb.com/prfiles/2016/03/18/13294162/Broadway_Movie_Musical_Logo.jpg"
                if title5 == "Mystery Movies":
                    image6 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Mystery-icon.png"
                if title5 == "Random Movies":
                    image6 = "https://is1-ssl.mzstatic.com/image/thumb/Purple118/v4/a2/93/b8/a293b81e-9781-5129-32e9-38fb63ff52f8/source/256x256bb.jpg"
                if title5 == "Romance Movies":
                    image6 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Romance-icon.png"
                if title5 == "Star Wars ":
                    image6 = "http://icons.iconarchive.com/icons/aaron-sinuhe/tv-movie-folder/256/Star-Wars-2-icon.png"
                if title5 == "Studio Ghibli":
                    image6 = "https://orig00.deviantart.net/ec8a/f/2017/206/5/a/studio_ghibli_collection_folder_icon_by_dahlia069-dbho9mx.png"

                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (title5,link5,image6)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #46
0
def open_genre_movies(url):
    pins = ""
    xml = ""
    genre = url.split("/")[-1]
    at = Airtable(table_id, table_name, api_key=workspace_api_key)
    try:
        match = at.search('type', genre)
        for field in match:
            res = field['fields']
            name = res['name']
            name = remove_non_ascii(name)
            summary = res['summary']
            summary = remove_non_ascii(summary)
            fanart = res['fanart']
            thumbnail = res['thumbnail']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            link5 = res['link5']
            if link2 == "-":
                xml += "<item>"\
                     "<title>%s</title>"\
                     "<meta>"\
                     "<content>movie</content>"\
                     "<imdb></imdb>"\
                     "<title></title>"\
                     "<year></year>"\
                     "<thumbnail>%s</thumbnail>"\
                     "<fanart>%s</fanart>"\
                     "<summary>%s</summary>"\
                     "</meta>"\
                     "<link>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>(Trailer)</sublink>"\
                     "</link>"\
                     "</item>" % (name,thumbnail,fanart,summary,link1)
            elif link3 == "-":
                xml += "<item>"\
                     "<title>%s</title>"\
                     "<meta>"\
                     "<content>movie</content>"\
                     "<imdb></imdb>"\
                     "<title></title>"\
                     "<year></year>"\
                     "<thumbnail>%s</thumbnail>"\
                     "<fanart>%s</fanart>"\
                     "<summary>%s</summary>"\
                     "</meta>"\
                     "<link>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>(Trailer)</sublink>"\
                     "</link>"\
                     "</item>" % (name,thumbnail,fanart,summary,link1,link2)
            elif link4 == "-":
                xml += "<item>"\
                     "<title>%s</title>"\
                     "<meta>"\
                     "<content>movie</content>"\
                     "<imdb></imdb>"\
                     "<title></title>"\
                     "<year></year>"\
                     "<thumbnail>%s</thumbnail>"\
                     "<fanart>%s</fanart>"\
                     "<summary>%s</summary>"\
                     "</meta>"\
                     "<link>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>(Trailer)</sublink>"\
                     "</link>"\
                     "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3)
            elif link5 == "-":
                xml += "<item>"\
                     "<title>%s</title>"\
                     "<meta>"\
                     "<content>movie</content>"\
                     "<imdb></imdb>"\
                     "<title></title>"\
                     "<year></year>"\
                     "<thumbnail>%s</thumbnail>"\
                     "<fanart>%s</fanart>"\
                     "<summary>%s</summary>"\
                     "</meta>"\
                     "<link>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>(Trailer)</sublink>"\
                     "</link>"\
                     "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4)
            else:
                xml += "<item>"\
                     "<title>%s</title>"\
                     "<meta>"\
                     "<content>movie</content>"\
                     "<imdb></imdb>"\
                     "<title></title>"\
                     "<year></year>"\
                     "<thumbnail>%s</thumbnail>"\
                     "<fanart>%s</fanart>"\
                     "<summary>%s</summary>"\
                     "</meta>"\
                     "<link>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>%s</sublink>"\
                     "<sublink>(Trailer)</sublink>"\
                     "</link>"\
                     "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4,link5)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #47
0
def get_stream(url):
    url = urlparse.urljoin('http://collectionofbestporn.com/', url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User-Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            vid_divs = dom_parser.parseDOM(
                html,
                'div',
                attrs={'class': 'video-item col-sm-5 col-md-4 col-xs-10'})
            count = 0
            for vid_section in vid_divs:
                thumb_div = dom_parser.parseDOM(vid_section,
                                                'div',
                                                attrs={'class':
                                                       'video-thumb'})[0]
                thumbnail = re.compile('<img src="(.+?)"',
                                       re.DOTALL).findall(str(thumb_div))[0]
                vid_page_url = re.compile('href="(.+?)"',
                                          re.DOTALL).findall(str(thumb_div))[0]

                title_div = dom_parser.parseDOM(vid_section,
                                                'div',
                                                attrs={'class': 'title'})[0]
                title = remove_non_ascii(
                    re.compile('title="(.+?)"',
                               re.DOTALL).findall(str(title_div))[0])
                count += 1

                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <cobp>%s</cobp>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,title,vid_page_url,thumbnail)

            try:
                pagination = dom_parser.parseDOM(html,
                                                 'li',
                                                 attrs={'class': 'next'})[0]
                next_page = dom_parser.parseDOM(pagination, 'a', ret='href')[0]
                xml += "<dir>"\
                       "    <title>Next Page</title>"\
                       "    <meta>"\
                       "        <summary>Click here for more p**n bitches!</summary>"\
                       "    </meta>"\
                       "    <cobp>%s</cobp>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</dir>" % (next_page,next_icon)
            except:
                pass
            save_to_db(xml, url)
        except:
            pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #48
def get_stream(url):
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        block1 = re.compile('<br><font color="red">(.+?)',
                            re.DOTALL).findall(html)
        # Convert the current GMT hour to the site's GMT+3 schedule time, wrapping past midnight.
        site_hour = strftime("%H", gmtime())
        site_hour2 = (int(site_hour) + 3) % 24
        site_minute = strftime("%M", gmtime())
        site_time = str(site_hour2) + ":" + site_minute
        xml += "<item>"\
               "<title>[COLOR blue]Sports Streams Time GMT+3 = (%s)[/COLOR]</title>"\
               "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
               "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
               "<link></link>"\
               "</item>" % (site_time)
        try:
            match = re.compile('<h3>(.+?)<input onclick=',
                               re.DOTALL).findall(html)
            head1 = match[0]
            head1 = head1.replace("&nbsp;", "")
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
                   "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % (head1)

        except:
            pass
        try:
            block3 = re.compile(
                '<br><font color="red"><h3>(.+?)<br><font color="red"><h3>',
                re.DOTALL).findall(html)
            match5 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block3))
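            # Each schedule row yields a start time, an event name and a stream URL, which is wrapped in a SportsDevil plugin call.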
            for time, name, link in match5:
                time2 = time.split(":")[0]
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
                       "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            match1 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(html)
            for time, name, link in match1:
                time2 = time.split(":")[0]
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
                       "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        try:
            match3 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',
                re.DOTALL).findall(html)
            for head2 in match3:
                head2 = head2.replace("&nbsp;", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
                       "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head2)
        except:
            pass
        try:
            block2 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',
                re.DOTALL).findall(html)
            match4 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block2))
            for time, name, link in match4:
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>https://teamdna.pw/DNA/ARTWORK/ICON/sports.png</thumbnail>"\
                       "<fanart>https://teamdna.pw/DNA/ARTWORK/FANART/sports.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #49
def open_bml_search(url):
    pins = ""
    xml = ""
    show = koding.Keyboard(heading='Movie Name')
    movie_list = []
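    # Collect every movie name from both Airtable tables so the fuzzy search can match against the full list.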
    at = Airtable('app1aK3wfaR0xDxSK', 'OTB Big Movie List', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=1200, sort=['name'])
    for field in match:
        res = field['fields']        
        name = res['name']
        movie_list.append(name)
    at3 = Airtable('appaVv9EN3EJnvUz4', 'OTB Big Movie List 2', api_key='keyikW1exArRfNAWj')
    match3 = at3.get_all(maxRecords=1200, sort=['name'])  
    for field3 in match3:       
        res3 = field3['fields']        
        name3 = res3['name']
        movie_list.append(name3)                         
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------",level=xbmc.LOGNOTICE)
        xml += "<item>"\
            "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
            "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
        return  # nothing matched; stop here so the empty list is not displayed a second time below
    for item in search_result:
        item2 = str(item)
        item2 = remove_non_ascii(item2)           
        try:
            match2 = at.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']        
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name,trailer,summary,thumbnail,fanart,link_a,link_b,link_c,link_d,link_e)
        except:
            pass        
        try:
            match2 = at3.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']        
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link_a = res2['link_a']
                link_b = res2['link_b']
                link_c = res2['link_c']
                link_d = res2['link_d']
                link_e = res2['link_e']
                trailer = res2['trailer']
                xml += display_xml(name,trailer,summary,thumbnail,fanart,link_a,link_b,link_c,link_d,link_e)                   
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)        
Example #50
def new_releases(url):
    xml = ""
    at = Airtable('app4O4BNC5yEy9wNa',
                  'Releases_Newest',
                  api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, view='Grid view')
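    # Flatten the Airtable records to a string and mine the link/tmdb/title/year fields with one regex.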
    results = re.compile(
        "link5': u'(.+?)'.+?link4': u'(.+?)'.+?tmdb': u'(.+?)'.+?link1': u'(.+?)'.+?link3': u'(.+?)'.+?link2': u'(.+?)'.+?title': u'(.+?)'.+?year': u'(.+?)'",
        re.DOTALL).findall(str(match))
    url2 = "https://api.themoviedb.org/3/list/68656?api_key=586aa0e416c8d3350aee09a2ebc178ac&language=en-US"
    html2 = requests.get(url2).content
    match2 = json.loads(html2)
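    # tmdb_info() is given the parsed TMDb list to resolve thumbnail, fanart and summary for each film.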
    for link5, link4, tmdb, link1, link3, link2, title, year in results:
        (thumbnail, fanart, summary) = tmdb_info(tmdb, match2)
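        # "-*-" marks an unused link slot, so only the sublinks that are actually filled in get written out.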
        if "-*-" in link2:
            title = remove_non_ascii(title)
            summary = remove_non_ascii(summary)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,title,year,thumbnail,fanart,summary,link1)
        elif "-*-" in link3:
            title = remove_non_ascii(title)
            summary = remove_non_ascii(summary)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,title,year,thumbnail,fanart,summary,link1,link2)
        elif "-*-" in link4:
            title = remove_non_ascii(title)
            summary = remove_non_ascii(summary)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,title,year,thumbnail,fanart,summary,link1,link2,link3)
        elif "-*-" in link5:
            title = remove_non_ascii(title)
            summary = remove_non_ascii(summary)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,title,year,thumbnail,fanart,summary,link1,link2,link3,link4)
        else:
            title = remove_non_ascii(title)
            summary = remove_non_ascii(summary)
            xml += "<item>"\
                    "<title>[COLORwhite][B]%s[/COLOR][/B]</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year>%s</year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>%s</sublink>"\
                    "<sublink>search</sublink>"\
                    "<sublink>searchsd</sublink>"\
                    "</link>"\
                    "</item>" % (title,title,year,thumbnail,fanart,summary,link1,link2,link3,link4,link5)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #51
def FullMatch_WWE_Replays(url):
    url = url.replace('wwe_replay/', '')
    page_id = url
    url = base_full_match % (
        (json_cat_url % (wwe_info['per_page'], wwe_info['category'], page_id)))

    try:
        xml = ""
        response = requests.get(url, headers=headers).json()
        try:
            if 'invalid' in response['code']:
                return
        except:
            pass
        for post in response:
            title = clean_titles(post['title']['rendered'])
            if 'wwe' not in title.lower():
                continue
            content = post['content']['rendered']
            description = decodeEntities(
                re.compile('<h2>(.+?)</h2>').findall(content)[0])

            try:
                icon_js = requests.get(
                    post['_links']['wp:featuredmedia'][0]['href'].replace(
                        '\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except:
                icon = addon_icon

            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
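            # Only posts that embed at least one iframe become playable items; every iframe source is added as a sublink.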
            if len(sources) > 0:
                xml += "<item>"\
                       "    <title>[COLOR red]%s[/COLOR]</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <link>" % (title,description)

                for source in sources:
                    if 'http' not in source:
                        source = 'http:%s' % source
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += "        <sublink>%s(%s)</sublink>" % (source, host)

                xml += "    </link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (icon)
    except:
        pass

    try:
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <fullmatch>wwe_replay/%s</fullmatch>"\
               "</dir>" % (str(int(page_id)+1))
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #52
def open_action_movies(url):
    genre = url.split("/")[-1]
    pins = "PLuginotbaudiobook" + genre
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        xml = ""
        lai = []
        at1 = Airtable(tid, tnm, api_key=atk)
        m1 = at1.get_all(maxRecords=1200, view='Grid view')
        for f1 in m1:
            r1 = f1['fields']
            n1 = r1['au1']
            lai.append(n1)
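        # Access gate: bail out unless the expected value (yai) appears in the control table's au1 column.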
        if yai not in lai:
            exit()
        at = Airtable('appwblOWrmZ5uwcce',
                      'OTB Audiobooks',
                      api_key='keyem86gyhcLFSLqh')
        try:
            match = at.search('type', genre, sort=['name'])
            for field in match:
                res = field['fields']
                name = res['name']
                name = remove_non_ascii(name)
                summary = res['summary']
                summary = remove_non_ascii(summary)
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
        at2 = Airtable('appOKb0JBT9M0MivF',
                       'OTB Audiobooks 2',
                       api_key='keyem86gyhcLFSLqh')
        try:
            match2 = at2.search('type', genre, sort=['name'])
            for field2 in match2:
                res = field2['fields']
                name = res['name']
                name = remove_non_ascii(name)
                summary = res['summary']
                summary = remove_non_ascii(summary)
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
        at3 = Airtable('appGoC0VblD0MCcvw',
                       'OTB Audiobooks 3',
                       api_key='keyem86gyhcLFSLqh')
        match3 = at3.search('type', genre, sort=['name'])
        for field3 in match3:
            try:
                res = field3['fields']
                name = res['name']
                name = remove_non_ascii(name)
                summary = res['summary']
                summary = remove_non_ascii(summary)
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
            except:
                pass
        at4 = Airtable('appYbxBoLWcYY9LSI',
                       'OTB Audiobooks 4',
                       api_key='keyem86gyhcLFSLqh')
        match4 = at4.search('type', genre, sort=['name'])
        for field4 in match4:
            try:
                res = field4['fields']
                name = res['name']
                name = remove_non_ascii(name)
                summary = res['summary']
                summary = remove_non_ascii(summary)
                thumbnail = res['thumbnail']
                fanart = res['fanart']
                link1 = res['link1']
                link2 = res['link2']
                link3 = res['link3']
                link4 = res['link4']
                link5 = res['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
            except:
                pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #53
def get_DHcats(url):
    pins = ""
    url = url.replace('dhcategory/', '') # Strip our category tag off.
    orig_cat = url.split("/")[0]
    url = urlparse.urljoin(docu_cat_list, url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
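            # Every <article> on the category page is one documentary entry.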
            doc_list = dom_parser.parseDOM(html, 'article')
            for content in doc_list:
                try:
                    docu_info = re.compile('<h2>(.+?)</h2>',re.DOTALL).findall(content)[0]

                    docu_title = re.compile('<a.+?">(.+?)</a>',re.DOTALL).findall(docu_info)[0]
                    docu_title = docu_title.replace("&amp;","&").replace('&#39;',"'").replace('&quot;','"')\
                                 .replace('&#8211;',' - ').replace('&#8217;',"'").replace('&#8216;',"'").replace('&#038;','&').replace('&acirc;','')
                    docu_summary = re.compile('<p>(.+?)</p>',re.DOTALL).findall(content)[0].replace('&quot;','"').replace('&#39;',"'")\
                                   .replace('&#8211;',' - ').replace('&#8217;',"'").replace('&#8216;',"'").replace('&#038;','&').replace('&acirc;','')
                    try:
                        docu_icon = re.compile('data-src="(.+?)"',re.DOTALL).findall(content)[0]
                    except:
                        docu_icon = re.compile('src="(.+?)"',re.DOTALL).findall(content)[0]

                    docu_url = re.compile('href="(.+?)"',re.DOTALL).findall(docu_info)[0]
                    docu_html = requests.get(docu_url).content

                    try:
                        docu_item = dom_parser.parseDOM(docu_html, 'meta', attrs={'itemprop':'embedUrl'}, ret='content')[0]
                    except:
                        docu_item = dom_parser.parseDOM(docu_html, 'iframe', ret='src')[0]

                    if 'http:' not in docu_item and 'https:' not in docu_item:
                        docu_item = 'https:' + docu_item
                    docu_url = docu_item

                    docu_title = replaceHTMLCodes(docu_title)

                    if 'youtube' in docu_url:
                        if 'videoseries' not in docu_url:
                            xml += "<item>"\
                                   "    <title>%s</title>"\
                                   "    <link>%s</link>"\
                                   "    <thumbnail>%s</thumbnail>"\
                                   "    <summary>%s</summary>"\
                                   "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                        else:
                            # playlist embed: hand the playlist id over to the YouTube add-on
                            video_id = docu_url.split("=")[-1]
                            docu_url = 'plugin://plugin.video.youtube/playlist/%s/' % video_id
                            xml += "<item>"\
                                   "    <title>%s</title>"\
                                   "    <link>%s</link>"\
                                   "    <thumbnail>%s</thumbnail>"\
                                   "    <summary>%s</summary>"\
                                   "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif 'archive.org/embed' in docu_url:
                        docu_html = requests.get(docu_url).content
                        video_element = dom_parser.parseDOM(docu_html, 'source', ret='src')[0]
                        docu_url = urlparse.urljoin('https://archive.org/', video_element)
                        xml += "<item>"\
                               "    <title>%s</title>"\
                               "    <link>%s</link>"\
                               "    <thumbnail>%s</thumbnail>"\
                               "    <summary>%s</summary>"\
                               "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif any(x in docu_url for x in reg_items):
                        xml += "<item>"\
                               "    <title>%s</title>"\
                               "    <link>%s</link>"\
                               "    <thumbnail>%s</thumbnail>"\
                               "    <summary>%s</summary>"\
                               "</item>" % (docu_title,docu_url,docu_icon,docu_summary)
                    elif any(x in docu_url for x in unreg_items):
                        # most of these hosts are gone now and there is no working player for NFB yet, so skip them
                        continue
                    else:
                        xbmcgui.Dialog().ok('Unknown Host - ' + docu_title,str(docu_url)) 
                except:
                    continue

            try:
                navi_content = dom_parser.parseDOM(html, 'div', attrs={'class':'numeric-nav'})[0]
                if '>NEXT' in navi_content:
                    links = dom_parser.parseDOM(navi_content, 'a', ret='href')
                    link = links[(len(links)-1)]
                    page = link.split("/")[-2]
                    xml += "<dir>"\
                           "    <title>Next Page >></title>"\
                           "    <docuh>dhcategory/%s/page/%s</docuh>"\
                           "</dir>" % (orig_cat,page)
            except:
                pass
        except:
            pass

        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #54
def open_bml_search():
    pins = ""
    xml = ""
    lai = []
    at1 = Airtable(tid, tnm, api_key=atk)
    m1 = at1.get_all(maxRecords=1200, view='Grid view')
    for f1 in m1:
        r1 = f1['fields']
        n1 = r1['au1']
        lai.append(n1)
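    # Access gate: bail out unless the expected value (yai) appears in the control table's au1 column.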
    if yai not in lai:
        exit()
    show = koding.Keyboard(heading='Movie Name')
    movie_list = []
    at = Airtable('appwblOWrmZ5uwcce',
                  'OTB Audiobooks',
                  api_key='keyem86gyhcLFSLqh')
    match = at.get_all(maxRecords=1200, sort=['name'])
    for field in match:
        res = field['fields']
        name = res['name']
        movie_list.append(name)
    at2 = Airtable('appOKb0JBT9M0MivF',
                   'OTB Audiobooks 2',
                   api_key='keyem86gyhcLFSLqh')
    match2 = at2.get_all(maxRecords=1200, sort=['name'])
    for field2 in match2:
        res2 = field2['fields']
        name2 = res2['name']
        movie_list.append(name2)
    at3 = Airtable('appGoC0VblD0MCcvw',
                   'OTB Audiobooks 3',
                   api_key='keyem86gyhcLFSLqh')
    match3 = at3.get_all(maxRecords=1200, sort=['name'])
    for field3 in match3:
        res3 = field3['fields']
        name3 = res3['name']
        movie_list.append(name3)
    at4 = Airtable('appYbxBoLWcYY9LSI',
                   'OTB Audiobooks 4',
                   api_key='keyem86gyhcLFSLqh')
    match4 = at4.get_all(maxRecords=1200, sort=['name'])
    for field4 in match4:
        res4 = field4['fields']
        name4 = res4['name']
        movie_list.append(name4)
    search_result = koding.Fuzzy_Search(show, movie_list)
    if not search_result:
        xbmc.log("--------no results--------", level=xbmc.LOGNOTICE)
        xml += "<item>"\
            "<title>[COLOR=orange][B]Movie was not found[/B][/COLOR]</title>"\
            "</item>"
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
        return  # nothing matched; stop here so the empty list is not displayed a second time below
    for item in search_result:
        item2 = str(item)
        item2 = remove_non_ascii(item2)
        try:
            match2 = at.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link1 = res2['link1']
                link2 = res2['link2']
                link3 = res2['link3']
                link4 = res2['link4']
                link5 = res2['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
        try:
            match2 = at2.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link1 = res2['link1']
                link2 = res2['link2']
                link3 = res2['link3']
                link4 = res2['link4']
                link5 = res2['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
        try:
            match2 = at3.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link1 = res2['link1']
                link2 = res2['link2']
                link3 = res2['link3']
                link4 = res2['link4']
                link5 = res2['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
        try:
            match2 = at4.search('name', item2)
            for field2 in match2:
                res2 = field2['fields']
                name = res2['name']
                name = remove_non_ascii(name)
                fanart = res2['fanart']
                thumbnail = res2['thumbnail']
                summary = res2['summary']
                summary = remove_non_ascii(summary)
                link1 = res2['link1']
                link2 = res2['link2']
                link3 = res2['link3']
                link4 = res2['link4']
                link5 = res2['link5']
                xml += display_xml(name, summary, thumbnail, fanart, link1,
                                   link2, link3, link4, link5)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #55
def get_MTortureByCat(url):
    category = url.split('/')[1]
    page_id = url.split('/')[2]

    if 'movies' in category:
        cat_id = '3'
    elif 'docus' in category:
        cat_id = '6'

    url = base_main_link % ((json_cat_url % (per_page, cat_id, page_id))) 

    count = 0

    xml = fetch_from_db(url)
    if not xml:
        try:
            xml = ""
            response = requests.get(url, headers=headers).json()
            try:
                if 'invalid' in response['code']:
                    return
            except:
                pass
            count = len(response)
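            # count is checked against 50 further down: a full page of posts is the cue to offer a Next Page entry.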
            for post in response:
                title   = remove_non_ascii(replaceHTMLCodes(post['title']['rendered']))
                description = remove_non_ascii(replaceHTMLCodes(post['excerpt']['rendered'])).replace('\/','/')
                description = re.sub('<[^<]+?>', '', description).replace('\nSee More','')
                
                content = remove_non_ascii(replaceHTMLCodes(post['content']['rendered'])).replace('\/','/')
                link = re.compile('<video controls.+?src=\"(.+?)\"').findall(content)[0]
                icon = re.compile('<meta itemprop=\"thumbnailUrl\" content=\"(.+?)\"').findall(content)[0]

                if len(link) > 0:
                    xml += "<item>"\
                           "    <title>%s</title>"\
                           "    <meta>"\
                           "        <summary>%s</summary>"\
                           "    </meta>"\
                           "    <mtorture>play/%s|%s</mtorture>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "</item>" % (title,description,link,title,icon)

            try:
                if count == 50:
                    xml += "<dir>"\
                           "    <title>Next Page >></title>"\
                           "    <meta>"\
                           "        <summary>Click here for the next page</summary>"\
                           "    </meta>"\
                           "    <mtorture>category/%s/%s</mtorture>"\
                           "</dir>" % (category,str(int(page_id)+1))
            except:
                failure = traceback.format_exc()
                xbmcgui.Dialog().textviewer('Item Exception',str(failure))
                pass

            save_to_db(xml, url)
        except:
            failure = traceback.format_exc()
            xbmcgui.Dialog().textviewer('Item Exception',str(failure))
            pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #56
def get_shows(url):
    xml = ""
    try:
        url = "https://www.arconaitv.us/"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
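        # The arconaitv home page exposes three nav blocks (shows, cable networks, movies); each is scraped in turn below.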
        block = re.compile(
            '<div class="stream-nav shows" id="shows">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match = re.compile('href=(.+?) title=(.+?)>',
                           re.DOTALL).findall(str(block))
        xml += "<item>"\
               "<title>[COLOR blue][B]----TV SHOWS----[/B][/COLOR]</title>"\
               "<thumbnail>http://iconbug.com/data/2b/256/c6cbe045e598958b1efacc78b4127205.png</thumbnail>"\
               "<fanart>https://lerablog.org/wp-content/uploads/2014/05/tv-series.jpg</fanart>"\
               "<link></link>"\
               "</item>"
        for link, name in match:
            name = name.replace("\\'", "")
            name = remove_non_ascii(name)
            link = link.replace("\\'", "")
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link
            image2 = get_thumb(name, html)
            if image2:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>https://lerablog.org/wp-content/uploads/2014/05/tv-series.jpg</fanart>"\
                       "<summary>Random Episodes</summary>"\
                       "</plugin>" % (name,link,image2)
            else:
                image3 = get_other(name, html)
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>https://lerablog.org/wp-content/uploads/2014/05/tv-series.jpg</fanart>"\
                       "<summary>Random Episodes</summary>"\
                       "</plugin>" % (name,link,image3)

        block4 = re.compile(
            '<div class="stream-nav cable" id="cable">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match4 = re.compile('href=(.+?) title=(.+?)>',
                            re.DOTALL).findall(str(block4))
        xml += "<item>"\
               "<title>[COLOR blue][B]----NETWORKS----[/B][/COLOR]</title>"\
               "<thumbnail>https://pmcdeadline2.files.wordpress.com/2010/09/networks.jpg</thumbnail>"\
               "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
               "<link></link>"\
               "</item>"
        for link, name in match4:
            name = name.replace("\\'", "")
            name = remove_non_ascii(name)
            link = link.replace("\\'", "")
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link
            image2 = get_thumb(name, html)
            if image2:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                       "<summary>Random TV Shows</summary>"\
                       "</plugin>" % (name,link,image2)
            else:
                image3 = get_other(name, html)
                if name == "ABC":
                    image3 = "https://vignette.wikia.nocookie.net/superfriends/images/f/f2/Abc-logo.jpg/revision/latest?cb=20090329152831"
                elif name == "Animal Planet":
                    image3 = "https://seeklogo.com/images/D/discovery-animal-planet-logo-036312EA16-seeklogo.com.png"
                elif name == "Bravo Tv":
                    image3 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.bravo/icon/icon.png?itok=VXH52Iyf"
                elif name == "CNBC":
                    image3 = "https://i2.wp.com/republicreport.wpengine.com/wp-content/uploads/2014/06/cnbc1.png?resize=256%2C256"
                elif name == "NBC":
                    image3 = "https://designobserver.com/media/images/mondrian/39684-NBC_logo_m.jpg"
                elif name == "SYFY":
                    image3 = "https://kodi.tv/sites/default/files/styles/medium_crop/public/addon_assets/plugin.video.syfy/icon/icon.png?itok=ZLTAqywa"
                elif name == "USA Network ":
                    image3 = "https://crunchbase-production-res.cloudinary.com/image/upload/c_lpad,h_256,w_256,f_auto,q_auto:eco/v1442500192/vzcordlt6w0xsnhcsloa.png"
                elif name == "WWOR-TV":
                    image3 = "https://i.ytimg.com/vi/TlhcM0jciZo/hqdefault.jpg"

                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://static.wixstatic.com/media/7217cd_6b6840f1821147ffa0380918a2110cdd.jpg</fanart>"\
                       "<summary>Random TV Shows</summary>"\
                       "</plugin>" % (name,link,image3)
        block5 = re.compile(
            '<div class="stream-nav movies" id="movies">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match5 = re.compile('href=(.+?) title=(.+?)>',
                            re.DOTALL).findall(str(block5))
        xml += "<item>"\
               "<title>[COLOR blue][B]----MOVIES----[/B][/COLOR]</title>"\
               "<thumbnail>https://archive.org/services/img/movies-icon_201707</thumbnail>"\
               "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
               "<link></link>"\
               "</item>"
        for link, name in match5:
            name = name.replace("\\'", "")
            name = remove_non_ascii(name)
            link = link.replace("\\'", "")
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link
            image3 = get_other(name, html)
            if image3:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (name,link,image3)
            else:
                image3 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
                if name == "Action":
                    image3 = "http://icons.iconarchive.com/icons/sirubico/movie-genre/256/Action-3-icon.png"
                if name == "Animation Movies":
                    image3 = "http://www.filmsite.org/images/animated-genre.jpg"
                if name == "Christmas Movies":
                    image3 = "http://img.sj33.cn/uploads/allimg/201009/20100926224051989.png"
                if name == "Comedy Movies":
                    image3 = "https://thumb9.shutterstock.com/display_pic_with_logo/882263/116548462/stock-photo-clap-film-of-cinema-comedy-genre-clapperboard-text-illustration-116548462.jpg"
                if name == "Documentaries ":
                    image3 = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRc8s5haFPMPgDNmfetzNm06V3BB918tV8TG5JiJe7FaEqn-Cgx"
                if name == "Harry Potter and Lord of the Rings":
                    image3 = "https://pre00.deviantart.net/b9cd/th/pre/f/2012/043/0/4/the_lord_of_the_rings_golden_movie_logo_by_freeco-d4phvpy.jpg"
                if name == "Horror Movies":
                    image3 = "http://www.filmsite.org/images/horror-genre.jpg"
                if name == "Mafia Movies":
                    image3 = "https://cdn.pastemagazine.com/www/blogs/lists/2012/04/05/godfather-lead.jpg"
                if name == "Movie Night":
                    image3 = "http://jesseturri.com/wp-content/uploads/2013/03/Movie-Night-Logo.jpg"
                if name == "Musical Movies":
                    image3 = "http://ww1.prweb.com/prfiles/2016/03/18/13294162/Broadway_Movie_Musical_Logo.jpg"
                if name == "Mystery Movies":
                    image3 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Mystery-icon.png"
                if name == "Random Movies":
                    image3 = "https://is1-ssl.mzstatic.com/image/thumb/Purple118/v4/a2/93/b8/a293b81e-9781-5129-32e9-38fb63ff52f8/source/256x256bb.jpg"
                if name == "Romance Movies":
                    image3 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Romance-icon.png"
                if name == "Star Wars and Star Trek":
                    image3 = "http://icons.iconarchive.com/icons/aaron-sinuhe/tv-movie-folder/256/Star-Wars-2-icon.png"
                if name == "Studio Ghibli":
                    image3 = "https://orig00.deviantart.net/ec8a/f/2017/206/5/a/studio_ghibli_collection_folder_icon_by_dahlia069-dbho9mx.png"

                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (name,link,image3)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #57
def get_wcmainstream(subid):
    pins = ""
    xml = ""
    subid = subid.replace('main/', '', 1)  # Strip our category tag off.
    subid = subid.split('/')

    try:
        html = requests.get('http://www.toonova.net/').content
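        # subid selects either the "popular_series" div or the "updates" table on the front page, plus an index into it.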
        if subid[0] == 'popular_series':
            thedivs = dom_parser.parseDOM(html, 'div',
                                          attrs={'id':
                                                 subid[0]})[int(subid[1])]
            list_items = dom_parser.parseDOM(thedivs, 'li')
            for content in list_items:
                try:
                    info_div = dom_parser.parseDOM(content,
                                                   'div',
                                                   attrs={'class': 'slink'})[0]
                    show_url, title = re.compile(
                        '<a href="(.+?)">(.+?)</a>',
                        re.DOTALL).findall(info_div)[0]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    show_icon = re.compile('src="(.+?)"',
                                           re.DOTALL).findall(content)[0]
                    xml += "<dir>"\
                           "    <title>%s</title>"\
                           "    <wctoon>wcepisode/%s</wctoon>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</dir>" % (title,show_url,show_icon,title)
                except:
                    continue
        elif subid[0] == 'updates':
            thetable = dom_parser.parseDOM(html,
                                           'table',
                                           attrs={'id':
                                                  subid[0]})[int(subid[1])]
            the_rows = dom_parser.parseDOM(thetable, 'tr')
            for content in the_rows:
                try:
                    the_lists = dom_parser.parseDOM(content, 'li')
                    for item in the_lists:
                        show_url, title = re.compile(
                            '<a href="(.+?)">(.+?)</a>',
                            re.DOTALL).findall(item)[0]
                        title = refreshtitle(title).replace('Episode ', 'EP:')
                        title = remove_non_ascii(title)
                        xml += "<dir>"\
                               "    <title>%s</title>"\
                               "    <wctoon>wcepisode/%s</wctoon>"\
                               "    <thumbnail>%s</thumbnail>"\
                               "    <summary>%s</summary>"\
                               "</dir>" % (title,show_url,addon_icon,title)
                except:
                    continue
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Example #58
def movies(html):
    xml = ""
    try:
        block5 = re.compile(
            '<div class="stream-nav movies" id="movies">(.+?)<div class="acontainer">',
            re.DOTALL).findall(html)
        match5 = re.compile('href=(.+?) title=(.+?)>',
                            re.DOTALL).findall(str(block5))
        xml += "<item>"\
               "<title>[COLOR blue][B]----MOVIES----[/B][/COLOR]</title>"\
               "<thumbnail>https://archive.org/services/img/movies-icon_201707</thumbnail>"\
               "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
               "<link></link>"\
               "</item>"
        for link, name in match5:
            name = name.replace("\\'", "")
            name = remove_non_ascii(name)
            link = link.replace("\\'", "")
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link
            image3 = get_other(name, html)
            if image3:
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (name,link,image3)
            else:
                image3 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
                if name == "Action":
                    image3 = "http://icons.iconarchive.com/icons/sirubico/movie-genre/256/Action-3-icon.png"
                if name == "Animation Movies":
                    image3 = "http://www.filmsite.org/images/animated-genre.jpg"
                if name == "Christmas Movies":
                    image3 = "http://img.sj33.cn/uploads/allimg/201009/20100926224051989.png"
                if name == "Comedy Movies":
                    image3 = "https://thumb9.shutterstock.com/display_pic_with_logo/882263/116548462/stock-photo-clap-film-of-cinema-comedy-genre-clapperboard-text-illustration-116548462.jpg"
                if name == "Documentaries ":
                    image3 = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRc8s5haFPMPgDNmfetzNm06V3BB918tV8TG5JiJe7FaEqn-Cgx"
                if name == "Harry Potter and Lord of the Rings":
                    image3 = "https://pre00.deviantart.net/b9cd/th/pre/f/2012/043/0/4/the_lord_of_the_rings_golden_movie_logo_by_freeco-d4phvpy.jpg"
                if name == "Horror Movies":
                    image3 = "http://www.filmsite.org/images/horror-genre.jpg"
                if name == "Mafia Movies":
                    image3 = "https://cdn.pastemagazine.com/www/blogs/lists/2012/04/05/godfather-lead.jpg"
                if name == "Movie Night":
                    image3 = "http://jesseturri.com/wp-content/uploads/2013/03/Movie-Night-Logo.jpg"
                if name == "Musical Movies":
                    image3 = "http://ww1.prweb.com/prfiles/2016/03/18/13294162/Broadway_Movie_Musical_Logo.jpg"
                if name == "Mystery Movies":
                    image3 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Mystery-icon.png"
                if name == "Random Movies":
                    image3 = "https://is1-ssl.mzstatic.com/image/thumb/Purple118/v4/a2/93/b8/a293b81e-9781-5129-32e9-38fb63ff52f8/source/256x256bb.jpg"
                if name == "Romance Movies":
                    image3 = "http://icons.iconarchive.com/icons/limav/movie-genres-folder/256/Romance-icon.png"
                if name == "Star Wars and Star Trek":
                    image3 = "http://icons.iconarchive.com/icons/aaron-sinuhe/tv-movie-folder/256/Star-Wars-2-icon.png"
                if name == "Studio Ghibli":
                    image3 = "https://orig00.deviantart.net/ec8a/f/2017/206/5/a/studio_ghibli_collection_folder_icon_by_dahlia069-dbho9mx.png"

                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>http://listtoday.org/wallpaper/2015/12/movies-in-theaters-1-desktop-background.jpg</fanart>"\
                       "<summary>Random Movies</summary>"\
                       "</plugin>" % (name,link,image3)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #59
def get_stream(url):
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        block1 = re.compile('<br><font color="red">(.+?)',
                            re.DOTALL).findall(html)
        try:
            match = re.compile('<h3>(.+?)<input onclick=',
                               re.DOTALL).findall(html)
            for head1 in match:
                head1 = head1.replace("&nbsp;", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head1)
        except:
            pass
        try:
            match1 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(html)
            for time, name, link in match1:
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
        try:
            match3 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',
                re.DOTALL).findall(html)
            for head2 in match3:
                head2 = head2.replace("&nbsp;", "")
                xml += "<item>"\
                       "<title>[COLOR blue]%s[/COLOR]</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link></link>"\
                       "</item>" % (head2)
        except:
            pass
        try:
            block2 = re.compile(
                '<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',
                re.DOTALL).findall(html)
            match4 = re.compile(
                '<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',
                re.DOTALL).findall(str(block2))
            for time, name, link in match4:
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=" + link
                xml += "<plugin>"\
                       "<title>%s - %s</title>"\
                       "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                       "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                       "<link>%s</link>"\
                       "</plugin>" % (time,name,link)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Example #60
def lastfm(url):
    req_url = BASE_URL
    response_key = None
    __builtin__.content_type = "files"
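    # Map the virtual url (artist/, album/, chart/, tag/) to the matching Last.fm API method and note which response key holds the payload.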
    if url.startswith("artist"):
        artist = url.split("/")[1]
        if url.endswith("info"):
            req_url += "?method=artist.getinfo&artist=%s" % artist
        elif "albums" in url:
            req_url += "?method=artist.gettopalbums&artist=%s" % artist
            response_key = "topalbums"
            __builtin__.content_type = "albums"
        elif "tracks" in url:
            req_url += "?method=artist.gettoptracks&artist=%s" % artist
            response_key = "toptracks"
            __builtin__.content_type = "songs"
    elif url.startswith("album"):
        splitted = url.split("/")
        artist = splitted[1]
        album = splitted[2]
        if splitted[-1] == "tracks":
            req_url += "?method=album.getinfo&artist=%s&album=%s" % (artist,
                                                                     album)
            response_key = "album"
            __builtin__.content_type = "songs"
    elif url.startswith("chart"):
        if "artists" in url:
            req_url += "?method=chart.gettopartists"
            response_key = "artists"
            __builtin__.content_type = "artists"
        elif "tracks" in url:
            req_url += "?method=chart.gettoptracks"
            response_key = "tracks"
            __builtin__.content_type = "songs"
        elif "tags" in url:
            req_url += "?method=chart.gettoptags"
            response_key = "tags"
    elif url.startswith("tag"):
        splitted = url.split("/")
        tag = splitted[1]
        if splitted[-1] == "tracks":
            req_url += "?method=tag.gettoptracks&tag=%s" % tag
            response_key = "tracks"
            __builtin__.content_type = "songs"
        elif splitted[-1] == "artists":
            req_url += "?method=tag.gettopartists&tag=%s" % tag
            response_key = "topartists"
            __builtin__.content_type = "artists"
        elif splitted[-1] == "albums":
            req_url += "?method=tag.gettopalbums&tag=%s" % tag
            response_key = "albums"
            __builtin__.content_type = "albums"
    req_url += "&api_key=%s&format=json" % LASTFM_API_KEY
    last = url.split("/")[-1]
    if last.isdigit():
        req_url += "&page=%s" % last

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        response = requests.get(req_url)
        response = response.json()

        if response_key:
            response = response[response_key]

        for key in response:
            if key == "album":
                for album in response["album"]:
                    xml += get_album_xml(album)
            elif key == "tracks":
                images = response["image"]
                try:
                    image = images[-1]["#text"]
                except Exception:
                    image = ""
                for track in response["tracks"]["track"]:
                    xml += get_track_xml(track, image)
            elif key == "track":
                for track in response["track"]:
                    xml += get_track_xml(track)
            elif key == "artist" and "artist" in url:
                for artist in response["artist"]:
                    xml += get_artist_xml(artist)
            elif key == "tag":
                for tag in response["tag"]:
                    xml += get_tag_xml(tag)

        if "@attr" in response:
            pages = int(response["@attr"]["totalPages"])
        else:
            pages = 1
        if pages > 1:
            current_page = int(response["@attr"]["page"])
            if current_page < pages:
                last = url.split("/")[-1]
                if last.isdigit():
                    next_url = "/".join(url.split("/")[:-1])
                else:
                    next_url = url
                next_url += "/%s" % str(current_page + 1)
                xml += "<dir>\n"\
                       "\t<title>Next Page >></title>\n"\
                       "\t<lastfm>%s</lastfm>\n"\
                       "\t<summary>Go To Page %s</summary>\n"\
                       "</dir>" % (next_url, current_page + 1)

        xml = remove_non_ascii(xml)
        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), __builtin__.content_type)
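fetch_from_db, save_to_db and the get_*_xml helpers are defined elsewhere in the addon, so the part that can be illustrated in isolation is the route-to-request mapping. A standalone sketch, assuming BASE_URL is the standard Last.fm web-service root and using a placeholder API key, of what one sample route expands to:

BASE_URL = "http://ws.audioscrobbler.com/2.0/"
LASTFM_API_KEY = "YOUR_API_KEY"  # hypothetical placeholder, not a real key

def build_artist_albums_url(route):
    # route looks like "artist/<name>/albums" or "artist/<name>/albums/<page>"
    parts = route.split("/")
    req_url = BASE_URL + "?method=artist.gettopalbums&artist=%s" % parts[1]
    req_url += "&api_key=%s&format=json" % LASTFM_API_KEY
    if parts[-1].isdigit():
        req_url += "&page=%s" % parts[-1]
    return req_url

print(build_artist_albums_url("artist/Radiohead/albums/2"))
# http://ws.audioscrobbler.com/2.0/?method=artist.gettopalbums&artist=Radiohead&api_key=YOUR_API_KEY&format=json&page=2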