Beispiel #1
0
def password_handler(url):
    """Gate an adult XML list behind the 'adult_stuff' setting and a password.

    url is a base64-encoded "password|xml_location" pair.  When the menu is
    disabled a placeholder item is shown; otherwise the user is prompted once
    per session (SESSION_HOURS) and the target XML is displayed on success.
    """
    def _load_xml(xml_loc):
        # Remote lists are fetched over HTTP; anything else is read from the
        # addon's local xml directory.  (Was duplicated in both branches.)
        if 'http' in xml_loc:
            return requests.get(xml_loc).content
        import xbmcvfs
        xml_loc = xml_loc.replace('file://', '')
        xml_file = xbmcvfs.File(os.path.join(addon_path, "xml", xml_loc))
        try:
            return xml_file.read()
        finally:
            xml_file.close()

    adult_xml = ''
    try:
        the_setting = this_addon.getSetting('adult_stuff')
        if not the_setting:
            # First run: persist an explicit default so later reads are stable.
            the_setting = 'false'
            xbmcaddon.Addon().setSetting('adult_stuff', str(the_setting))
        if the_setting == 'false':
            adult_xml += "<item>"\
                    "    <title>[COLOR yellow]This menu is not enabled[/COLOR]</title>"\
                    "    <heading></heading>"\
                    "    <thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
                    "</item>"
            jenlist = JenList(adult_xml)
            display_list(jenlist.get_list(), jenlist.get_content_type())
            return
    except Exception:
        # Settings backend unavailable; bail out rather than crash the menu.
        return

    sep_list = url.decode('base64').split('|')
    dec_pass = sep_list[0]
    xml_loc = sep_list[1]
    # Fix: settings are stored as strings, and in Python 2 a float > str
    # comparison is always False, so the expiry check never fired.  Convert
    # before comparing; empty/missing settings count as expired.
    try:
        expires_at = float(this_addon.getSetting('PASS_EXIRES_AT'))
    except (TypeError, ValueError):
        expires_at = 0.0
    if time.time() > expires_at:
        entered = ''  # renamed from 'input', which shadowed the builtin
        keyboard = xbmc.Keyboard(entered, '[COLOR red]Are you worthy?[/COLOR]')
        keyboard.doModal()
        if keyboard.isConfirmed():
            entered = keyboard.getText()
        if entered == dec_pass:
            expires_at = time.time() + 60 * 60 * int(SESSION_HOURS)
            this_addon.setSetting("PASS_EXIRES_AT", str(expires_at))
            adult_xml = _load_xml(xml_loc)
        else:
            adult_xml += "<dir>"\
                    "    <title>[COLOR yellow]Wrong Answer! You are not worthy[/COLOR]</title>"\
                    "    <thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
                    "</dir>"
    else:
        adult_xml = _load_xml(xml_loc)
    jenlist = JenList(adult_xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #2
0
def get_country(url):
    """List tvmaze countries, or page through the networks of one country.

    url is either ".../all" (list every country) or "country/<name>/<page>".
    """
    xml = ""
    # Both branches need the country <option> list from the same networks
    # page, so fetch and parse it once (was duplicated in each branch).
    html2 = requests.get("https://www.tvmaze.com/networks").content
    block = re.compile('<option value=""></option>(.+?)</select>', re.DOTALL).findall(html2)
    countries = re.compile('<option value="(.+?)">(.+?)</option>', re.DOTALL).findall(str(block))
    if "all" in url:
        for number, country in countries:
            xml += "<dir>"\
                   "<title>%s</title>"\
                   "<tvmaze>country/%s/1</tvmaze>"\
                   "</dir>" % (country, country)
    else:
        last = url.split("/")[-2]
        num = url.split("/")[-1]
        for number, country in countries:
            if country != last:
                continue
            html3 = "https://www.tvmaze.com/networks?Network%5Bcountry_enum%5D="+number+"&Network%5Bsort%5D=1&page="+num
            html4 = requests.get(html3).content
            # 'shows' instead of rebinding 'match' inside its own loop.
            shows = re.compile('<div class="card primary grid-x">.+?<a href="(.+?)".+?<img src="(.+?)".+?<a href=".+?">(.+?)</a>',re.DOTALL).findall(html4)
            for link, image, name in shows:
                link = link.split("/")[-2]
                thumb = "http:"+image
                xml += "<dir>"\
                       "<title>%s</title>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<tvmaze>network/%s/1</tvmaze>"\
                       "</dir>" % (name, thumb, link)
            # The pagination widget is absent on the last page; skip quietly.
            try:
                current = re.compile('<ul class="pagination">.+?<li class="current"><a href="(.+?)"',re.DOTALL).findall(html4)[0]
                page = int(current.split(";")[-1].replace("page=", ""))
                xml += "<dir>"\
                       "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
                       "<tvmaze>country/%s/%s</tvmaze>"\
                       "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
                       "</dir>" % (last, page + 1)
            except (IndexError, ValueError):
                pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #3
0
def open_table():
    """Build an NHL game list from the Airtable 'NHL' grid view.

    Each record contributes one item with three sublinks; records missing any
    expected field are skipped (best effort).
    """
    xml = ""
    at = Airtable('appJ1nGNe5G1za9fg', 'NHL', api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=700, view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = remove_non_ascii(res['Name'])
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            # Renamed from 'time': the old name shadowed the time module.
            air_time = res['Time']
            dsp = air_time + "  -  " + name
            xml += "<item>"\
                   "<title>[COLOR darkmagenta]%s[/COLOR]</title>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (dsp,thumbnail,fanart,link1,link2,link3)
        except Exception:
            # Incomplete records are silently dropped from the listing.
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #4
0
def showing(url):
    """Prompt for the access code and, on a match, load the remote XML list."""
    parts = url.split()
    xml_url = parts[0]
    expected = base64.b64decode(parts[1].replace("get/", ""))
    entered = ''
    kb = xbmc.Keyboard(entered, '[COLOR red]So Your Wanting The Naughty Bits Are You ?? Get The Tissues At The Ready[/COLOR]')
    kb.doModal()
    if kb.isConfirmed():
        entered = kb.getText()
    if entered == expected:
        # Concatenate every quoted chunk of the fetched page into one list.
        page = getHtml(xml_url)
        chunks = re.compile(
            '([^"]+)',
            re.IGNORECASE | re.DOTALL).findall(page)
        xml = "".join(chunks)
    else:
        xml = ("<dir>"
               "<title>[COLOR yellow]Wrong Answer, Are you sure your old enough ??[/COLOR]</title>"
               "<thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"
               "</dir>")
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #5
0
def imdbseason(url):
    """Emit one <dir> entry per season found on an IMDB title page."""
    imdb = url.replace("season/", "")
    page = getHtml('http://www.imdb.com/title/' + imdb)
    seasons = re.compile(
        'href="/title/' + imdb + '/episodes.+?season=.+?&ref_=tt_eps_sn_.+?"\n>(.+?)</a>',
        re.IGNORECASE | re.DOTALL).findall(page)
    xml = ""
    for season in seasons:
        episodeURL = 'http://www.imdb.com/title/' + imdb + "/episodes?season=" + season
        label = "Season: [COLOR dodgerblue]" + season + "[/COLOR]"
        xml += ("<dir>"
                "<title>%s</title>"
                "<meta>"
                "<content>season</content>"
                "<imdb>%s</imdb>"
                "<imdburl>theepisode/%s</imdburl>"
                "<tvdb></tvdb>"
                "<tvshowtitle></tvshowtitle>"
                "<year></year>"
                "<season>%s</season>"
                "</meta>"
                "<link></link>"
                "<thumbnail></thumbnail>"
                "<fanart></fanart>"
                "</dir>") % (label, imdb, episodeURL, season)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #6
0
def imdbcharttv(url):
    """Render an IMDB TV chart page as tvshow <dir> entries with TMDB art."""
    chart_url = 'http://www.imdb.com/' + url.replace("charttv/", "chart/")
    page = getHtml(chart_url)
    rows = re.compile(
        '<a href="/title/(.+?)/.+?pf_rd_m=.+?pf_rd_i=.+?&ref_=.+?"\n> <img src="(.+?)" width=".+?" height=".+?"/>\n</a>.+?</td>\n.+?<td class="titleColumn">\n.+?\n.+?<a href=".+?"\ntitle=".+?" >(.+?)</a>\n.+?<span class="secondaryInfo">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(page)
    xml = ""
    for imdb, thumbnail, title, year in rows:
        # Artwork comes from TMDB, keyed by the IMDB id.
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        art = re.compile(
            '"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        for poster_path, backdrop_path in art:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            xml += ("<dir>"
                    "<title>%s</title>"
                    "<meta>"
                    "<content>tvshow</content>"
                    "<imdb>%s</imdb>"
                    "<imdburl>season/%s</imdburl>"
                    "<tvdb></tvdb>"
                    "<tvshowtitle>%s</tvshowtitle>"
                    "<year>%s</year>"
                    "</meta>"
                    "<link></link>"
                    "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"
                    "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"
                    "</dir>") % (name, imdb, imdb, title, year, poster_path, backdrop_path)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #7
0
def imdbactors(url):
    """List actors from an IMDB list page, plus a Next Page entry if present."""
    xml = ""
    url = url.replace("http://www.imdb.com","").replace("actors","list").replace("actor","")
    link = 'http://www.imdb.com/' + url
    listhtml = getHtml(link)
    match = re.compile(
            '<img alt=".+?"\nheight="209"\nsrc="(.+?)"\nwidth="140" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n.+?<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n<a href="/name/(.+?)"\n>(.+?)\n</a>', 
            re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, name in match:
        # Swap the tiny list thumbnails for the large portrait renditions.
        thumbnail = thumbnail.replace("@._V1_UY209_CR10,0,140,209_AL_.jpg","@._V1_SY1000_SX800_AL_.jpg")
        thumbnail = thumbnail.replace("._V1_UY209_CR5,0,140,209_AL_.jpg","._V1_UX520_CR0,0,520,700_AL_.jpg")
        xml += "<dir>"\
               "<title>%s</title>"\
               "<imdburl>name/%s</imdburl>"\
               "<thumbnail>%s</thumbnail>"\
               "</dir>" % (name, imdb ,thumbnail)
    next_page = re.compile(
                '<a class="flat-button lister-page-next next-page" href="(.+?)">\n.+?Next\n.+?</a>', 
                re.IGNORECASE | re.DOTALL).findall(listhtml)
    # Fix: the loop previously rebound the 'url' parameter; also dropped the
    # try/except around pure %-formatting, which could never raise.
    for next_url in next_page:
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<imdburl>actor%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
               "</dir>" % (next_url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #8
0
def imdbactorspage(url):
    """List an actor's filmography page as searchable movie items."""
    page = getHtml('http://www.imdb.com/' + url)
    films = re.compile(
        '<div class="film.+?" id="act.+?">\n<span class="year_column">\n&nbsp;(.+?)\n</span>\n<b><a href="/title/(.+?)/.+?ref_=.+?"\n>(.+?)</a></b>',
        re.IGNORECASE | re.DOTALL).findall(page)
    entries = []
    for year, imdb, title in films:
        display = title + " (" + year + ")"
        entries.append(("<item>"
                        "<title>%s</title>"
                        "<meta>"
                        "<content>movie</content>"
                        "<imdb>%s</imdb>"
                        "<title>%s</title>"
                        "<year>%s</year>"
                        "</meta>"
                        "<link>"
                        "<sublink>search</sublink>"
                        "<sublink>searchsd</sublink>"
                        "</link>"
                        "<thumbnail></thumbnail>"
                        "<fanart></fanart>"
                        "</item>") % (display, imdb, title, year))
    jenlist = JenList("".join(entries))
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #9
0
def get_network(url):
    """List the shows of one tvmaze network page, with pagination."""
    network_id = url.split("/")[-2]
    page_no = url.split("/")[-1]
    page_url = "https://www.tvmaze.com/shows?Show%5Bnetwork_id%5D=" + network_id + "&page=" + page_no
    page_html = requests.get(page_url).content
    cards = re.compile('<div class="card primary grid-x">.+?<a href="(.+?)".+?<img src="(.+?)".+?<a href=".+?">(.+?)</a>',re.DOTALL).findall(page_html)
    xml = ""
    for href, img, show_name in cards:
        show_id = href.split("/")[-2]
        xml += ("<dir>"
                "<title>%s</title>"
                "<thumbnail>%s</thumbnail>"
                "<tvmaze>show/%s/%s</tvmaze>"
                "</dir>") % (show_name, "http:" + img, show_name, show_id)
    # The pagination widget is missing on the final page; ignore that case.
    try:
        current = re.compile('<ul class="pagination">.+?<li class="current"><a href="(.+?)"',re.DOTALL).findall(page_html)[0]
        current_page = int(current.split(";")[-1].replace("page=", ""))
        xml += ("<dir>"
                "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"
                "<tvmaze>network/%s/%s</tvmaze>"
                "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"
                "</dir>") % (network_id, current_page + 1)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_MRUPlayMedia(url):
    """Resolve a mail.ru video metadata URL into playable links.

    The video_key cookie issued alongside the JSON response must be appended
    to every stream URL for playback to be accepted.
    """
    xml = ""
    url = url.replace('mru_play/', '')
    try:
        import cookielib, urllib2
        cookieJar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
        conn = urllib2.Request(url)
        connection = opener.open(conn)
        f = connection.read()
        connection.close()
        js = json.loads(f)
        # Fix: 'token' was unbound when no cookie came back, and the NameError
        # below was swallowed by the except, silently emptying the listing.
        token = ''
        for cookie in cookieJar:
            token = cookie.value
        js = js['videos']
        for el in js:
            link = 'http:'+el['url']+'|Cookie=video_key='+token
            xml += "<item>"\
                   "    <title>%s</title>"\
                   "    <link>%s</link>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "</item>" % (el['key'],link,addon_icon)
    except Exception:
        # Best effort: display whatever was built before the failure.
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #11
0
def imdbNextPage(url):
    """Render one IMDB search-results page as movie items plus a pager.

    Fix: the next-page extraction indexed findall(...)[0] unguarded, so the
    final results page raised IndexError and nothing rendered at all.
    """
    xml = ""
    listhtml = getHtml(url)
    match = re.compile(
            '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
            re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        name = title + " " + year
        year = year.replace("(","").replace(")","")
        # Swap the tiny lister thumbnail for the large rendition.
        thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg","@._V1_SY1000_SX800_AL_.jpg")
        xml += "<item>"\
                "<title>%s</title>"\
                "<meta>"\
                "<content>movie</content>"\
                "<imdb>%s</imdb>"\
                "<title>%s</title>"\
                "<year>%s</year>"\
                "</meta>"\
                "<link>"\
                "<sublink>search</sublink>"\
                "<sublink>searchsd</sublink>"\
                "</link>"\
                "<thumbnail>%s</thumbnail>"\
                "<fanart></fanart>"\
                "</item>" % (name, imdb, title, year, thumbnail)
    try:
        next_page = re.compile(
                    '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />', 
                    re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
               "</dir>" % (next_page)
    except IndexError:
        # No next-page link: this is the last page of results.
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #12
0
def FullMatch_WWE_Replays(url):
    """List WWE replay posts from the fullmatch WordPress JSON API.

    url carries the page id as "wwe_replay/<page>"; each post's iframe
    sources become playable sublinks, and a Next Page entry advances the id.
    """
    url = url.replace('wwe_replay/', '')
    page_id = url
    url = base_full_match % ((json_cat_url % (wwe_info['per_page'], wwe_info['category'], page_id))) 

    try:
        xml = ""
        # Fix: headers was passed positionally, which requests.get() takes as
        # the 'params' argument; it must be passed as headers=.
        response = requests.get(url, headers=headers).json()
        try:
            # The API answers with an error object (not a list) past the end.
            if 'invalid' in response['code']:
                return
        except Exception:
            pass
        for post in response:
            title   = clean_titles(post['title']['rendered'])
            if not 'wwe' in title.lower():
                continue
            content = post['content']['rendered']
            description = decodeEntities(re.compile('<h2>(.+?)</h2>').findall(content)[0])

            try:
                icon_js = requests.get(post['_links']['wp:featuredmedia'][0]['href'].replace('\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except Exception:
                # Featured-media lookup failed; fall back to the addon icon.
                icon = addon_icon

            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
            if len(sources) > 0:
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <link>" % (title,description)

                for source in sources:
                    if not 'http' in source:
                        source = 'http:%s' % source
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += "        <sublink>%s(%s)</sublink>" % (source,host)

                xml += "    </link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (icon)
    except Exception:
        # Best effort: fall through and still offer the Next Page entry.
        pass

    try:
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <fullmatch>wwe_replay/%s</fullmatch>"\
               "</dir>" % (str(int(page_id)+1))
    except Exception:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #13
0
def testings(file_name="testings.xml"):
    """
    parses a local xml file from the addon profile and displays it as a jen list
    :param str file_name: local file name to parse
    :return: list of jen items
    :rtype: list[dict[str,str]]
    """
    profile_dir = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
    handle = xbmcvfs.File(os.path.join(profile_dir, file_name))
    xml = handle.read()
    handle.close()
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #14
0
def get_shows(url):
    """Build the arconaitv shows and movies lists as SportsDevil plugin links."""
    xml = ""
    # All entries route playback through SportsDevil's stream catcher.
    base = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/"
    try:
        url = "https://www.arconaitv.us/"
        headers = {'User_Agent':User_Agent}
        html = requests.get(url,headers=headers).content
        block2 = re.compile('<div class="content">(.+?)<div class="stream-nav shows" id="shows">',re.DOTALL).findall(html)
        match2 = re.compile('href=(.+?) title=(.+?)<img src=(.+?) alt=(.+?) />',re.DOTALL).findall(str(block2))
        for link2,title2,image2,name2 in match2:
            name2 = name2.replace("\\'", "")
            link2 = base + link2.replace("\\'", "")
            image2 = "https://www.arconaitv.us" + image2.replace("\\'", "")
            title2 = title2.replace("\\'", "").replace(" class=poster-link>","")
            # Fix: both branches emitted identical XML; only the display name
            # differed (title when the alt text was empty), so emit once.
            xml += "<plugin>"\
                   "<title>%s</title>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "<thumbnail>%s</thumbnail>"\
                   "</plugin>" % (name2 or title2, link2, image2)
        block3 = re.compile('<div class="stream-nav movies" id="movies">(.+?)<div class="donation-form" id="donate">',re.DOTALL).findall(html)
        match3 = re.compile('href=(.+?) title=(.+?)>',re.DOTALL).findall(str(block3))
        for link3,name3 in match3:
            name3 = name3.replace("\\'", "")
            link3 = base + link3.replace("\\'", "")
            image3 = "http://www.userlogos.org/files/logos/nickbyalongshot/film.png"
            xml += "<plugin>"\
                   "<title>%s</title>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "<thumbnail>%s</thumbnail>"\
                   "</plugin>" % (name3,link3,image3)
    except Exception:
        # Best effort: a site layout change degrades to an empty list.
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #15
0
def get_season(url):
    """Display the episodes of one TMDB season, using the cached copy if any.

    url packs year/tv_title/fanart/imdb/tvdb/tmdb_id/season as '/'-separated
    fields, read from the right.
    """
    pins = "PLugintvmazeseason" + url
    Items = fetch_from_db2(pins)
    if Items:
        # Fix: previously fell through to JenList(xml) with 'xml' unbound,
        # raising NameError whenever the cached copy was displayed.
        display_data(Items)
        return
    xml = ""
    sea_num = str(url.split("/")[-1])
    tmdb_id = url.split("/")[-2]
    tvdb = url.split("/")[-3]
    imdb = url.split("/")[-4]
    fanart = url.split("/")[-5]
    tv_title = url.split("/")[-6]
    year = url.split("/")[-7]
    tmdb_fanart = "https://image.tmdb.org/t/p/original/" + str(fanart)
    html = "https://api.themoviedb.org/3/tv/%s/season/%s?api_key=%s&language=en-US" % (
        tmdb_id, sea_num, TMDB_api_key)
    html2 = requests.get(html).json()
    for episodes in html2['episodes']:
        epi_num = episodes['episode_number']
        thumb = "https://image.tmdb.org/t/p/original" + str(episodes['still_path'])
        title = remove_non_ascii(episodes['name'])
        premiered = episodes['air_date']
        xml += "<item>"\
              "<title>%s</title>"\
              "<meta>"\
              "<imdb>%s</imdb>"\
              "<tvdb>%s</tvdb>"\
              "<content>episode</content>"\
              "<tvshowtitle>%s</tvshowtitle>"\
              "<year>%s</year>"\
              "<title>%s</title>"\
              "<premiered>%s</premiered>"\
              "<season>%s</season>"\
              "<episode>%s</episode>"\
              "</meta>"\
              "<link>"\
              "<sublink>search</sublink>"\
              "<sublink>searchsd</sublink>"\
              "</link>"\
              "<thumbnail>%s</thumbnail>"\
              "<fanart>%s</fanart>"\
              "</item>" % (title, imdb, tvdb, tv_title, year, title, premiered, sea_num, epi_num, thumb, tmdb_fanart)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #16
0
def get_list(url):
    """display jen list"""
    global content_type
    listing = JenList(url)
    if not listing:
        koding.dolog(_("returned empty for ") + url)
    entries = listing.get_list()
    detected = listing.get_content_type()
    if entries == []:
        return False
    if detected:
        content_type = detected
    display_list(entries, content_type)
    return True
# NOTE(review): byte-for-byte duplicate of the get_list() defined directly
# above; at import time this second definition silently replaces the first.
# Harmless but redundant — consider deleting one copy.
def get_list(url):
    """Display the jen list for *url*.

    Returns False when the list resolves to no items, True otherwise.
    Updates the module-level content_type when the list declares one.
    """
    global content_type
    jen_list = JenList(url)
    if not jen_list:
        koding.dolog(_("returned empty for ") + url)
    items = jen_list.get_list()
    content = jen_list.get_content_type()
    if items == []:
        return False
    if content:
        content_type = content
    display_list(items, content_type)
    return True
def tv_shows(html):
    """Build the arconaitv TV-shows list, serving the cached copy when present.

    html is the already-fetched arconaitv front page.
    """
    pins = "PLuginarconaitvshows"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        xml = ""
        try:
            block = re.compile(
                '<div class="stream-nav shows" id="shows">(.+?)<div class="acontainer">',
                re.DOTALL).findall(html)
            match = re.compile('href=(.+?) title=(.+?)>',
                               re.DOTALL).findall(str(block))
            xml += "<item>"\
                   "<title>[COLOR blue][B]----TV SHOWS----[/B][/COLOR]</title>"\
                   "<thumbnail>http://iconbug.com/data/2b/256/c6cbe045e598958b1efacc78b4127205.png</thumbnail>"\
                   "<fanart>https://lerablog.org/wp-content/uploads/2014/05/tv-series.jpg</fanart>"\
                   "<link></link>"\
                   "</item>"
            for link, name in match:
                name = remove_non_ascii(name.replace("\\'", ""))
                link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url=https://www.arconaitv.us/" + link.replace("\\'", "")
                # Fix: the two branches emitted identical XML; only the
                # artwork lookup differed, so resolve the image then emit once.
                image = get_thumb(name, html)
                if not image:
                    image = get_other(name, html)
                xml += "<plugin>"\
                       "<title>%s</title>"\
                       "<link>"\
                       "<sublink>%s</sublink>"\
                       "</link>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>https://lerablog.org/wp-content/uploads/2014/05/tv-series.jpg</fanart>"\
                       "<summary>Random Episodes</summary>"\
                       "</plugin>" % (name, link, image)
        except Exception:
            # Best effort: layout changes degrade to a partial/empty list.
            pass
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #19
0
def get_stream(url):
    """List one page of videos from collectionofbestporn, with pagination."""
    pins = ""
    xml = ""
    try:
        page_url = urlparse.urljoin('http://collectionofbestporn.com/', url)
        page_html = requests.get(page_url, headers={'User_Agent': User_Agent}).content
        cards = dom_parser.parseDOM(
            page_html,
            'div',
            attrs={'class': 'video-item col-sm-5 col-md-4 col-xs-10'})
        seen = 0
        for card in cards:
            thumb_block = dom_parser.parseDOM(card,
                                              'div',
                                              attrs={'class': 'video-thumb'})[0]
            thumb = re.compile('<img src="(.+?)"',
                               re.DOTALL).findall(str(thumb_block))[0]
            video_href = re.compile('href="(.+?)"',
                                    re.DOTALL).findall(str(thumb_block))[0]
            title_block = dom_parser.parseDOM(card,
                                              'div',
                                              attrs={'class': 'title'})[0]
            label = remove_non_ascii(
                re.compile('title="(.+?)"',
                           re.DOTALL).findall(str(title_block))[0])
            seen += 1
            xml += ("<item>"
                    "    <title>%s</title>"
                    "    <thumbnail>%s</thumbnail>"
                    "    <cobp>%s</cobp>"
                    "    <summary>%s</summary>"
                    "</item>") % (label, thumb, video_href, label)
            # A full page holds 24 entries; append the pager exactly then.
            if seen == 24:
                pager = dom_parser.parseDOM(page_html,
                                            'li',
                                            attrs={'class': 'next'})[0]
                next_href = dom_parser.parseDOM(pager, 'a', ret='href')[0]
                xml += ("<dir>"
                        "    <title>Next Page</title>"
                        "    <thumbnail>%s</thumbnail>"
                        "    <cobp>%s</cobp>"
                        "</dir>") % (addon_icon, next_href)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #20
0
def get_NHLCupArchives(url):
    """List NHL Stanley Cup archive videos from the mail.ru JSON feed.

    url is "sh_nhl_sc/<offset>/<base64 account>"; the decoded account and the
    numeric offset are substituted into base_mail_url, and each item links
    back through mru_play/ for playback.
    """
    xml = ""
    url = url.replace('sh_nhl_sc/', '')
    offset = url.split('/')[0]
    # Python 2 str codec: the account name travels base64-encoded in the url.
    account = url.split('/')[1].decode('base64')
    url = base_mail_url % (account, offset, per_page['nhl'])
    # Normalise the first page so the next-page offset arithmetic below
    # starts from 0.
    if offset == '1':
        offset = '0'
    try:
        response = requests.get(url).content
        results = json.loads(response)
        results = results[2]['items']
        for item in results:
            try:
                title = item['Title']
                # "NHL Tonight" episodes are skipped unless enabled in settings.
                if 'true' in nhl_tonight:
                    pass
                else:
                    if 'nhl tonight' in title.lower():
                        continue
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                title = clean_mru_title(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <sportshub>mru_play/%s</sportshub>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,meta_url,icon)
            except:
                # Surface per-item failures to the user, then keep going.
                failure = traceback.format_exc()
                xbmcgui.Dialog().textviewer('Item Exception', str(failure))
                pass
    except:
        # Feed-level failure: show the traceback, still emit the pager below.
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a', str(failure))
        pass

    try:
        # Next page advances the offset by the per-page item count.
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <sportshub>sh_nhl_sc/%s/%s</sportshub>"\
               "</dir>" % (str(int(offset)+int(per_page['nhl'])),account.encode('base64'))
    except:
        failure = traceback.format_exc()
        xbmcgui.Dialog().textviewer('a', str(failure))
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #21
0
def imdbmovies(url):
    """Scrape an IMDb advanced-search results page into a Jen movie list.

    Shorthand routes (``movies/popular`` / ``movies/voted`` /
    ``movies/trending`` / ``movies/boxoffice``) are expanded to full IMDb
    search URLs first; any other value is fetched as-is.

    :param str url: shorthand route or full IMDb search URL.
    """
    xml = ""
    url = url.replace(
        "movies/popular",
        "http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1"
    ).replace(
        "movies/voted",
        "http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&sort=num_votes,desc&count=40&start=1"
    ).replace(
        "movies/trending",
        "http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=40&start=1"
    ).replace(
        "movies/boxoffice",
        "http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&sort=boxoffice_gross_us,desc&count=40&start=1"
    )
    listhtml = getHtml(url)
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight="98"\nsrc=".+?"\nwidth="67" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        name = title + " " + year
        year = year.replace("(", "").replace(")", "")
        # Swap the tiny 67x98 poster for the 520x700 rendition.
        thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg",
                                      "@._V1_UX520_CR0,0,520,700_AL_.jpg")
        xml += "<item>"\
          "<title>%s</title>"\
          "<meta>"\
          "<content>movie</content>"\
          "<imdb>%s</imdb>"\
          "<title>%s</title>"\
          "<year>%s</year>"\
          "</meta>"\
          "<link>"\
          "<sublink>search</sublink>"\
          "<sublink>searchsd</sublink>"\
          "</link>"\
          "<thumbnail>%s</thumbnail>"\
          "<fanart></fanart>"\
          "</item>" % (name, imdb, title, year, thumbnail)
    # BUG FIX: the "Next" anchor is absent on the final page; the old
    # findall(...)[0] raised IndexError there and lost the whole listing.
    next_matches = re.compile(
        '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    if next_matches:
        xml += "<dir>"\
            "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
            "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
            "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
            "</dir>" % (next_matches[0])
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_NHLCupArchives(url):
    """Render the NHL archive listing for a ``sh_nhl_sc/<offset>/<b64>`` route."""
    xml = ""
    route = url.replace('sh_nhl_sc/', '')
    segments = route.split('/')
    offset = segments[0]
    account = segments[1].decode('base64')
    # Build the request URL before the offset rewrite below.
    request_url = base_mail_url % (account, offset, per_page['nhl'])
    if offset == '1':
        offset = '0'
    try:
        payload = json.loads(requests.get(request_url).content)
        for item in payload[2]['items']:
            try:
                title = item['Title']
                # Drop "NHL Tonight" entries unless the setting allows them.
                if 'true' not in nhl_tonight and 'nhl tonight' in title.lower():
                    continue
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <sportshub>mru_play/%s</sportshub>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (clean_mru_title(title), meta_url, icon)
            except:
                xbmcgui.Dialog().textviewer('Item Exception', str(traceback.format_exc()))
    except:
        xbmcgui.Dialog().textviewer('a', str(traceback.format_exc()))

    try:
        next_offset = str(int(offset) + int(per_page['nhl']))
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <sportshub>sh_nhl_sc/%s/%s</sportshub>"\
               "</dir>" % (next_offset, account.encode('base64'))
    except:
        xbmcgui.Dialog().textviewer('a', str(traceback.format_exc()))

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def _big_movie_list_xml(base_id, table_name):
    """Return concatenated item xml for every row of one Airtable table.

    Rows missing any expected field are skipped, matching the old per-row
    try/except behaviour.
    """
    xml = ""
    at = Airtable(base_id, table_name, api_key='keyikW1exArRfNAWj')
    for field in at.get_all(maxRecords=1200, sort=['name']):
        try:
            res = field['fields']
            name = remove_non_ascii(res['name'])
            summary = remove_non_ascii(res['summary'])
            xml += display_xml(name, res['trailer'], summary,
                               res['thumbnail'], res['fanart'],
                               res['link_a'], res['link_b'], res['link_c'],
                               res['link_d'], res['link_e'])
        except:
            pass
    return xml


def open_movie_meta_movies(url):
    """Display the combined "OTB Big Movie List" Airtable tables.

    Results are cached in the local db under a fixed pin; a cache hit is
    displayed directly without touching Airtable.
    """
    pins = "PLuginbmlopenmeta"
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    else:
        # The two tables were formerly processed by two copy-pasted loops;
        # the shared logic now lives in _big_movie_list_xml().
        xml = _big_movie_list_xml('app1aK3wfaR0xDxSK', 'OTB Big Movie List')
        xml += _big_movie_list_xml('appaVv9EN3EJnvUz4', 'OTB Big Movie List 2')
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #24
0
def category_cnw(url):
    """List a celebsnudeworld.com category page (db-cached per URL).

    :param str url: route beginning with ``category/``.
    """
    url = url.replace('category/', '')
    url = urlparse.urljoin('http://www.celebsnudeworld.com/', url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            # BUG FIX: requests sends header-dict keys verbatim, so the old
            # 'User_Agent' key never set the real User-Agent header.
            headers = {'User-Agent': User_Agent}
            html = requests.get(url, headers=headers).content

            cat_divs = dom_parser.parseDOM(html, 'ul', attrs={'class':'videos'})[0]
            vid_entries = dom_parser.parseDOM(cat_divs, 'li')
            for vid_section in vid_entries:
                thumbnail = urlparse.urljoin('http://www.celebsnudeworld.com/', re.compile('src="(.+?)"',re.DOTALL).findall(str(vid_section))[0])
                vid_page_url, title = re.compile('href="(.+?)"\stitle="(.+?)"',re.DOTALL).findall(str(vid_section))[0]
                vid_page_url = urlparse.urljoin('http://www.celebsnudeworld.com/', vid_page_url)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <cnw>%s</cnw>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,title,vid_page_url,thumbnail)

            try:
                # Second prevnext anchor is "next"; on page 1 only one exists.
                try:
                    next_page = dom_parser.parseDOM(html, 'a', attrs={'class':'prevnext'}, ret='href')[1]
                except:
                    next_page = dom_parser.parseDOM(html, 'a', attrs={'class':'prevnext'}, ret='href')[0]
                next_page = next_page.replace('/', '', 1)
                xml += "<dir>"\
                       "    <title>Next Page</title>"\
                       "    <meta>"\
                       "        <summary>Click here for more p**n bitches!</summary>"\
                       "    </meta>"\
                       "    <cnw>category/%s</cnw>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</dir>" % (next_page,next_icon)
            except:
                pass
        except:
            pass

        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_genre_movies(url):
    """Display mystery-theatre movies whose 'type' matches the genre in ``url``."""
    pins = "PLuginmysterytheatre" + url
    cached = fetch_from_db2(pins)
    if cached:
        display_data(cached)
    else:
        xml = ""
        genre = url.split("/")[-1]
        airtable = Airtable(table_id, table_name, api_key=workspace_api_key)
        try:
            for record in airtable.search('type', genre):
                fields = record['fields']
                name = remove_non_ascii(fields['name'])
                summary = remove_non_ascii(fields['summary'])
                fanart = fields['fanart']
                thumbnail = fields['thumbnail']
                xml += "<item>"\
                       "<title>%s</title>"\
                       "<meta>"\
                       "<content>movie</content>"\
                       "<imdb></imdb>"\
                       "<title></title>"\
                       "<year></year>"\
                       "<thumbnail>%s</thumbnail>"\
                       "<fanart>%s</fanart>"\
                       "<summary>%s</summary>"\
                       "</meta>"\
                       "<link>"\
                       "<sublink>%s(Link 1)</sublink>"\
                       "<sublink>%s(Link 2)</sublink>"\
                       "<sublink>%s(Link 3)</sublink>"\
                       "<sublink>%s(Link 4)</sublink>"\
                       "<sublink>%s(Link 5)</sublink>"\
                       "<sublink>(Trailer)</sublink>"\
                       "</link>"\
                       "</item>" % (name, thumbnail, fanart, summary,
                                    fields['link1'], fields['link2'],
                                    fields['link3'], fields['link4'],
                                    fields['link5'])
        except:
            # Any malformed record aborts the scan (matches prior behaviour).
            pass

        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #26
0
def open_otb_tv_shows():
    """Display the "OTB TV Shows" Airtable listing (db-cached under a pin)."""
    pins = "PLuginotbtvshowmain"
    Items = fetch_from_db2(pins)
    if Items:
        # BUG FIX: return after displaying cached data. The old code fell
        # through to JenList(xml) with xml undefined -> NameError.
        display_data(Items)
        return
    lai = []
    at1 = Airtable(tid, tnm, api_key=atk)
    m1 = at1.get_all(maxRecords=1200, view='Grid view')
    for f1 in m1:
        r1 = f1['fields']
        lai.append(r1['au1'])
    # NOTE(review): looks like an authorization gate -- exit() aborts the
    # interpreter when yai is not listed; confirm this is intended.
    if yai not in lai:
        exit()
    xml = ""
    at = Airtable('app3KuBa2sTixDhTG', 'OTB TV Shows', api_key='keyu3sl4tsBzw02pw')
    match = at.get_all(maxRecords=700, sort=['name'])
    for field in match:
        try:
            res = field['fields']
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            summary = res['summary']
            if not summary:
                summary = ""
            else:
                summary = remove_non_ascii(summary)
            name = remove_non_ascii(res['name'])
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title></title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<otb_tv>show|%s</otb_tv>"\
                   "</item>" % (name,thumbnail,fanart,summary,res['link1'])
        except:
            pass  # skip rows missing any expected field
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #27
0
def imdbKeywords(url):
    """List IMDb keyword links found on the given search page."""
    xml = ""
    listhtml = getHtml(url)
    pattern = re.compile(
        '<a href="/keyword/(.+?)/.+?ref_=fn_kw_kw_.+?" >.+?</a>(.+?)</td>',
        re.IGNORECASE | re.DOTALL)
    for keyword, count in pattern.findall(listhtml):
        xml += "<dir>"\
               "<title>%s</title>"\
               "<imdburl>keyword/%s</imdburl>"\
               "<thumbnail></thumbnail>"\
               "</dir>" % (keyword + count, keyword)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #28
0
def imdbuser(url):
    """List an IMDb user's lists as directory entries."""
    xml = ""
    listhtml = getHtml('http://www.imdb.com/' + url)
    # Rename loop vars so they no longer shadow the `url` parameter.
    entries = re.compile(
        '<a class="list-name" href="(.+?)">(.+?)</a>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for list_url, list_name in entries:
        xml += "<dir>"\
               "<title>%s</title>"\
               "<imdburl>%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/fR6AOm/download.jpg</thumbnail>"\
               "</dir>" % (list_name, list_url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #29
0
def imdbKeywords(url):
    """Render the IMDb keyword matches on ``url`` as a Jen directory list."""
    page = getHtml(url)
    rows = re.compile(
        '<a href="/keyword/(.+?)/.+?ref_=fn_kw_kw_.+?" >.+?</a>(.+?)</td>',
        re.IGNORECASE | re.DOTALL).findall(page)
    entries = []
    for kw, cnt in rows:
        entries.append("<dir>"
                       "<title>%s</title>"
                       "<imdburl>keyword/%s</imdburl>"
                       "<thumbnail></thumbnail>"
                       "</dir>" % (kw + cnt, kw))
    jenlist = JenList("".join(entries))
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #30
0
def testings(file_name="testings.xml"):
    """Parse a local xml file from the addon profile dir as a jen list.

    :param str file_name: local file name to parse
    :return: list of jen items
    :rtype: list[dict[str,str]]
    """
    pins = ""
    profile_dir = xbmcaddon.Addon().getAddonInfo('profile')
    profile_path = xbmc.translatePath(profile_dir).decode('utf-8')
    handle = xbmcvfs.File(os.path.join(profile_path, file_name))
    xml = handle.read()
    handle.close()
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #31
0
def imdbuser(url):
    """Show the lists belonging to an IMDb user page as directories."""
    page = getHtml('http://www.imdb.com/' + url)
    matches = re.compile(
        '<a class="list-name" href="(.+?)">(.+?)</a>',
        re.IGNORECASE | re.DOTALL).findall(page)
    parts = []
    for href, label in matches:
        parts.append("<dir>"
                     "<title>%s</title>"
                     "<imdburl>%s</imdburl>"
                     "<thumbnail>https://image.ibb.co/fR6AOm/download.jpg</thumbnail>"
                     "</dir>" % (label, href))
    jenlist = JenList("".join(parts))
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #32
0
def open_table(url):
    """Display the per-band category menu from the OTB music Airtable.

    :param str url: pipe-separated route ``...|band|genre|table``.
    """
    parts = url.split("|")
    band = parts[1]
    pins = "PLuginotbmusic" + band
    Items = fetch_from_db2(pins)
    if Items:
        # BUG FIX: return after displaying cached data. The old code fell
        # through to JenList(xml) with xml undefined -> NameError.
        display_data(Items)
        return
    lai = []
    at1 = Airtable(tid, tnm, api_key=atk)
    m1 = at1.get_all(maxRecords=1200, view='Grid view')
    for f1 in m1:
        lai.append(f1['fields']['au1'])
    # NOTE(review): appears to be an authorization gate; exit() aborts the
    # interpreter when yai is not present -- confirm intent.
    if yai not in lai:
        exit()
    xml = ""
    gen = parts[2]
    table = parts[3]
    at = Airtable('appP8lvtpGOO2KPn7',
                  'Categories',
                  api_key='keyikW1exArRfNAWj')
    match = at.get_all(maxRecords=1200, view='Grid view')
    for field in match:
        try:
            res = field['fields']
            name = remove_non_ascii(res['Name'])
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            link = res['link']  # unused, but a missing 'link' skips the row
            title = band + " " + name
            xml +=  "<item>"\
                    "<title>%s</title>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<link>"\
                    "<otb_music>open|%s|%s|%s|%s</otb_music>"\
                    "</link>"\
                    "</item>" % (title,thumbnail,fanart,name,band,gen,table)

        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #33
0
def get_game(url):
    """List movies currently in theaters from Metacritic (cached per page).

    :param str url: route whose final segment is the page number.
    """
    xml = ""
    current = str(url.split("/")[-1])
    pins = "PLuginmetacritictheaters" + current
    Items = fetch_from_db2(pins)
    if Items:
        # BUG FIX: return here. The old code also built and displayed a
        # fresh, EMPTY JenList on top of the cached data.
        display_data(Items)
        return
    try:
        url = "https://www.metacritic.com/browse/movies/release-date/theaters/date?page=" + current
        r = scraper.get(url).content
        m = re.compile(
            '<td class="clamp-image-wrap">.+?<a href="(.+?)".+?<img src="(.+?)".+?alt="(.+?)".+?<span>(.+?)</span>.+?<div class="summary">(.+?)</div>',
            re.DOTALL).findall(r)
        for link, image, name, date, summary in m:
            link = base_link + link
            name = remove_non_ascii(name)
            name = clean_search(name)
            name = name.encode('utf8')
            summary = clean_search(summary)
            summary = remove_non_ascii(summary)
            summary = summary.encode('utf8')
            # Swap the 98px-high thumb for the 250px-high rendition.
            image = image.replace("-98", "-250h")
            xml += "<item>"\
                   "<title>%s : [COLOR=blue]In Theaters: [/COLOR]%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title></title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<metacritic>link**%s**%s**%s</metacritic>"\
                   "</item>" % (name,date,image,image,summary,link,name,image)
    except:
        pass

    next_page = int(current) + 1
    xml += "<item>"\
           "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
           "<metacritic>theaters/%s</metacritic>"\
           "<thumbnail>http://www.clker.com/cliparts/a/f/2/d/1298026466992020846arrow-hi.png</thumbnail>"\
           "</item>" % (next_page)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #34
0
def _load_protected_xml(xml_loc):
    """Fetch the protected list body from a URL or a bundled xml file."""
    if 'http' in xml_loc:
        return requests.get(xml_loc).content
    import xbmcvfs
    xml_loc = xml_loc.replace('file://', '')
    xml_file = xbmcvfs.File(os.path.join(addon_path, "xml", xml_loc))
    body = xml_file.read()
    xml_file.close()
    return body


def password_handler(url):
    """Gate a protected XML list behind a password prompt.

    :param str url: base64 of ``password|xml_location``. A correct answer
        is cached for SESSION_HOURS via the PASS_EXIRES_AT addon setting.
    """
    prot_xml = ''
    sep_list = url.decode('base64').split('|')
    dec_pass = sep_list[0]
    xml_loc = sep_list[1]

    SESSION_HOURS = this_addon.getSetting('SESSION_HOURS')
    if SESSION_HOURS == '':
        SESSION_HOURS = '1'
    expires_at = this_addon.getSetting('PASS_EXIRES_AT')
    # BUG FIX: settings come back as strings, and in Python 2
    # ``time.time() > '123...'`` is ALWAYS False (numbers sort before
    # strings), so a stored session never expired. Compare as floats.
    try:
        expired = time.time() > float(expires_at)
    except ValueError:
        expired = True  # unset or malformed setting -> require the password
    if expired:
        input = ''
        keyboard = xbmc.Keyboard(
            input,
            '[COLOR red]http://bit.ly/selflesschat - Join Telegram for the Password[/COLOR]'
        )
        keyboard.doModal()
        if keyboard.isConfirmed():
            input = keyboard.getText()
        if input == dec_pass:
            expires_at = time.time() + 60 * 60 * int(SESSION_HOURS)
            this_addon.setSetting("PASS_EXIRES_AT", str(expires_at))
            prot_xml = _load_protected_xml(xml_loc)
        else:
            prot_xml += "<dir>"\
                    "    <title>[COLOR yellow]Wrong Answer! Join http://bit.ly/selflesschat for the Password[/COLOR]</title>"\
                    "    <thumbnail>https://nsx.np.dl.playstation.net/nsx/material/c/ce432e00ce97a461b9a8c01ce78538f4fa6610fe-1107562.png</thumbnail>"\
                    "</dir>"
    else:
        # Session still valid -- load without prompting.
        prot_xml = _load_protected_xml(xml_loc)

    jenlist = JenList(prot_xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #35
0
def open_items(url):
    """Display one season of a trekkie show from its Airtable table.

    :param str url: pipe-separated route ending ``...|title|key|season``.
    """
    pins = "PLuginotbtrekkieseason"+url
    Items = fetch_from_db2(pins)
    if Items:
        # BUG FIX: return after displaying cached data. The old code fell
        # through to JenList(xml) with xml undefined -> NameError.
        display_data(Items)
        return
    xml = ""
    title = url.split("|")[-3]
    key = url.split("|")[-2]
    sea_name = url.split("|")[-1]
    result = title+"_"+sea_name
    at = Airtable(key, title, api_key='keyikW1exArRfNAWj')
    match = at.search('category', result,view='Grid view')
    for field in match:
        try:
            res = field['fields']
            thumbnail = res['thumbnail']
            fanart = res['fanart']
            summary = remove_non_ascii(res['summary'])
            name = remove_non_ascii(res['Name'])
            link1 = res['link1']
            link2 = res['link2']
            link3 = res['link3']
            link4 = res['link4']
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title></title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (name,thumbnail,fanart,summary,link1,link2,link3,link4)
        except:
            pass  # skip rows missing any expected field
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
def get_pcocats(url):
    """List PodcastOne shows for a category page (db-cached per URL).

    :param str url: route beginning with ``pcocategory/``.
    """
    pins = ""
    url = url.replace('pcocategory/', '')  # Strip our category tag off.
    url = urlparse.urljoin(pcobase_link, url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content

            div_list = re.compile(
                '<div class="podcast-container flex no-wrap" data-program-name="(.+?)">(.+?)</a></div>',
                re.DOTALL).findall(html)
            for show_title, content in div_list:
                try:
                    show_url = re.compile('href="(.+?)"',
                                          re.DOTALL).findall(content)[0]
                    show_url = show_url.replace('/', '')
                    if 'viewProgram' in show_url:
                        # BUG FIX: use local names for the drill-down fetch.
                        # The old code clobbered ``url``/``html``, so
                        # save_to_db() below cached the xml under the LAST
                        # show's URL and fetch_from_db(url) never hit again.
                        program_url = urlparse.urljoin(pcobase_link, show_url)
                        program_html = requests.get(program_url).content
                        more_ep_block = re.compile(
                            '<div class="col-xs-12">(.+?)</div>',
                            re.DOTALL).findall(program_html)[0]
                        show_url = re.compile(
                            'href="(.+?)"',
                            re.DOTALL).findall(more_ep_block)[0].replace(
                                '/', '').replace('?showAllEpisodes=true', '')
                    show_icon = urlparse.urljoin(
                        pcobase_link,
                        re.compile('<img src="(.+?)"',
                                   re.DOTALL).findall(content)[0])
                    xml += "<dir>"\
                           "    <title>%s</title>"\
                           "    <podcastone>pcoshow/%s</podcastone>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</dir>" % (show_title,show_url,show_icon,show_title)
                except:
                    continue
        except:
            pass

        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #37
0
def get_stream(url):
    """Scrape sports-stream.net's schedule into headers and SportsDevil links."""
    xml = ""
    try:
        url = "http://www.sports-stream.net/schedule.html"
        # BUG FIX: requests sends header-dict keys verbatim, so the old
        # 'User_Agent' key never set the real User-Agent header.
        headers = {'User-Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        block1 = re.compile('<br><font color="red">(.+?)<br><font color="red">',re.DOTALL).findall(html)
        match = re.compile('<h3>(.+?)<input onclick=',re.DOTALL).findall(str(block1))
        for head1 in match:
            head1 = head1.replace("&nbsp;", "")
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % (head1)
        match1 = re.compile('<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',re.DOTALL).findall(str(block1))
        # Renamed loop var: ``time`` shadowed the time module.
        for start_time, name, link in match1:
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url="+link
            xml += "<plugin>"\
                   "<title>%s - %s</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link>%s</link>"\
                   "</plugin>" % (start_time, name, link)
        match3 = re.compile('<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<input onclick=',re.DOTALL).findall(html)
        for head2 in match3:
            head2 = head2.replace("&nbsp;", "")
            xml += "<item>"\
                   "<title>[COLOR blue]%s[/COLOR]</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link></link>"\
                   "</item>" % (head2)
        block2 = re.compile('<br><font color="red"><h3>.+?<br><font color="red"><h3>(.+?)<script data-cfasync',re.DOTALL).findall(html)
        match4 = re.compile('<span style="color:#FF0000;">(.+?)</span> (.+?)<a.+?href="(.+?)"',re.DOTALL).findall(str(block2))
        for start_time, name, link in match4:
            link = "plugin://plugin.video.SportsDevil/?mode=1&amp;item=catcher%3dstreams%26url="+link
            xml += "<plugin>"\
                   "<title>%s - %s</title>"\
                   "<thumbnail>http://www.logotypes101.com/logos/997/AD71A2CC84DD8DDE7932F9BC585926E1/Sports.png</thumbnail>"\
                   "<fanart>http://sportz4you.com/blog/wp-content/uploads/2016/01/0b46b20.jpg</fanart>"\
                   "<link>%s</link>"\
                   "</plugin>" % (start_time, name, link)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_NBAReplayHD(url):
    """List NBA replay posts from the site's WP JSON API.

    :param str url: route of the form ``nbareplayhd/<category>/<page>``.
    """
    xml = ""
    # BUG FIX: count must exist before the try; if the category request
    # raised, ``if count > 0`` below hit an uncaught NameError.
    count = 0
    url = url.replace('nbareplayhd/', '') # Strip our category tag off.
    cat_item = url.split('/')
    if cat_item[1] is None or cat_item[1] == '':
        cat_item[1] = '1'
    orig_cat  = cat_item[0]
    orig_page = cat_item[1]
    url = urlparse.urljoin(archives['nbareplayhd'], (json_cat_url % (per_page['nba'], cat_item[0], cat_item[1])))
    try:
        response = requests.get(url).content
        results = re.compile('"id":(.+?),',re.DOTALL).findall(response)
        for post_id in results:
            count += 1
            try:
                url = urlparse.urljoin(archives['nbareplayhd'], ('/wp-json/wp/v2/posts/%s' % (post_id)))
                page = requests.get(url).content
                page = page.replace('\\','')  # un-escape the JSON body
                try:
                    src = 'http:' + re.compile('src="(.+?)"',re.DOTALL).findall(page)[0]
                except:
                    continue  # post without an embedded player

                title = re.compile('"title".+?"rendered":"(.+?)"',re.DOTALL).findall(page)[0]
                title = remove_non_ascii(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <link>%s</link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,src,addon_icon)
            except:
                pass
    except:
        pass

    try:
        # Offer Next Page only when the current page came back full.
        if count == int(per_page['nba']):
            xml += "<dir>"\
                   "    <title>Next Page >></title>"\
                   "    <sportshub>nbareplayhd/%s/%s</sportshub>"\
                   "</dir>" % (orig_cat,str((int(orig_page)+1)))
    except:
        pass

    if count > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #39
0
def get_wcstream(url):
    """List watchcartoononline category entries: items for movies/OVAs, dirs for series."""
    xml = ""
    url = url.replace('category/', '')  # Strip our category tag off.
    try:
        url = urlparse.urljoin('https://www.watchcartoononline.com', url)

        html = requests.get(url).content
        ddmcc = dom_parser.parseDOM(html, 'div', attrs={'class': 'ddmcc'})[0]
        # The root list contains every sub-list, so one parse covers all.
        for entry in dom_parser.parseDOM(ddmcc, 'li'):
            try:
                # Movie/OVA anchors use different html than series anchors;
                # the first pattern failing is how we tell them apart.
                try:
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(entry)[0]
                    is_movie = False
                except:
                    show_url, title = re.compile('<a href="(.+?)">(.+?)</a>',
                                                 re.DOTALL).findall(entry)[0]
                    is_movie = True
                title = remove_non_ascii(refreshtitle(title))

                if is_movie:
                    xml += "<item>"\
                           "    <title>%s</title>"\
                           "    <wctoon>direct/%s</wctoon>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</item>" % (title, show_url, addon_icon, title)
                else:
                    xml += "<dir>"\
                           "    <title>%s</title>"\
                           "    <wctoon>wcepisode/%s</wctoon>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</dir>" % (title, show_url, addon_icon, title)
            except:
                continue

    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #40
0
def get_NBAReplayHD(url):
    """List NBA Replay HD posts for one category page and paginate.

    url format: 'nbareplayhd/<category_id>/<page>' (page optional,
    defaults to '1').  Emits one <item> per post and a Next Page <dir>
    when a full page of results came back.
    """
    xml = ""
    # Initialized before the try: the pagination check and the final
    # `if count > 0` used to raise NameError whenever the category
    # request itself failed (count was only assigned inside the try).
    count = 0
    url = url.replace('nbareplayhd/', '')  # Strip our category tag off.
    cat_item = url.split('/')
    if len(cat_item) < 2:
        cat_item.append('1')  # no page segment in the url -> page 1
    elif cat_item[1] == None or cat_item[1] == '':
        cat_item[1] = '1'
    orig_cat  = cat_item[0]
    orig_page = cat_item[1]
    url = urlparse.urljoin(archives['nbareplayhd'], (json_cat_url % (per_page['nba'], cat_item[0], cat_item[1])))
    try:
        response = requests.get(url).content
        results = re.compile('"id":(.+?),',re.DOTALL).findall(response)
        for post_id in results:
            count += 1
            try:
                url = urlparse.urljoin(archives['nbareplayhd'], ('/wp-json/wp/v2/posts/%s' % (post_id)))
                page = requests.get(url).content
                page = page.replace('\\','')
                try:
                    # embedded player src is protocol-relative
                    src = 'http:' + re.compile('src="(.+?)"',re.DOTALL).findall(page)[0]
                except:
                    continue

                title = re.compile('"title".+?"rendered":"(.+?)"',re.DOTALL).findall(page)[0]
                title = remove_non_ascii(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <link>%s</link>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,src,addon_icon)
            except:
                pass
    except:
        pass

    try:
        # a full page of results implies there may be another page
        if count == int(per_page['nba']):
            xml += "<dir>"\
                   "    <title>Next Page >></title>"\
                   "    <sportshub>nbareplayhd/%s/%s</sportshub>"\
                   "</dir>" % (orig_cat,str((int(orig_page)+1)))
    except:
        pass

    if count > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #41
0
def get_wcpopular(url):
    """List Toonova's popular-cartoon listing with pagination.

    url is a path relative to http://www.toonova.net/.
    """
    xml = ""
    # Initialized up front: the Next Page entry below reuses the last
    # show's icon, and an unbound show_icon there would throw NameError
    # and silently abort the whole pagination block via the outer except.
    show_icon = ""
    url = urlparse.urljoin('http://www.toonova.net/', url)

    try:
        html = requests.get(url).content
        thedivs = dom_parser.parseDOM(html,
                                      'div',
                                      attrs={'class': 'series_list'})[1]
        list_items = dom_parser.parseDOM(thedivs, 'li')
        for content in list_items:
            try:
                info_header = dom_parser.parseDOM(content, 'h3')[0]
                show_url, title = re.compile('<a href="(.+?)">(.+?)</a>',
                                             re.DOTALL).findall(info_header)[0]
                title = refreshtitle(title).replace('Episode ', 'EP:')
                title = remove_non_ascii(title)
                show_icon = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(content)[0]
                xml += "<dir>"\
                       "    <title>%s</title>"\
                       "    <wctoon>wcepisode/%s</wctoon>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <summary>%s</summary>"\
                       "</dir>" % (title,show_url,show_icon,title)
            except:
                continue

        pagination = dom_parser.parseDOM(html,
                                         'ul',
                                         attrs={'class': 'pagination'})[0]
        if len(pagination) > 0:
            # last <li> of the pagination bar is the "next" link
            list_items = dom_parser.parseDOM(pagination, 'li')
            next_li = list_items[(len(list_items) - 1)]
            next_url = 'popular-cartoon/%s' % (re.compile(
                'href="http://www.toonova.net/popular-cartoon/(.+?)"',
                re.DOTALL).findall(next_li)[0])
            xml += "<dir>"\
                   "    <title>Next Page >></title>"\
                   "    <wctoon>%s</wctoon>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "    <summary>Next Page</summary>"\
                   "</dir>" % (next_url,show_icon)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #42
0
def get_show(url):
    """List seasons of a TVMaze show, enriched with TMDb metadata.

    Listings are cached under a pin key via fetch_from_db2; a cache hit
    is displayed directly.  On a miss the show is resolved through the
    TMDb search / external_ids / details endpoints and one <dir> per
    season is emitted (consumed by the season/ handler).
    """
    pins = "PLugintvmazeshow" + url
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
        # Fixed: without this return the function fell through to
        # JenList(xml) with xml never assigned, raising NameError.
        return
    xml = ""
    tv_title = url.split("/")[-2]
    tv_title = remove_non_ascii(tv_title)
    Title = remove_non_ascii(tv_title)
    Title = Title.lower()
    Title = Title.encode('utf8')
    Title = Title.replace(" ", "%20")  # url-encode spaces for the query
    html = "https://api.themoviedb.org/3/search/tv?api_key=%s&language=en-US&query=%s&page=1" % (
        TMDB_api_key, Title)
    html2 = requests.get(html).json()
    result = html2['results'][0]  # first search hit is assumed correct
    tmdb_id = result['id']
    date = result['first_air_date']
    year = date.split("-")[0]
    fanart = result['backdrop_path']
    fanart = fanart.replace("/", "")
    tmdb_fanart = "https://image.tmdb.org/t/p/original/" + str(fanart)
    url3 = "https://api.themoviedb.org/3/tv/%s/external_ids?api_key=%s&language=en-US" % (
        tmdb_id, TMDB_api_key)
    html4 = requests.get(url3).json()
    imdb = html4['imdb_id']
    tvdb = html4['tvdb_id']
    url2 = "https://api.themoviedb.org/3/tv/%s?api_key=%s&language=en-US" % (
        tmdb_id, TMDB_api_key)
    html3 = requests.get(url2).json()
    seas = html3['seasons']
    for seasons in seas:
        thumb = seasons['poster_path']
        thumb = "https://image.tmdb.org/t/p/original" + str(thumb)
        title = remove_non_ascii(seasons["name"])
        sea_num = seasons['season_number']
        sea_year = seasons['air_date']
        xml += "<dir>"\
               "<title>%s</title>"\
               "<year>%s</year>"\
               "<thumbnail>%s</thumbnail>"\
               "<fanart>%s</fanart>"\
               "<tvmaze>season/%s/%s/%s/%s/%s/%s/%s</tvmaze>"\
               "</dir>" % (title, year, thumb, tmdb_fanart, year, tv_title, fanart, imdb, tvdb, tmdb_id, sea_num)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #43
0
def tmdb_tv_show(url):
    """Build (or serve cached) season-listing XML for a TMDb TV show.

    url layout: '<tmdb_id>,<year>,<tv title...>' (the title itself may
    contain commas, hence the re-join).
    """
    xml = fetch_from_db(url)
    if not xml:
        parts = url.replace("tmdb_id", "").split(",")
        show_id, show_year = parts[0], parts[1]
        show_title = ",".join(parts[2:])
        season_list = tmdbsimple.TV(show_id).info()["seasons"]
        xml = "".join(get_season_xml(entry, show_id, show_year, show_title)
                      for entry in season_list)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #44
0
def tmdb_tv_show(url):
    """Display all seasons of a TMDb show, caching the generated XML."""
    xml = fetch_from_db(url)
    if not xml:
        # '<tmdb_id>,<year>,<title...>' -- title may contain commas
        fields = url.replace("tmdb_id", "").split(",")
        tmdb_id = fields[0]
        year = fields[1]
        tvtitle = ",".join(fields[2:])
        xml = ""
        for season_info in tmdbsimple.TV(tmdb_id).info()["seasons"]:
            xml += get_season_xml(season_info, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #45
0
def m3u(url):
    """Parse an M3U playlist into list items (title + stream link).

    Each '#EXTINF:...,<name>' header and the stream line after it become
    one <item>.
    """
    xml = ""
    # NOTE: removed the dead `if not xml:` guard (xml was just set to "").
    listhtml = getHtml(url)
    # The original used a `ur''` literal, which is Python-2-only syntax;
    # a raw string with re.UNICODE matches identically and also parses
    # on Python 3.
    match = re.compile(
        r'#EXTINF:.+?,(.+?)\n([a-zA-Z0-9s" "-\xe2_]+)\w?.*',
        re.UNICODE).findall(listhtml)
    for name, link in match:  # renamed: the old loop var shadowed `url`
        xml += "<item>"\
          "<title>%s</title>"\
          "<link>%s</link>"\
          "<thumbnail></thumbnail>"\
        "</item>" % (name, link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_TMSubItem(url):
    """Resolve a sub-item page into its playable video parts.

    Strips the 'subitem/' tag, fetches the detail page, follows each
    embedded video id to its player page and emits one <item> per
    resolved 'videozoome' stream ("Part 1", "Part 2", ...).  The
    generated XML is cached with save_to_db keyed by the resolved url.
    """
    pins = ""
    url = url.replace('subitem/', '')
    imgid = url  # NOTE(review): assigned but never used below
    url = urlparse.urljoin(base_details_link, url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url, headers=header).content
            id2 = re.compile('"id":"(.+?)"').findall(
                html)  # THERE IS MORE THAN 1 ID
            i = 0  # running part counter across all ids
            for num in id2:
                try:
                    nurl = urlparse.urljoin(base_videos_link, num)
                    page = requests.get(nurl, headers=header).content
                    # un-escape JSON slashes before scraping hrefs
                    links = re.compile('"(.+?)"').findall(
                        page.replace('\/', '/'))
                    for link in links:
                        if 'videozoome' in link:
                            # note: `page` is reused here for the player html
                            page = requests.get(link).content
                            try:
                                # newer player config: last 'file:' entry
                                link = re.compile('file: "(.+?)"').findall(
                                    page)[-1]
                            except:
                                # fallback for player markup using 'src:'
                                link = re.compile('src: "(.+?)"').findall(
                                    page)[-1]
                            i = i + 1
                            title = 'Part ' + str(i)
                            xml += "<item>"\
                                   "    <title>%s</title>"\
                                   "    <meta>"\
                                   "        <summary>%s</summary>"\
                                   "    </meta>"\
                                   "    <link>%s</link>"\
                                   "    <thumbnail>%s</thumbnail>"\
                                   "</item>" % (title,title,link,addon_icon)
                except:
                    continue
            save_to_db(xml, url)
        except:
            pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #47
0
def pornstars_eporner(url):
    """List eporner profile pages (thumb + title) with pagination.

    Generated XML is cached in the local db keyed by the resolved url.
    """
    url = urlparse.urljoin('https://www.eporner.com/', url)

    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            # Fixed: the header key was 'User_Agent'; underscores make it
            # a different (nonstandard) header, so the UA string was
            # never sent as the actual User-Agent.
            headers = {'User-Agent': User_Agent}
            html = requests.get(url, headers=headers).content

            profile_divs = dom_parser.parseDOM(html,
                                               'div',
                                               attrs={'class': 'mbprofile'})
            for profile in profile_divs:
                thumbnail = re.compile('src="(.+?)"',
                                       re.DOTALL).findall(str(profile))[0]
                profile_url, title = re.compile('href="(.+?)"+\stitle="(.+?)"',
                                                re.DOTALL).findall(
                                                    str(profile))[0]
                #profile_url = profile_url.replace('/', '', 1)
                xml += "<dir>"\
                       "    <title>%s</title>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <eporner>%s</eporner>"\
                       "    <summary>%s</summary>"\
                       "</dir>" % (title,thumbnail,profile_url, title)

            try:
                # last page has no 'Next page' anchor; swallow and skip
                next_page = dom_parser.parseDOM(html,
                                                'a',
                                                attrs={'title': 'Next page'},
                                                ret='href')[0]
                next_page = next_page.replace('/', '', 1)
                xml += "<dir>"\
                       "    <title>Next Page</title>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "    <eporner>%s</eporner>"\
                       "</dir>" % (next_icon,next_page)
            except:
                pass
        except:
            pass

        save_to_db(xml, url)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #48
0
def get_wcmainstream(subid):
    """List one of watchcartoononline's front-page listing blocks.

    subid layout: 'main/<div class>/<div index>', where <div class> is
    'sidebar-titles' or 'recent-release-main' (anything else yields an
    empty listing).
    """
    xml = ""
    subid = subid.replace('main/', '', 1)  # Strip our category tag off.
    subid = subid.split('/')
    pins = ""
    try:
        html = requests.get('https://www.watchcartoononline.com').content
        # pick the Nth div of the requested class from the front page
        thedivs = dom_parser.parseDOM(html, 'div',
                                      attrs={'class': subid[0]})[int(subid[1])]
        list_items = dom_parser.parseDOM(thedivs, 'li')
        for content in list_items:
            try:
                if subid[0] == 'sidebar-titles':
                    # sidebar entries: first anchor; no per-show art is
                    # scraped, so the addon icon is used as thumbnail
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(content)[0]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    xml += "<dir>"\
                           "    <title>%s</title>"\
                           "    <wctoon>wcepisode/%s</wctoon>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</dir>" % (title,show_url,addon_icon,title)
                elif subid[0] == 'recent-release-main':
                    # recent releases: second anchor is the episode link;
                    # first img src is the show thumbnail
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(content)[1]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    show_icon = re.compile('src="(.+?)"',
                                           re.DOTALL).findall(content)[0]
                    xml += "<dir>"\
                           "    <title>%s</title>"\
                           "    <wctoon>wcepisode/%s</wctoon>"\
                           "    <thumbnail>%s</thumbnail>"\
                           "    <summary>%s</summary>"\
                           "</dir>" % (title,show_url,show_icon,title)
                else:
                    continue
            except:
                continue
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)
Beispiel #49
0
def tmdb_season(url):
    """Build (or serve cached) episode-listing XML for one TMDb season.

    url layout: '<tmdb_id>,<season>,<year>,<tv title...>' (the title may
    contain commas, hence the re-join).
    """
    xml = fetch_from_db(url)
    if not xml:
        parts = url.replace("tmdb_id", "").split(",")
        show_id, season_no, show_year = parts[0], parts[1], parts[2]
        show_title = ",".join(parts[3:])
        episode_list = tmdbsimple.TV_Seasons(show_id, season_no).info()["episodes"]
        xml = "".join(get_episode_xml(ep, show_id, show_year, show_title)
                      for ep in episode_list)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #50
0
def tmdb_season(url):
    """Display all episodes of one TMDb season, caching the XML."""
    xml = fetch_from_db(url)
    if not xml:
        # '<tmdb_id>,<season>,<year>,<title...>'
        fields = url.replace("tmdb_id", "").split(",")
        tmdb_id = fields[0]
        season = fields[1]
        year = fields[2]
        tvtitle = ",".join(fields[3:])
        xml = ""
        for episode_info in tmdbsimple.TV_Seasons(tmdb_id, season).info()["episodes"]:
            xml += get_episode_xml(episode_info, tmdb_id, year, tvtitle)
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #51
0
def imdbBothSearch(url):
    """Render an IMDb title-search result page.

    Movies become playable <item> entries; anything whose year text
    contains 'Series' becomes a season <dir>.  Artwork is resolved via
    TMDb's /find endpoint.
    """
    xml = ""
    listhtml = getHtml(url)
    result_re = re.compile(
        '<img src="(.+?)" /></a> </td> <td class="result_text"> <a href="/title/(.+?)/.+?ref_=fn_al_tt_.+?" >(.+?)</a>(.+?)</td>',
        re.IGNORECASE | re.DOTALL)
    for thumbnail, imdb, title, year in result_re.findall(listhtml):
        tmdb_url = ('http://api.themoviedb.org/3/find/' + imdb +
                    '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id')
        tmdbhtml = requests.get(tmdb_url).content
        poster_list = re.compile('"poster_path":"(.+?)"',
                                 re.DOTALL).findall(tmdbhtml)
        backdrop_list = re.compile('"backdrop_path":"(.+?)"',
                                   re.DOTALL).findall(tmdbhtml)
        for poster_path in poster_list:
            for backdrop_path in backdrop_list:
                if 'Series' in year:
                    name = title + " " + year
                    xml += "<dir>"\
                           "<title>%s</title>"\
                           "<imdburl>season/%s</imdburl>"\
                           "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                           "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                           "</dir>" % (name, imdb, poster_path, backdrop_path)
                else:
                    # note: `year` is rewritten in place each pass, as in
                    # the original implementation
                    year = year.split(')', 1)[0]
                    name = title + " " + year + ')'
                    year = year.replace("(", "").replace(")", "")
                    xml += "<item>"\
                           "<title>%s</title>"\
                           "<meta>"\
                           "<content>movie</content>"\
                           "<imdb>%s</imdb>"\
                           "<title>%s</title>"\
                           "<year>%s</year>"\
                           "</meta>"\
                           "<link>"\
                           "<sublink>search</sublink>"\
                           "<sublink>searchsd</sublink>"\
                           "</link>"\
                           "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                           "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                           "</item>" % (name, imdb, title, year, poster_path, backdrop_path)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def open_tv_shows():
    """List the '4ktvshows' Airtable: plugin:// links become <plugin>
    entries, plain stream links become <item> entries (red titles)."""
    xml = ""
    at = Airtable('apppVCDzOzx143aDx',
                  '4ktvshows',
                  api_key='keyOHaxsTGzHU9EEh')
    records = at.get_all(maxRecords=700, sort=['channel'])
    # fields are scraped back out of the records' repr() text
    row_re = re.compile(
        "fanart': u'(.+?)'.+?link': u'(.+?)'.+?thumbnail': u'(.+?)'.+?channel': u'(.+?)'.+?summary': u'(.+?)'",
        re.DOTALL)
    for art, stream, thumb, name, plot in row_re.findall(str(records)):
        if "plugin" in stream:
            xml += "<plugin>"\
                   "<title>[COLOR red]%s[/COLOR]</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>[COLOR red]%s[/COLOR]</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</plugin>" % (name, name, thumb, art, plot, stream)
        else:
            xml += "<item>"\
                   "<title>[COLOR red]%s[/COLOR]</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>[COLOR red]%s[/COLOR]</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</item>" % (name, name, thumb, art, plot, stream)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #53
0
def m3u(url):
    """Parse a *.m3u playlist url into list items (title + stream link).

    Non-.m3u urls produce an empty listing.
    """
    xml = ""
    # NOTE: removed the dead `if not xml:` wrapper (xml was just set to
    # "") and the no-op `name = name; url = url` assignments.
    if '.m3u' in url:
        listhtml = getHtml(url)
        match = re.compile('#EXTINF:.+?,(.+?)\n([^"]+)\n',
                           re.IGNORECASE | re.DOTALL).findall(listhtml)
        for name, link in match:  # renamed: old loop var shadowed `url`
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<link>%s</link>"\
                   "<thumbnail></thumbnail>"\
                   "</item>" % (name, link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #54
0
def imdbyears(url):
    """List IMDb feature films for a given year with TMDb artwork.

    url is 'years/<year>'.  Scrapes IMDb's advanced title search and
    resolves poster/fanart through TMDb's /find endpoint.
    """
    xml = ""
    url = url.replace("years/", "")
    url = 'http://www.imdb.com/search/title?year=' + url + '&title_type=feature'
    listhtml = getHtml(url)
    match = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight="98"\nsrc=".+?"\nwidth="67" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL).findall(listhtml)
    for thumbnail, imdb, title, year in match:
        tmdb_url = 'http://api.themoviedb.org/3/find/' + imdb + '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id'
        tmdbhtml = requests.get(tmdb_url).content
        Poster_path = re.compile(
            '"backdrop_path":"(.+?)".+?"overview":".+?","poster_path":"(.+?)"}',
            re.DOTALL).findall(tmdbhtml)
        for backdrop_path, poster_path in Poster_path:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            # swap IMDb's tiny list thumb for a larger rendition
            thumbnail = thumbnail.replace("@._V1_UX67_CR0,0,67,98_AL_.jpg", "@._V1_SY1000_SX800_AL_.jpg")
            xml += "<item>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb>%s</imdb>"\
                   "<title>%s</title>"\
                   "<year>%s</year>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>search</sublink>"\
                   "<sublink>searchsd</sublink>"\
                   "</link>"\
                   "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                   "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                   "</item>" % (name, imdb, title, year, poster_path, backdrop_path)
    try:
        # Fixed: the [0] lookup was unguarded, so the final results page
        # (which has no next-page anchor) raised IndexError and dropped
        # the whole listing.  Guarded like imdbseries().
        next_page = re.compile(
            '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
               "</dir>" % (next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #55
0
def get_season(url):
    """List a season's episodes via the TMDb season endpoint.

    url layout (built by get_show's <tvmaze> tag):
    .../<tv_title>/<fanart>/<imdb>/<tvdb>/<tmdb_id>/<sea_num>
    """
    xml = "" 
    sea_num = url.split("/")[-1]
    if len(sea_num)==1:
        # zero-pad single-digit season numbers (e.g. '1' -> '01')
        sea_num = "0"+sea_num
    tmdb_id = url.split("/")[-2]
    tvdb = url.split("/")[-3]
    imdb = url.split("/")[-4]
    tv_title = url.split("/")[-6]
    fanart = url.split("/")[-5]
    tmdb_fanart = "https://image.tmdb.org/t/p/original/"+str(fanart)
    html = "https://api.themoviedb.org/3/tv/%s/season/%s?api_key=%s&language=en-US" % (tmdb_id, sea_num, TMDB_api_key)
    html = requests.get(html).json()
    eps = html['episodes']
    for episodes in eps:
        thumb = episodes['still_path']
        thumb = "https://image.tmdb.org/t/p/original"+str(thumb)
        title = episodes['name']
        # NOTE(review): air_date can apparently be missing/None for
        # unaired episodes -- the split would then fail; confirm upstream
        air_date = episodes['air_date']
        year = air_date.split("-")[0]
        episode_num = episodes['episode_number']
        xml += "<item>"\
              "<title>%s</title>"\
              "<meta>"\
              "<imdb>%s</imdb>"\
              "<tvdb>%s</tvdb>"\
              "<content>episode</content>"\
              "<tvshowtitle>%s</tvshowtitle>"\
              "<year>%s</year>"\
              "<premiered></premiered>"\
              "<season>%s</season>"\
              "<episode>%s</episode>"\
              "</meta>"\
              "<link>"\
              "<sublink>search</sublink>"\
              "<sublink>searchsd</sublink>"\
              "</link>"\
              "<thumbnail>%s</thumbnail>"\
              "<fanart>%s</fanart>"\
              "</item>" % (title, imdb, tvdb, tv_title, year, sea_num, episode_num, thumb, tmdb_fanart) 

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #56
0
def new_releases():
    """List sports channels from the 'Sports_channels' Airtable.

    Rows whose link is a plugin:// url become <plugin> entries; all
    other rows become plain playable <item> entries.
    """
    xml = ""
    at = Airtable('apppx7NENxSaqMkM5', 'Sports_channels', api_key='keyOHaxsTGzHU9EEh')
    match = at.get_all(maxRecords=700, sort=['channel'])
    # fields are scraped back out of the records' repr() text (note the
    # Python-2 u'' literals in the pattern)
    results = re.compile("fanart': u'(.+?)'.+?link': u'(.+?)'.+?thumbnail': u'(.+?)'.+?channel': u'(.+?)'.+?summary': u'(.+?)'",re.DOTALL).findall(str(match))
    for fanart,link,thumbnail,channel,summary in results:
        if "plugin" in link:

            xml += "<plugin>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>movie</content>"\
                   "<imdb></imdb>"\
                   "<title>%s</title>"\
                   "<year></year>"\
                   "<thumbnail>%s</thumbnail>"\
                   "<fanart>%s</fanart>"\
                   "<summary>%s</summary>"\
                   "</meta>"\
                   "<link>"\
                   "<sublink>%s</sublink>"\
                   "</link>"\
                   "</plugin>" % (channel,channel,thumbnail,fanart,summary,link)

        else:
            xml +=  "<item>"\
                    "<title>%s</title>"\
                    "<meta>"\
                    "<content>movie</content>"\
                    "<imdb></imdb>"\
                    "<title>%s</title>"\
                    "<year></year>"\
                    "<thumbnail>%s</thumbnail>"\
                    "<fanart>%s</fanart>"\
                    "<summary>%s</summary>"\
                    "</meta>"\
                    "<link>"\
                    "<sublink>%s</sublink>"\
                    "</link>"\
                    "</item>" % (channel,channel,thumbnail,fanart,summary,link)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #57
0
def imdbseries(url):
    """List TV series from an IMDb advanced-search page as <dir> entries.

    Poster/fanart are resolved through TMDb's /find endpoint; a Next
    Page entry is appended when IMDb provides one.
    """
    xml = ""
    listhtml = getHtml(url)
    entry_re = re.compile(
        '<img alt=".+?"\nclass="loadlate"\nloadlate="(.+?)"\ndata-tconst="(.+?)"\nheight=".+?"\nsrc=".+?"\nwidth=".+?" />\n</a>.+?</div>\n.+?<div class="lister-item-content">\n<h3 class="lister-item-header">\n.+?<span class="lister-item-index unbold text-primary">.+?</span>\n.+?\n.+?<a href=".+?"\n>(.+?)</a>\n.+?<span class="lister-item-year text-muted unbold">(.+?)</span>',
        re.IGNORECASE | re.DOTALL)
    for thumbnail, imdb, title, year in entry_re.findall(listhtml):
        tmdbhtml = requests.get(
            'http://api.themoviedb.org/3/find/' + imdb +
            '?api_key=30551812602b96050a36103b3de0163b&external_source=imdb_id').content
        art_pairs = re.compile(
            '"poster_path":"(.+?)".+?"backdrop_path":"(.+?)"',
            re.DOTALL).findall(tmdbhtml)
        for poster_path, backdrop_path in art_pairs:
            name = title + " " + year
            year = year.replace("(", "").replace(")", "")
            xml += "<dir>"\
                   "<title>%s</title>"\
                   "<meta>"\
                   "<content>tvshow</content>"\
                   "<imdb>%s</imdb>"\
                   "<imdburl>season/%s</imdburl>"\
                   "<tvdb></tvdb>"\
                   "<tvshowtitle>%s</tvshowtitle>"\
                   "<year>%s</year>"\
                   "</meta>"\
                   "<link></link>"\
                   "<thumbnail>https://image.tmdb.org/t/p/w1280/%s</thumbnail>"\
                   "<fanart>https://image.tmdb.org/t/p/w1280/%s</fanart>"\
                   "</dir>" % (name, imdb, imdb, title, year, poster_path, backdrop_path)
    try:
        # the final results page has no next-page anchor
        next_page = re.compile(
            '<a href="([^"]+)"\nclass="lister-page-next next-page" ref-marker=adv_nxt>Next &#187;</a>\n.+?</div>\n.+?<br class="clear" />',
            re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
        xml += "<dir>"\
               "<title>[COLOR dodgerblue]Next Page >>[/COLOR]</title>"\
               "<imdburl>http://www.imdb.com/search/title%s</imdburl>"\
               "<thumbnail>https://image.ibb.co/gtsNjw/next.png</thumbnail>"\
               "</dir>" % (next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #58
0
def get_show(url):
    """List seasons of a show by resolving it through the TMDb API.

    Searches TMDb for the title, fetches external ids (imdb/tvdb) and
    season metadata, then emits one <dir> per season whose <tvmaze>
    path is consumed by get_season().
    """
    xml = ""
    tv_title = url.split("/")[-2]
    Title = remove_non_ascii(tv_title)
    Title = Title.lower()
    Title = Title.encode('utf8')
    Title = Title.replace(" ", "%20")  # url-encode spaces for the query
    html = "https://api.themoviedb.org/3/search/tv?api_key=%s&language=en-US&query=%s&page=1" % (TMDB_api_key, Title)
    html2 = requests.get(html).json()
    result = html2['results'][0]  # first search hit is assumed correct
    tmdb_id = result['id']
    fanart = result['backdrop_path']
    fanart = fanart.replace("/", "")
    tmdb_fanart = "https://image.tmdb.org/t/p/original/"+str(fanart)
    url3 = "https://api.themoviedb.org/3/tv/%s/external_ids?api_key=%s&language=en-US" % (tmdb_id, TMDB_api_key)
    html4 = requests.get(url3).json()
    imdb = html4['imdb_id']
    tvdb = html4['tvdb_id']
    url2 = "https://api.themoviedb.org/3/tv/%s?api_key=%s&language=en-US" % (tmdb_id, TMDB_api_key)
    html3 = requests.get(url2).json()
    seas = html3['seasons']
    for seasons in seas:
        thumb = seasons['poster_path']
        thumb = "https://image.tmdb.org/t/p/original"+str(thumb)
        title = seasons['name']
        sea_num = seasons['season_number']
        # NOTE(review): air_date may be None for unannounced seasons,
        # which would make the split below fail -- confirm upstream data
        air_date = seasons['air_date']
        year = air_date.split("-")[0]
        xml += "<dir>"\
               "<title>%s</title>"\
               "<year>%s</year>"\
               "<thumbnail>%s</thumbnail>"\
               "<fanart>%s</fanart>"\
               "<tvmaze>season/%s/%s/%s/%s/%s/%s</tvmaze>"\
               "</dir>" % (title, year, thumb, tmdb_fanart, tv_title, fanart, imdb, tvdb, tmdb_id, sea_num)

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
Beispiel #59
0
def get_swiftstreamz_category(url):
    """List live channels for one SwiftStreamz category.

    Only m3u8 streams are listed; an upstream 'Erreur 503' maintenance
    page yields a single informational <dir> entry instead.
    """
    xml = ""
    url = url.replace('swiftcategory/', '') # Strip our category tag off.
    try:
        url = base_cat_url % (url)
        headers = {'Authorization': 'Basic QFN3aWZ0MTEjOkBTd2lmdDExIw', 'User-Agent': User_Agent}
        response = requests.get(url,headers=headers)
        if 'Erreur 503' in response.content:
            xml += "<dir>"\
                   "    <title>[B]System down for maintenance[/B]</title>"\
                   "    <meta>"\
                   "        <summary>System down for maintenance</summary>"\
                   "    </meta>"\
                   "    <heading></heading>"\
                   "    <thumbnail>%s</thumbnail>"\
                   "</dir>" % (addon_icon)
        else:
            data = response.json(strict=False)
            for chan in data['LIVETV']:
                stream = chan['channel_url']
                if 'm3u8' not in stream:
                    continue
                name = chan['channel_title']
                icon = base_ico_url % (chan['channel_thumbnail'])
                desc = chan['channel_desc']
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <meta>"\
                       "        <summary>%s</summary>"\
                       "    </meta>"\
                       "    <swift>swiftplay/%s/%s</swift>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (name,desc,name,stream,icon)
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_MLBArchives(url):
    """List MLB archive videos from a mail.ru account feed, with paging.

    url format: 'sh_mlb/<offset>/<base64-encoded account>'.
    """
    xml = ""
    url = url.replace('sh_mlb/', '')
    offset  = url.split('/')[0]
    account = url.split('/')[1].decode('base64')  # Python-2 base64 codec
    url = base_mail_url % (account, offset, per_page['mlb'])
    # the feed url above is requested with offset '1' on the first page,
    # but the next-page arithmetic below starts counting from 0
    if offset == '1':
        offset = '0'
    try:
        response = requests.get(url).content
        results = json.loads(response)
        results = results[2]['items']
        for item in results:
            try:
                title = item['Title']
                meta_url = item['MetaUrl']
                icon = item['ImageUrlP']
                title = clean_mru_title(title)
                xml += "<item>"\
                       "    <title>%s</title>"\
                       "    <sportshub>mru_play/%s</sportshub>"\
                       "    <thumbnail>%s</thumbnail>"\
                       "</item>" % (title,meta_url,icon)
            except:
                continue
    except:
        pass

    try:
        # next page: advance the offset by one page-size, re-encode account
        xml += "<dir>"\
               "    <title>Next Page >></title>"\
               "    <sportshub>sh_mlb/%s/%s</sportshub>"\
               "</dir>" % (str(int(offset)+int(per_page['mlb'])),account.encode('base64'))
    except:
        pass

    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())