def get_pbshow(url):
    xml = ""
    url = url.replace('pbshow/', '')  # Strip our show tag off.
    try:
        url = urlparse.urljoin(pbshow_link, url)
        html = requests.get(url).content
        show_icon = re.compile(
            '<meta property="og:image" content="(.+?)"').findall(html)[0]
        table_content = dom_parser.parseDOM(
            html, 'div', attrs={'class': 'span8 well'})[0]
        table_rows = dom_parser.parseDOM(table_content, 'tr')
        for row in table_rows:
            if 'href' in row:
                ep_page, ep_summary, ep_title = re.compile(
                    '<a href="(.+?)".+?title="(.*?)">(.+?)</a>',
                    re.DOTALL).findall(row)[0]
            else:
                continue
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <podbay>pbepisode/%s</podbay>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <summary>%s</summary>"\
                   "</item>" % (ep_title, ep_page, show_icon, ep_summary)
    except:
        # failure = traceback.format_exc()
        # xbmcgui.Dialog().textviewer('Total Failure', str(failure))
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def category_eporner(url):
    pins = ""
    xml = ""
    try:
        url = urlparse.urljoin('https://www.eporner.com/', url)
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        hdy_vid_divs = dom_parser.parseDOM(
            html, 'div', attrs={'class': 'mb hdy'})
        for vid_section in hdy_vid_divs:
            thumbnail = re.compile(
                'src="(.+?)"', re.DOTALL).findall(str(vid_section))[0]
            vid_page_url, title = re.compile(
                'href="(.+?)"+\stitle="(.+?)"',
                re.DOTALL).findall(str(vid_section))[0]
            vid_page_url = urlparse.urljoin(
                'https://www.eporner.com/', vid_page_url)
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <eporner>%s</eporner>"\
                   " <summary>%s</summary>"\
                   "</item>" % (title, thumbnail, vid_page_url, title)
        vid_divs = dom_parser.parseDOM(html, 'div', attrs={'class': 'mb'})
        for vid_section in vid_divs:
            thumbnail = re.compile(
                'src="(.+?)"', re.DOTALL).findall(str(vid_section))[0]
            vid_page_url, title = re.compile(
                'href="(.+?)"+\stitle="(.+?)"',
                re.DOTALL).findall(str(vid_section))[0]
            vid_page_url = urlparse.urljoin(
                'https://www.eporner.com/', vid_page_url)
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <eporner>%s</eporner>"\
                   " <summary>%s</summary>"\
                   "</item>" % (title, thumbnail, vid_page_url, title)
        try:
            next_page = dom_parser.parseDOM(
                html, 'a', attrs={'title': 'Next page'}, ret='href')[0]
            next_page = next_page.replace('/', '', 1)
            xml += "<dir>"\
                   " <title>[COLOR palegreen]Next Page[/COLOR]</title>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <eporner>%s</eporner>"\
                   "</dir>" % (next_icon, next_page)
        except:
            pass
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_pbepisode(url):
    xml = ""
    url = url.replace('pbepisode/', '')  # Strip our episode tag off.
    try:
        html = requests.get(url).content
        ep_icon = dom_parser.parseDOM(
            html, 'meta', attrs={'property': 'og:image'}, ret='content')[0]
        ep_title = dom_parser.parseDOM(
            html, 'meta', attrs={'property': 'og:title'},
            ret='content')[0].encode('utf-8', 'ignore').decode('utf-8')
        url = dom_parser.parseDOM(
            html, 'a', attrs={'class': 'btn btn-mini btn-primary'},
            ret='href')[0]
        item = xbmcgui.ListItem(label=ep_title, path=url, iconImage=ep_icon,
                                thumbnailImage=ep_icon)
        item.setInfo(type="Video", infoLabels={"Title": ep_title})
        import resolveurl
        koding.Play_Video(url, showbusy=False, ignore_dp=True, item=item,
                          resolver=resolveurl)
    except:
        pass


def show_Template_Item(url):
    xml = ""
    url = url.replace('main/', '', 1)  # Strip our category tag off.
    try:
        url = urlparse.urljoin(base_main_link, url)
        html = requests.get(url).content
        sections = dom_parser.parseDOM(
            html, 'table', attrs={'class': 'series_index'})
        for table in sections:
            try:
                the_cols = dom_parser.parseDOM(table, 'td')
                for column in the_cols:
                    if '&nbsp;' in column:  # Skip empty placeholder cells.
                        continue
                    show_url, title = re.compile(
                        '<a href="(.+?)">(.+?)</a>',
                        re.DOTALL).findall(column)[0]
                    title = refreshtitle(title)
                    title = remove_non_ascii(title)
                    xml += "<dir>"\
                           " <title>%s</title>"\
                           " <meta>"\
                           " <summary>%s</summary>"\
                           " </meta>"\
                           " <link>%s</link>"\
                           " <thumbnail>%s</thumbnail>"\
                           "</dir>" % (title, title, show_url, addon_icon)
            except:
                continue
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wcgenre(url):
    if 'all' in url:
        get_wcgenrelist()
        return
    else:
        url = url.replace('wcgenre/', '')  # Strip our genre tag off.
        url = urlparse.urljoin(
            'https://www.watchcartoononline.io/search-by-genre/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            # Pull the root list, as all the minor lists are contained within it.
            ddmcc = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'ddmcc'})[0]
            lists = dom_parser.parseDOM(ddmcc, 'li')
            for entry in lists:
                show_url, title = re.compile(
                    'href="(.+?)">(.+?)</a>', re.DOTALL).findall(entry)[0]
                title = refreshtitle(title)
                title = remove_non_ascii(title)
                xml += "<dir>"\
                       " <title>%s</title>"\
                       " <wctoon>wcepisode/%s</wctoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</dir>" % (title, show_url, addon_icon, title)
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wcepisodes(url):
    xml = ""
    url = url.replace('wcepisode/', '')  # Strip our episode tag off.
    try:
        url = urlparse.urljoin('http://www.toonova.net/', url)
        html = requests.get(url).content
        thediv = dom_parser.parseDOM(html, 'div', attrs={'id': 'videos'})[0]
        lists = dom_parser.parseDOM(thediv, 'li')
        for entry in lists:
            show_url, title = re.compile(
                '<a href="(.+?)">(.+?)</a>', re.DOTALL).findall(entry)[0]
            title = refreshtitle(title).replace('Episode ', 'EP:')
            title = remove_non_ascii(title)
            show_icon = dom_parser.parseDOM(
                html, 'div', attrs={'id': 'series_info'})[0]
            show_icon = re.compile(
                'src="(.+?)"', re.DOTALL).findall(show_icon)[0]
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <wctoon>list-videos/%s</wctoon>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <summary>%s</summary>"\
                   "</item>" % (title, show_url, show_icon, title)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wcgenrelist():
    url = 'https://www.watchcartoononline.com/search-by-genre/'
    pins = ""
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            # Pull the root list, as all the minor lists are contained within it.
            ddmcc = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'ddmcc'})[0]
            lists = dom_parser.parseDOM(ddmcc, 'li')
            for entry in lists:
                show_url, title = re.compile(
                    'href="(.+?)">(.+?)</a>', re.DOTALL).findall(entry)[0]
                # Convert show_url to get the last tag in the url for the xml creation.
                show_url = show_url.split('/')[-1]
                title = refreshtitle(title)
                title = remove_non_ascii(title)
                xml += "<dir>"\
                       " <title>%s</title>"\
                       " <wctoon>wcgenre/%s</wctoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</dir>" % (title, show_url, addon_icon, title)
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_wcstream(url):
    xml = ""
    url = url.replace('category/', '')  # Strip our category tag off.
    try:
        url = urlparse.urljoin('http://www.toonova.net/', url)
        html = requests.get(url).content
        sections = dom_parser.parseDOM(
            html, 'table', attrs={'class': 'series_index'})
        for table in sections:
            try:
                the_cols = dom_parser.parseDOM(table, 'td')
                for column in the_cols:
                    if '&nbsp;' in column:  # Skip empty placeholder cells.
                        continue
                    show_url, title = re.compile(
                        '<a href="(.+?)">(.+?)</a>',
                        re.DOTALL).findall(column)[0]
                    title = refreshtitle(title)
                    title = remove_non_ascii(title)
                    xml += "<dir>"\
                           " <title>%s</title>"\
                           " <wctoon>wcepisode/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</dir>" % (title, show_url, addon_icon, title)
            except:
                continue
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wcepisodes(url):
    url = url.replace('wcepisode/', '')  # Strip our episode tag off.
    url = urlparse.urljoin('https://www.watchcartoononline.io', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            thediv = dom_parser.parseDOM(
                html, 'div', attrs={'id': 'catlist-listview'})[0]
            lists = dom_parser.parseDOM(thediv, 'li')
            for entry in lists:
                show_url, title = re.compile(
                    '<a href="(.+?)".+?>(.+?)</a>',
                    re.DOTALL).findall(entry)[0]
                title = refreshtitle(title).replace('Episode ', 'EP:')
                title = remove_non_ascii(title)
                show_icon = dom_parser.parseDOM(
                    html, 'meta', attrs={'property': 'og:image'},
                    ret='content')[0]
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <wctoon>direct/%s</wctoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</item>" % (title, show_url, show_icon, title)
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_pbshow(url):
    url = url.replace('pbshow/', '')  # Strip our show tag off.
    url = urlparse.urljoin(pbshow_link, url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            show_icon = re.compile(
                '<meta property="og:image" content="(.+?)"').findall(html)[0]
            table_content = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'span8 well'})[0]
            table_rows = dom_parser.parseDOM(table_content, 'tr')
            for row in table_rows:
                if 'href' in row:
                    ep_page, ep_summary, ep_title = re.compile(
                        '<a href="(.+?)".+?title="(.*?)">(.+?)</a>',
                        re.DOTALL).findall(row)[0]
                else:
                    continue
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <podbay>pbepisode/%s</podbay>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</item>" % (ep_title, ep_page, show_icon, ep_summary)
        except:
            pass
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_pbcats(url):
    xml = ""
    url = url.replace('pbcategory/', '')  # Strip our category tag off.
    try:
        url = urlparse.urljoin(pbcats_link, url)
        html = requests.get(url).content
        page_list = dom_parser.parseDOM(
            html, 'ul', attrs={'class': 'thumbnails'})[0]
        show_list = dom_parser.parseDOM(
            page_list, 'li', attrs={'class': 'span3'})
        for entry in show_list:
            try:
                show_url = dom_parser.parseDOM(entry, 'a', ret='href')[0]
                show_icon = dom_parser.parseDOM(entry, 'img', ret='src')[0]
                show_title = dom_parser.parseDOM(entry, 'h4')[0]
                show_title = refreshtitle(show_title)
                show_title = remove_non_ascii(show_title)
                xml += "<dir>"\
                       " <title>%s</title>"\
                       " <podbay>pbshow/%s</podbay>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</dir>" % (show_title, show_url, show_icon, show_title)
            except:
                continue
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wctopfiftystream(url):
    url = url.replace('topfifty/', '')  # Strip our category tag off.
    url = urlparse.urljoin('https://www.watchcartoononline.io', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            thediv = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'menulaststyle'})[0]
            lists = dom_parser.parseDOM(thediv, 'li')
            for entry in lists:
                try:
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(entry)[0]
                    title = refreshtitle(title)
                    title = remove_non_ascii(title)
                    xml += "<item>"\
                           " <title>%s</title>"\
                           " <wctoon>direct/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</item>" % (title, show_url, addon_icon, title)
                except:
                    continue
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def fxxx_tags(url):
    url = url.replace('fxmtag/', '')
    orig_tag = url.split("/")[0]
    url = urlparse.urljoin('http://fullxxxmovies.net/tag/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            try:
                tag_divs = dom_parser.parseDOM(
                    html, 'div', attrs={'id': 'mainAnninapro'})[0]
                vid_entries = dom_parser.parseDOM(tag_divs, 'article')
                for vid_section in vid_entries:
                    thumbnail = re.compile(
                        'src="(.+?)"', re.DOTALL).findall(str(vid_section))[0]
                    vid_page_url, title = re.compile(
                        'h3 class="entry-title"><a href="(.+?)" rel="bookmark">(.+?)</a></h3',
                        re.DOTALL).findall(str(vid_section))[0]
                    xml += "<item>"\
                           " <title>%s</title>"\
                           " <meta>"\
                           " <summary>%s</summary>"\
                           " </meta>"\
                           " <fxxxmovies>%s</fxxxmovies>"\
                           " <thumbnail>%s</thumbnail>"\
                           "</item>" % (title, title, vid_page_url, thumbnail)
            except:
                pass
            try:
                next_page = dom_parser.parseDOM(
                    html, 'a', attrs={'class': 'next page-numbers'},
                    ret='href')[0]
                next_page = next_page.split("/")[-2]
                xml += "<dir>"\
                       " <title>Next Page</title>"\
                       " <meta>"\
                       " <summary>Click here for more p**n bitches!</summary>"\
                       " </meta>"\
                       " <fxxxmovies>fxmtag/%s/page/%s</fxxxmovies>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</dir>" % (orig_tag, next_page, next_icon)
            except:
                pass
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def category_cnw(url):
    url = url.replace('category/', '')
    url = urlparse.urljoin('http://www.celebsnudeworld.com/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            cat_divs = dom_parser.parseDOM(
                html, 'ul', attrs={'class': 'videos'})[0]
            vid_entries = dom_parser.parseDOM(cat_divs, 'li')
            for vid_section in vid_entries:
                thumbnail = urlparse.urljoin(
                    'http://www.celebsnudeworld.com/',
                    re.compile('src="(.+?)"',
                               re.DOTALL).findall(str(vid_section))[0])
                vid_page_url, title = re.compile(
                    'href="(.+?)"\stitle="(.+?)"',
                    re.DOTALL).findall(str(vid_section))[0]
                vid_page_url = urlparse.urljoin(
                    'http://www.celebsnudeworld.com/', vid_page_url)
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <meta>"\
                       " <summary>%s</summary>"\
                       " </meta>"\
                       " <cnw>%s</cnw>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</item>" % (title, title, vid_page_url, thumbnail)
            try:
                try:
                    next_page = dom_parser.parseDOM(
                        html, 'a', attrs={'class': 'prevnext'}, ret='href')[1]
                except:
                    next_page = dom_parser.parseDOM(
                        html, 'a', attrs={'class': 'prevnext'}, ret='href')[0]
                next_page = next_page.replace('/', '', 1)
                xml += "<dir>"\
                       " <title>Next Page</title>"\
                       " <meta>"\
                       " <summary>Click here for more p**n bitches!</summary>"\
                       " </meta>"\
                       " <cnw>category/%s</cnw>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</dir>" % (next_page, next_icon)
            except:
                pass
        except:
            pass
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_stream(url):
    cat = url.split("/")[-1]
    pins = "PLugincobp" + cat
    Items = fetch_from_db2(pins)
    if Items:
        display_data(Items)
    xml = ""
    try:
        url = urlparse.urljoin('http://collectionofbestporn.com/', url)
        headers = {'User_Agent': User_Agent}
        html = requests.get(url, headers=headers).content
        vid_divs = dom_parser.parseDOM(
            html, 'div',
            attrs={'class': 'video-item col-sm-5 col-md-4 col-xs-10'})
        count = 0
        for vid_section in vid_divs:
            thumb_div = dom_parser.parseDOM(
                vid_section, 'div', attrs={'class': 'video-thumb'})[0]
            thumbnail = re.compile(
                '<img src="(.+?)"', re.DOTALL).findall(str(thumb_div))[0]
            vid_page_url = re.compile(
                'href="(.+?)"', re.DOTALL).findall(str(thumb_div))[0]
            title_div = dom_parser.parseDOM(
                vid_section, 'div', attrs={'class': 'title'})[0]
            title = remove_non_ascii(
                re.compile('title="(.+?)"',
                           re.DOTALL).findall(str(title_div))[0])
            count += 1
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <cobp>%s</cobp>"\
                   " <summary>%s</summary>"\
                   "</item>" % (title, thumbnail, vid_page_url, title)
        if count == 24:
            pagination = dom_parser.parseDOM(
                html, 'li', attrs={'class': 'next'})[0]
            next_page = dom_parser.parseDOM(pagination, 'a', ret='href')[0]
            xml += "<dir>"\
                   " <title>Next Page</title>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <cobp>%s</cobp>"\
                   "</dir>" % (addon_icon, next_page)
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_wcstream(url):
    url = url.replace('category/', '')  # Strip our category tag off.
    url = urlparse.urljoin('https://www.thewatchcartoononline.tv/', url)
    # url = urlparse.urljoin('https://www.watchcartoononline.com', url)
    pins = ""
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            # Pull the root list, as all the minor lists are contained within it.
            ddmcc = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'ddmcc'})[0]
            lists = dom_parser.parseDOM(ddmcc, 'li')
            for entry in lists:
                try:
                    movie_style = 0
                    try:
                        # If this fails, it is a movie/OVA series entry, as they
                        # use different html for those categories.
                        show_url, title = re.compile(
                            '<a href="(.+?)".+?>(.+?)</a>',
                            re.DOTALL).findall(entry)[0]
                    except:
                        show_url, title = re.compile(
                            '<a href="(.+?)">(.+?)</a>',
                            re.DOTALL).findall(entry)[0]
                        movie_style = 1
                    title = refreshtitle(title)
                    title = remove_non_ascii(title)
                    if movie_style == 1:
                        xml += "<item>"\
                               " <title>%s</title>"\
                               " <wctoon>direct/%s</wctoon>"\
                               " <thumbnail>%s</thumbnail>"\
                               " <summary>%s</summary>"\
                               "</item>" % (title, show_url, addon_icon, title)
                    else:
                        xml += "<dir>"\
                               " <title>%s</title>"\
                               " <wctoon>wcepisode/%s</wctoon>"\
                               " <thumbnail>%s</thumbnail>"\
                               " <summary>%s</summary>"\
                               "</dir>" % (title, show_url, addon_icon, title)
                except:
                    continue
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_wcpopular(url):
    pins = ""
    url = urlparse.urljoin('http://www.toonova.net/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            thedivs = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'series_list'})[1]
            list_items = dom_parser.parseDOM(thedivs, 'li')
            for content in list_items:
                try:
                    info_header = dom_parser.parseDOM(content, 'h3')[0]
                    show_url, title = re.compile(
                        '<a href="(.+?)">(.+?)</a>',
                        re.DOTALL).findall(info_header)[0]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    show_icon = re.compile(
                        'src="(.+?)"', re.DOTALL).findall(content)[0]
                    xml += "<dir>"\
                           " <title>%s</title>"\
                           " <wctoon>wcepisode/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</dir>" % (title, show_url, show_icon, title)
                except:
                    continue
            pagination = dom_parser.parseDOM(
                html, 'ul', attrs={'class': 'pagination'})[0]
            if len(pagination) > 0:
                list_items = dom_parser.parseDOM(pagination, 'li')
                next_li = list_items[(len(list_items) - 1)]
                next_url = 'popular-cartoon/%s' % (re.compile(
                    'href="http://www.toonova.net/popular-cartoon/(.+?)"',
                    re.DOTALL).findall(next_li)[0])
                xml += "<dir>"\
                       " <title>Next Page >></title>"\
                       " <wctoon>%s</wctoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>Next Page</summary>"\
                       "</dir>" % (next_url, show_icon)
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_wcdaily(url):
    pins = ""
    url = url.replace('wcdaily-', '')  # Strip our episode tag off.
    url = urlparse.urljoin('http://www.toonova.net/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            thetable = dom_parser.parseDOM(
                html, 'table', attrs={'id': 'updates'})[0]
            the_rows = dom_parser.parseDOM(thetable, 'tr')
            for content in the_rows:
                try:
                    the_lists = dom_parser.parseDOM(content, 'li')
                    for item in the_lists:
                        show_url, title = re.compile(
                            '<a href="(.+?)">(.+?)</a>',
                            re.DOTALL).findall(item)[0]
                        title = refreshtitle(title).replace('Episode ', 'EP:')
                        title = remove_non_ascii(title)
                        xml += "<dir>"\
                               " <title>%s</title>"\
                               " <wctoon>wcepisode/%s</wctoon>"\
                               " <thumbnail>%s</thumbnail>"\
                               " <summary>%s</summary>"\
                               "</dir>" % (title, show_url, addon_icon, title)
                except:
                    continue
            pagination = dom_parser.parseDOM(
                html, 'ul', attrs={'class': 'pagination'})[0]
            if len(pagination) > 0:
                list_items = dom_parser.parseDOM(pagination, 'li')
                next_li = list_items[(len(list_items) - 1)]
                next_url = 'wcdaily-updates/%s' % (re.compile(
                    'href="http://www.toonova.net/updates/(.+?)"',
                    re.DOTALL).findall(next_li)[0])
                xml += "<dir>"\
                       " <title>Next Page >></title>"\
                       " <wctoon>%s</wctoon>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>Next Page</summary>"\
                       "</dir>" % (next_url, addon_icon)
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_pcoshow(url):
    xml = ""
    url = url.replace('pcoshow/', '')  # Strip our show tag off.
    try:
        url = urlparse.urljoin(pcobase_link, url)
        url = url + '?showAllEpisodes=true'
        html = requests.get(url).content
        icon_item = dom_parser.parseDOM(
            html, 'div',
            attrs={'class': 'col-sm-3 col-xs-12 current-episode-img'})[0]
        show_icon = dom_parser.parseDOM(icon_item, 'img', ret='src')[0]
        try:
            # Pulls the latest/new episode at the top.
            latest_content = re.compile(
                '<div class="letestEpiDes">(.+?)</div>',
                re.DOTALL).findall(html)[0]
            episode_item = dom_parser.parseDOM(
                latest_content, 'h3', attrs={'class': 'dateTime'})[0]
            ep_title = re.compile(
                'href=".+?" style="color:inherit;">(.+?)</a>',
                re.DOTALL).findall(latest_content)[0]
            ep_page = urlparse.urljoin(
                pcobase_link,
                re.compile('href="(.+?)"',
                           re.DOTALL).findall(latest_content)[0])
            xml += "<item>"\
                   " <title>%s</title>"\
                   " <podcastone>pcoepisode/%s</podcastone>"\
                   " <thumbnail>%s</thumbnail>"\
                   " <summary>%s</summary>"\
                   "</item>" % (ep_title, ep_page, show_icon, ep_title)
        except:
            pass
        # Handles pulling the rest of the shows available for free.
        past_episodes = dom_parser.parseDOM(
            html, 'div',
            attrs={'class': 'col-xs-12 col-sm-12 col-md-12 col-lg-12'})[0]
        episode_list = dom_parser.parseDOM(
            past_episodes, 'h3', attrs={'class': 'dateTime'})
        for content in episode_list:
            try:
                ep_title = re.compile(
                    'href=".+?" style="color:inherit;">(.+?)</a>',
                    re.DOTALL).findall(content)[0]
                ep_page = urlparse.urljoin(
                    pcobase_link,
                    re.compile('href="(.+?)"', re.DOTALL).findall(content)[0])
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <podcastone>pcoepisode/%s</podcastone>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <summary>%s</summary>"\
                       "</item>" % (ep_title, ep_page, show_icon, ep_title)
            except:
                continue
    except:
        # failure = traceback.format_exc()
        # xbmcgui.Dialog().textviewer('Total Failure', str(failure))
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_stream(url):
    url = urlparse.urljoin('http://collectionofbestporn.com/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            vid_divs = dom_parser.parseDOM(
                html, 'div',
                attrs={'class': 'video-item col-sm-5 col-md-4 col-xs-10'})
            count = 0
            for vid_section in vid_divs:
                thumb_div = dom_parser.parseDOM(
                    vid_section, 'div', attrs={'class': 'video-thumb'})[0]
                thumbnail = re.compile(
                    '<img src="(.+?)"', re.DOTALL).findall(str(thumb_div))[0]
                vid_page_url = re.compile(
                    'href="(.+?)"', re.DOTALL).findall(str(thumb_div))[0]
                title_div = dom_parser.parseDOM(
                    vid_section, 'div', attrs={'class': 'title'})[0]
                title = remove_non_ascii(
                    re.compile('title="(.+?)"',
                               re.DOTALL).findall(str(title_div))[0])
                count += 1
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <meta>"\
                       " <summary>%s</summary>"\
                       " </meta>"\
                       " <cobp>%s</cobp>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</item>" % (title, title, vid_page_url, thumbnail)
            try:
                pagination = dom_parser.parseDOM(
                    html, 'li', attrs={'class': 'next'})[0]
                next_page = dom_parser.parseDOM(pagination, 'a', ret='href')[0]
                xml += "<dir>"\
                       " <title>Next Page</title>"\
                       " <meta>"\
                       " <summary>Click here for more p**n bitches!</summary>"\
                       " </meta>"\
                       " <cobp>%s</cobp>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</dir>" % (next_page, next_icon)
            except:
                pass
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_pbepisode(url):
    xml = ""
    url = url.replace('pbepisode/', '')  # Strip our episode tag off.
    try:
        html = requests.get(url).content
        ep_icon = re.compile(
            'property="og:image" content="(.*?)"', re.DOTALL).findall(html)[0]
        ep_title = re.compile(
            'property="og:title" content="(.*?)"', re.DOTALL).findall(html)[0]
        ep_title = refreshtitle(ep_title)
        url = dom_parser.parseDOM(
            html, 'a', attrs={'class': 'btn btn-mini btn-primary'},
            ret='href')[0]
        if 'mp3' not in url:
            url = re.compile('file: "(.*?)"', re.DOTALL).findall(html)[0]
        item = xbmcgui.ListItem(label=ep_title, path=url, iconImage=ep_icon,
                                thumbnailImage=ep_icon)
        item.setInfo(type="Video", infoLabels={"Title": ep_title})
        import resolveurl
        koding.Play_Video(url, showbusy=False, ignore_dp=True, item=item,
                          resolver=resolveurl)
    except:
        pass


def get_wclistvideos(url):
    pins = ""
    url = url.replace('list-videos/', '')  # Strip our episode tag off.
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            the_divs = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'vmargin'})
            for video_entry in the_divs:
                iframe = re.compile(
                    'iframe src="(.+?)"', re.DOTALL).findall(video_entry)[0]
                html = requests.get(iframe)
                nurl = re.findall(
                    r'''file:\s*['\"]([^'\"]+)['\"](?:\,\s*label:\s*|)(?:['\"]|)([\d]+|)''',
                    html.text)
                if len(nurl) == 1:
                    host = nurl[0][0].split('//')[1].replace('www.', '')
                    host = host.split('/')[0].split('.')[1].upper()
                    xml += "<item>"\
                           " <title>%s</title>"\
                           " <wctoon>direct/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</item>" % (host, str(nurl[0][0]), addon_icon, host)
            # Cache under the same key used by fetch_from_db above.
            save_to_db(xml, url)
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def get_pcoshow(url):
    url = url.replace('pcoshow/', '')  # Strip our show tag off.
    url = urlparse.urljoin(pcobase_link, url)
    url = url + '?showAllEpisodes=true'
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            prog_id = re.compile('progID: (.+?),', re.DOTALL).findall(html)[0]
            url = pcoepisodes_link % (prog_id)
            # e.g. https://www.podcastone.com/pg/jsp/program/pasteps_cms.jsp?size=1000&amountToDisplay=1000&page=1&infiniteScroll=true&progID=1181&showTwitter=false&pmProtect=false&displayPremiumEpisodes=false&startAt=0
            html = requests.get(url).content
            past_episodes = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'flex no-wrap align-center'})
            for episode in past_episodes:
                try:
                    ep_link, ep_title = re.compile(
                        '<h3 class="dateTime"><a href="(.+?)" style="color:inherit;">(.+?)</a>',
                        re.DOTALL).findall(episode)[0]
                    ep_page = urlparse.urljoin(pcobase_link, ep_link)
                    ep_icon = re.compile(
                        'img src="(.+?)"', re.DOTALL).findall(episode)[0]
                    xml += "<item>"\
                           " <title>%s</title>"\
                           " <podcastone>pcoepisode/%s</podcastone>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</item>" % (ep_title, ep_page, ep_icon, ep_title)
                except:
                    continue
        except:
            pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def FullMatch_MLB_Replays(url):
    url = url.replace('mlb_replay/', '')
    page_id = url
    url = base_full_match % (
        (json_cat_url % (mlb_info['per_page'], mlb_info['category'], page_id)))
    try:
        xml = ""
        # Pass headers as a keyword argument so they are sent as HTTP headers.
        response = requests.get(url, headers=headers).json()
        try:
            if 'invalid' in response['code']:
                return
        except:
            pass
        for post in response:
            title = clean_titles(post['title']['rendered'])
            content = post['content']['rendered']
            description = decodeEntities(
                re.compile('<h2>(.+?)</h2>').findall(content)[0])
            try:
                icon_js = requests.get(
                    post['_links']['wp:featuredmedia'][0]['href'].replace(
                        '\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except:
                icon = addon_icon
            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
            if len(sources) > 0:
                xml += "<item>"\
                       " <title>[COLOR red]%s[/COLOR]</title>"\
                       " <meta>"\
                       " <summary>%s</summary>"\
                       " </meta>"\
                       " <link>" % (title, description)
                for source in sources:
                    if 'http' not in source:
                        source = 'http:%s' % source
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += " <sublink>%s(%s)</sublink>" % (source, host)
                xml += " </link>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</item>" % (icon)
    except:
        pass
    try:
        xml += "<dir>"\
               " <title>Next Page >></title>"\
               " <fullmatch>mlb_replay/%s</fullmatch>"\
               "</dir>" % (str(int(page_id) + 1))
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def pornstars_eporner(url):
    url = urlparse.urljoin('https://www.eporner.com/', url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            headers = {'User_Agent': User_Agent}
            html = requests.get(url, headers=headers).content
            profile_divs = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'mbprofile'})
            for profile in profile_divs:
                thumbnail = re.compile(
                    'src="(.+?)"', re.DOTALL).findall(str(profile))[0]
                profile_url, title = re.compile(
                    'href="(.+?)"+\stitle="(.+?)"',
                    re.DOTALL).findall(str(profile))[0]
                # profile_url = profile_url.replace('/', '', 1)
                xml += "<dir>"\
                       " <title>%s</title>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <eporner>%s</eporner>"\
                       " <summary>%s</summary>"\
                       "</dir>" % (title, thumbnail, profile_url, title)
            try:
                next_page = dom_parser.parseDOM(
                    html, 'a', attrs={'title': 'Next page'}, ret='href')[0]
                next_page = next_page.replace('/', '', 1)
                xml += "<dir>"\
                       " <title>Next Page</title>"\
                       " <thumbnail>%s</thumbnail>"\
                       " <eporner>%s</eporner>"\
                       "</dir>" % (next_icon, next_page)
            except:
                pass
        except:
            pass
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_wcmainstream(subid):
    xml = ""
    subid = subid.replace('main/', '', 1)  # Strip our category tag off.
    subid = subid.split('/')
    pins = ""
    try:
        html = requests.get('https://www.watchcartoononline.com').content
        thedivs = dom_parser.parseDOM(
            html, 'div', attrs={'class': subid[0]})[int(subid[1])]
        list_items = dom_parser.parseDOM(thedivs, 'li')
        for content in list_items:
            try:
                if subid[0] == 'sidebar-titles':
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(content)[0]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    xml += "<dir>"\
                           " <title>%s</title>"\
                           " <wctoon>wcepisode/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</dir>" % (title, show_url, addon_icon, title)
                elif subid[0] == 'recent-release-main':
                    show_url, title = re.compile(
                        '<a href="(.+?)".+?>(.+?)</a>',
                        re.DOTALL).findall(content)[1]
                    title = refreshtitle(title).replace('Episode ', 'EP:')
                    title = remove_non_ascii(title)
                    show_icon = re.compile(
                        'src="(.+?)"', re.DOTALL).findall(content)[0]
                    xml += "<dir>"\
                           " <title>%s</title>"\
                           " <wctoon>wcepisode/%s</wctoon>"\
                           " <thumbnail>%s</thumbnail>"\
                           " <summary>%s</summary>"\
                           "</dir>" % (title, show_url, show_icon, title)
                else:
                    continue
            except:
                continue
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type(), pins)


def pornstars_cnw(url):
    xml = ""
    try:
        keyboard = xbmc.Keyboard('', 'Search for')
        keyboard.doModal()
        if keyboard.isConfirmed() != None and keyboard.isConfirmed() != "":
            search = keyboard.getText()
        else:
            return
        if search == None or search == "":
            xml += "<item>"\
                   " <title>Search Cancelled</title>"\
                   " <heading></heading>"\
                   " <thumbnail>%s</thumbnail>"\
                   "</item>" % (addon_icon)
            jenlist = JenList(xml)
            display_list(jenlist.get_list(), jenlist.get_content_type())
            return
        total = 0
        try:
            search_url = 'http://www.celebsnudeworld.com/search/pornstar/?s=%s' % search.replace(
                ' ', '+')
            html = requests.get(search_url).content
            results = dom_parser.parseDOM(
                html, 'div', attrs={'class': 'model'})
            if len(results) == 0:
                dialog = xbmcgui.Dialog()
                dialog.ok('Search Results', 'Search Results are empty')
                return
            for star in results:
                thumbnail = urlparse.urljoin(
                    'http://www.celebsnudeworld.com/',
                    re.compile('src="(.+?)"',
                               re.DOTALL).findall(str(star))[0])
                vid_page_url, title = re.compile(
                    'href="(.+?)"\stitle="(.+?)"',
                    re.DOTALL).findall(str(star))[0]
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <meta>"\
                       " <summary>%s</summary>"\
                       " </meta>"\
                       " <cnw>%s</cnw>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</item>" % (title, title, vid_page_url, thumbnail)
                total += 1
        except:
            pass
    except:
        pass
    if total > 0:
        jenlist = JenList(xml)
        display_list(jenlist.get_list(), jenlist.get_content_type())


def fxxx_play(url):
    try:
        progress_dialog = xbmcgui.DialogProgress()
        progress_dialog.create(
            "Finding Streams",
            "Get your tissues ready while we find the movie....")
        progress = 0
        headers = {'User_Agent': User_Agent}
        vid_html = requests.get(url, headers=headers).content
        the_item = dom_parser.parseDOM(
            vid_html, 'div', attrs={'class': 'entry-content'})[0]
        progress += 10
        progress_dialog.update(progress)
        streams = re.compile('href="(.+?)"', re.DOTALL).findall(str(the_item))
        try:
            title = re.compile(
                '<em>(.+?)</em>', re.DOTALL).findall(str(the_item))[0]
        except:
            title = re.compile(
                'property="og:title" content="(.+?)"',
                re.DOTALL).findall(str(vid_html))[0]
        progress += 10
        progress_dialog.update(progress)
        names = []
        sources = []
        for stream in streams:
            progress += 5
            progress_dialog.update(progress)
            if 'securely.link' in stream:
                response = requests.get(stream)
                stream = response.url
            elif any(x in stream for x in ignore_items):
                continue
            if 'openload' in stream:
                names.append('Openload')
                sources.append(stream)
            elif 'streamango' in stream:
                names.append('Streamango')
                sources.append(stream)
        progress_dialog.close()
        selected = xbmcgui.Dialog().select(
            '[COLOR lime]4QED XXX[/COLOR] - Get the tissues ready!', names)
        if selected == -1:
            return
        item = xbmcgui.ListItem(label=title, path=sources[selected],
                                iconImage=addon_icon,
                                thumbnailImage=addon_icon)
        item.setInfo(type="Video", infoLabels={"Title": title})
        import resolveurl
        koding.Play_Video(sources[selected], showbusy=False, ignore_dp=True,
                          item=item, resolver=resolveurl)
    except:
        return


def FullMatch_WWE_Replays(url):
    url = url.replace('wwe_replay/', '')
    page_id = url
    url = base_full_match % (
        (json_cat_url % (wwe_info['per_page'], wwe_info['category'], page_id)))
    try:
        xml = ""
        # Pass headers as a keyword argument so they are sent as HTTP headers.
        response = requests.get(url, headers=headers).json()
        try:
            if 'invalid' in response['code']:
                return
        except:
            pass
        for post in response:
            title = clean_titles(post['title']['rendered'])
            if 'wwe' not in title.lower():
                continue
            content = post['content']['rendered']
            description = decodeEntities(
                re.compile('<h2>(.+?)</h2>').findall(content)[0])
            try:
                icon_js = requests.get(
                    post['_links']['wp:featuredmedia'][0]['href'].replace(
                        '\\', ''))
                icon_js = json.loads(icon_js.text)
                icon = str(icon_js['guid']['rendered'])
            except:
                icon = addon_icon
            sources = dom_parser.parseDOM(str(content), 'iframe', ret='src')
            if len(sources) > 0:
                xml += "<item>"\
                       " <title>%s</title>"\
                       " <meta>"\
                       " <summary>%s</summary>"\
                       " </meta>"\
                       " <link>" % (title, description)
                for source in sources:
                    if 'http' not in source:
                        source = 'http:%s' % source
                    host = urlparse.urlparse(source).netloc.capitalize()
                    xml += " <sublink>%s(%s)</sublink>" % (source, host)
                xml += " </link>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</item>" % (icon)
    except:
        pass
    try:
        xml += "<dir>"\
               " <title>Next Page >></title>"\
               " <fullmatch>wwe_replay/%s</fullmatch>"\
               "</dir>" % (str(int(page_id) + 1))
    except:
        pass
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())


def get_pcoepisode(url):
    xml = ""
    url = url.replace('pcoepisode/', '')  # Strip our episode tag off.
    try:
        html = requests.get(url).content
        episode_item = dom_parser.parseDOM(
            html, 'div', attrs={'class': 'media-player'})[0]
        episode_item2 = dom_parser.parseDOM(
            html, 'div', attrs={'class': 'letestepi'})[0]
        ep_icon = dom_parser.parseDOM(
            episode_item2, 'img', attrs={'class': 'img-responsive'},
            ret='src')[0]
        ep_title = dom_parser.parseDOM(
            html, 'title')[0].replace('PodcastOne: ', '')
        play_url = re.compile(
            'href="(.+?)"', re.DOTALL).findall(episode_item)[0].replace(
                "\n", "").replace('/downloadsecurity?url=', '')
        url = pcoplay_link % play_url
        item = xbmcgui.ListItem(label=ep_title, path=url, iconImage=ep_icon,
                                thumbnailImage=ep_icon)
        item.setInfo(type="Video", infoLabels={"Title": ep_title})
        import resolveurl
        koding.Play_Video(url, showbusy=False, ignore_dp=True, item=item,
                          resolver=resolveurl)
    except:
        pass


def get_B98Main_Processor(url):
    url = url.replace('serieslist/', '')
    url = urlparse.urljoin(base_main_link, url)
    xml = fetch_from_db(url)
    if not xml:
        xml = ""
        try:
            html = requests.get(url).content
            item_list = dom_parser.parseDOM(
                html, 'div',
                attrs={'class': 'item col-lg-3 col-md-3 col-sm-12 '})
            for content in item_list:
                link = re.compile(
                    'href="(.+?)"', re.DOTALL).findall(content)[0]
                icon, title = re.compile(
                    'img src="(.+?)" alt="(.+?)"',
                    re.DOTALL).findall(content)[0]
                try:
                    link = link.replace(base_main_link, '')
                    title = replaceHTMLCodes(title)
                    if 'videos_categories' in link:
                        if 'Darkwing Duck' not in title:  # Why Dandy? Why?
                            xml += "<dir>"\
                                   " <title>[COLOR red]%s[/COLOR]</title>"\
                                   " <meta>"\
                                   " <summary>%s</summary>"\
                                   " </meta>"\
                                   " <B98>serieslist/%s</B98>"\
                                   " <thumbnail>%s</thumbnail>"\
                                   "</dir>" % (title, title, link, icon)
                    else:
                        xml += "<item>"\
                               " <title>[COLOR red]%s[/COLOR]</title>"\
                               " <meta>"\
                               " <summary>%s</summary>"\
                               " </meta>"\
                               " <B98>playtoon/%s</B98>"\
                               " <thumbnail>%s</thumbnail>"\
                               "</item>" % (title, title, link, icon)
                except:
                    continue
            try:
                navi_link = re.compile(
                    'a class="next page-numbers" href="(.+?)"',
                    re.DOTALL).findall(html)[0]
                xml += "<dir>"\
                       " <title>[COLOR red]Next Page >>[/COLOR]</title>"\
                       " <meta>"\
                       " <summary>Click here to see the next page of awesome content!</summary>"\
                       " </meta>"\
                       " <B98>serieslist/%s</B98>"\
                       " <thumbnail>%s</thumbnail>"\
                       "</dir>" % (navi_link, next_icon)
            except:
                pass
        except:
            pass
        save_to_db(xml, url)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())