def WXFList(url, page=1, onelist=None):
    """List WXF videos for one page and add a 'Next Page' directory entry.

    url: listing URL containing a '/page/N/' segment.
    page: 1-based page number currently requested.
    onelist: when truthy, rewrites the URL to the requested page and
             suppresses the 'Next Page' entry.
    Returns None if the page cannot be fetched.
    """
    if onelist:
        url = url.replace('/page/1/', '/page/' + str(page) + '/')
    sort = getWXFSortMethod()
    # Append the sort filter, respecting an existing query string.
    if re.search('\?', url, re.DOTALL | re.IGNORECASE):
        url = url + '&filtre=' + sort + '&display=extract'
    else:
        url = url + '?filtre=' + sort + '&display=extract'
    try:
        listhtml = utils.getHtml(url, '')
    except Exception as e:
        return None
    # match = re.compile('src="([^"]+)" class="attachment-thumb_site.*?<a href="([^"]+)" title="([^"]+)".*?<p>([^<]+)</p>', re.DOTALL | re.IGNORECASE).findall(listhtml)
    match = re.compile('<article id=.*?<a href="([^"]+)" title="([^"]+)".*?<img data-src="([^"]+)" alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)  # Current as of 19.02.23
    # for img, videopage, name, desc in match:
    for videopage, name, img, desc in match:  # Current as of 19.02.23
        name = utils.cleantext(name)
        desc = utils.cleantext(desc)
        utils.addDownLink(name, videopage, 13, img, desc)
    if not onelist:
        # The site advertises further pages via a <link rel="next"> tag.
        if re.search('<link rel="next"', listhtml, re.DOTALL | re.IGNORECASE):
            npage = page + 1
            url = url.replace('/page/' + str(page) + '/', '/page/' + str(npage) + '/')
            utils.addDir('Next Page (' + str(npage) + ')', url, 11, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def v7_cat(url):
    """Build the vidz7 category directory, showing each category's count."""
    html = utils.getHtml(url, 'http://www.vidz7.com/')
    pattern = re.compile('li><a href="([^"]+)">(.*?)</a><span>([^<]+)<', re.DOTALL | re.IGNORECASE)
    for catpage, name, nr in pattern.findall(html):
        label = '{0} [COLOR orange]{1}[/COLOR]'.format(utils.cleantext(name), nr.strip())
        utils.addDir(label, catpage, 641, '', 1)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def v7_list(url, page=None, search=None):
    """List vidz7 videos sorted by date, with duration/HD tags and paging.

    url: base listing URL (pagination is re-derived from this original URL).
    page: page number to fetch; defaults to 1 when absent.
    search: extra query fragment appended verbatim to the URL.
    Returns None if the page cannot be fetched.
    """
    orig_url = str(url)
    if page:
        page_end = 'page/' + str(page) + '/' if url.endswith('/') else '/page/' + str(page) + '/'
        url += page_end
    else:
        page = 1
    sort = '?orderby=date' if url.endswith('/') else '/?orderby=date'
    url += sort
    url = url + search if search else url
    try:
        listhtml = utils.getHtml(url)
    except Exception as e:
        return None
    match = re.compile('''class='thumb-wrapp'.*?href='([^']+)'.*?"([^"]+)".*?class='vl'(.*?)class="duration">(.*?)</div>.*?class='hp'[^>]+>([^<]+)<''', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videopage, img, hd, duration, name in match:
        hd = ' [COLOR orange]HD[/COLOR] ' if 'HD' in hd else ' '
        name = utils.cleantext(name) + hd + duration.strip()
        utils.addDownLink(name, videopage, 642, img, '')
    # Derive the last page number from the pagination button block.
    pages_html = re.compile('<div class="buttons">(.*?)</div', re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
    pages = re.compile('<a[^>]+>(.*?)</a', re.DOTALL | re.IGNORECASE).findall(pages_html)
    pages = [int(p.replace(' ', '').replace('...', '').strip()) for p in pages]
    max_page = max(pages)
    if page < max_page:
        utils.addDir('Next Page (' + str(page + 1) + ')', orig_url, 641, '', page + 1, search)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def pl_cat(url):
    """Build the porns.land category directory."""
    html = utils.getHtml(url, 'https://porns.land/')
    rx = re.compile('<div class="category".*?href="([^"]+)".*?data-original="([^"]+)".*?alt="([^"]+)"', re.DOTALL | re.IGNORECASE)
    for catpage, img, name in rx.findall(html):
        utils.addDir(utils.cleantext(name), catpage, 621, img, 1)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Cat(url):
    """Category directory; appends the video count to each label."""
    html = utils.getHtml(url, '')
    rx = re.compile('<a class="item" href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?videos">([^<]+)<', re.DOTALL | re.IGNORECASE)
    for catpage, name, img, videos in rx.findall(html):
        label = utils.cleantext(name) + " [COLOR deeppink]" + videos + "[/COLOR]"
        utils.addDir(label, catpage, 361, img, '')
    xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
    """List absoluporn videos with HD/FULLHD badge and duration; add Next Page.

    Returns None if the page cannot be fetched.
    """
    try:
        listhtml = utils.getHtml(url, '')
    except:
        return None
    match = re.compile('thumb-main-titre"><a href="..([^"]+)".*?title="([^"]+)".*?src="([^"]+)".*?<div class="thumb-info">(.*?)time">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videourl, name, img, hd, duration in match:
        name = utils.cleantext(name)
        # The captured thumb-info fragment carries the quality markers.
        if hd.find('hd') > 0:
            if hd.find('full') > 0:
                hd = " [COLOR yellow]FULLHD[/COLOR] "
            else:
                hd = " [COLOR orange]HD[/COLOR] "
        else:
            hd = " "
        videopage = "http://www.absoluporn.com" + videourl
        videopage = videopage.replace(" ", "%20")  # site URLs may contain spaces
        name = name + hd + "[COLOR deeppink]" + duration + "[/COLOR]"
        utils.addDownLink(name, videopage, 302, img, '')
    try:
        nextp = re.compile(r'<span class="text16">\d+</span> <a href="..([^"]+)"').findall(listhtml)[0]
        nextp = nextp.replace(" ", "%20")
        utils.addDir('Next Page', 'http://www.absoluporn.com' + nextp, 301, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def datoporn_cat(url):
    """Category directory sorted alphabetically by title."""
    listhtml = utils.getHtml(url)
    rx = re.compile('''class="vid_block".*?href="([^"]+)".*?url[(]([^)]+)[)].*?<span>([^<]+)</span>.*?<b>([^<]+)</b''', re.DOTALL | re.IGNORECASE)
    entries = sorted(rx.findall(listhtml), key=lambda e: e[3].strip().lower())
    for catpage, img, count, name in entries:
        label = "{0} [COLOR deeppink]{1}[/COLOR]".format(utils.cleantext(name.strip()), count.strip())
        utils.addDir(label, catpage, 671, img, 1)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Cat(url): listhtml = utils.getHtml(url, "") match = re.compile('<li><a href="([^"]+)" rel="tag">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml) for catpage, name in match: name = utils.cleantext(name) utils.addDir(name, catpage, 371, "", "") xbmcplugin.endOfDirectory(utils.addon_handle)
def pornvibe_cat(url):
    """Category directory sorted alphabetically, labels carry video counts."""
    html = utils.getHtml(url)
    rx = re.compile('''<img src="([^"]+)" alt="([^"]+)">.+?href="([^"]+)".*?<p>([^&]+)&''', re.DOTALL | re.IGNORECASE)
    for img, name, catpage, count in sorted(rx.findall(html), key=lambda e: e[1].strip().lower()):
        label = "{0} [COLOR deeppink]{1} videos[/COLOR]".format(utils.cleantext(name.strip()), count.strip())
        utils.addDir(label, catpage, 681, img, 1)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def PTList(url, page=1, onelist=None):
    """List porntrex videos for one page and add a 'Next Page' entry.

    url: listing URL containing a 'page=N' query parameter.
    page: 1-based page number currently requested.
    onelist: when truthy, rewrites the URL to the requested page and
             suppresses the 'Next Page' entry.
    Returns None if the page cannot be fetched.

    Fix: the original called re.search(...).group() unconditionally; a thumb
    URL without a 2+-digit run made it return None and raised AttributeError,
    killing the whole listing. Such items are now skipped instead.
    """
    if onelist:
        url = url.replace('page=1', 'page=' + str(page))
    try:
        listhtml = utils.getHtml(url, '')
    except:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    match = re.compile(r'<div class="(?:visible-xs|thumb-overlay)+">\s+<img src=.*?data-original="([^"]+)" title="([^"]+)"[^>]+>(.*?)duration">[^\d]+([^\t\n\r]+)', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, name, hd, duration in match:
        name = utils.cleantext(name)
        hd = " [COLOR orange]HD[/COLOR] " if hd.find('HD') > 0 else " "
        # The numeric video id is embedded in the thumbnail URL.
        id_match = re.search(r"(\d{2,})", img, re.DOTALL | re.IGNORECASE)
        if not id_match:
            continue  # no usable id -> cannot build the config.php link
        urlid = id_match.group()
        videopage = "http://www.porntrex.com/media/nuevo/config.php?key=" + urlid + "-1-1"
        name = name + hd + "[COLOR deeppink]" + duration + "[/COLOR]"
        utils.addDownLink(name, videopage, 52, img, '')
    if not onelist:
        if re.search('class="prevnext">Next', listhtml, re.DOTALL | re.IGNORECASE):
            npage = page + 1
            url = url.replace('page=' + str(page), 'page=' + str(npage))
            utils.addDir('Next Page (' + str(npage) + ')', url, 51, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Categories(url):
    """Category directory; every entry links to the post-date-sorted view."""
    html = utils.getHtml(url, '')
    rx = re.compile(r'<li>\s+<a href="([^"]+)"[^<]+<[^<]+<img.*?src="([^"]+)".*?title">([^<]+)<', re.DOTALL | re.IGNORECASE)
    for catpage, img, name in rx.findall(html):
        utils.addDir(utils.cleantext(name), catpage + '?sortby=post_date', 341, img, '')
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Cat(url):
    """Category directory with the (space-stripped) video count in the label."""
    html = utils.getHtml3(url)
    rx = re.compile('<a class="list-item__link" href="([^"]+)" title="([^"]+)".*?class="list-item__info">([^<]+)', re.DOTALL | re.IGNORECASE)
    for catpage, name, videos in rx.findall(html):
        label = utils.cleantext(name) + " [COLOR deeppink]" + videos.replace(' ', '') + "[/COLOR]"
        utils.addDir(label, catpage, 361)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Cat(url):
    """Extract the Categories section of the page and list its links."""
    html = utils.getHtml(url, '')
    section = re.compile('<h2>Categories(.+?)<tr id="myRow">', re.DOTALL | re.IGNORECASE).findall(html)[0]
    for catpage, name in re.compile('<a href="(.+?)"\s+title=".+?">(.+?)<', re.DOTALL | re.IGNORECASE).findall(section):
        utils.addDir(utils.cleantext(name), catpage, 371, '', '')
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Categories(url): listhtml = utils.getHtml(url, "") match = re.compile( '<a href="(.+?)" title=".+?">\n.+?<div class="thumb">\n.+?<img class="thumb" src="(.+?)" alt="(.+?)"/>' ).findall(listhtml) for catpage, img, name in match: name = utils.cleantext(name) utils.addDir(name, catpage, 341, img, "") xbmcplugin.endOfDirectory(utils.addon_handle)
def ChannelList(url):
    """List videos of one hclips channel, with best-effort Next Page."""
    html = utils.getHtml(url, '')
    rx = re.compile('<a href="([^"]+)" class="thumb" data-rt=".+?">.+?<img width="220" height="165" src="([^"]+)" alt="([^"]+)"', re.DOTALL)
    for videopage, img, name in rx.findall(html):
        utils.addDownLink(utils.cleantext(name), 'http://www.hclips.com' + videopage, 382, img, '')
    try:
        nxt = re.compile('<li class="next">.+?<a href="([^"]+)".*?>Next</a>', re.DOTALL | re.IGNORECASE).findall(html)
        utils.addDir('Next Page', 'http://www.hclips.com' + nxt[0], 386, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Channels(url): listhtml = utils.getHtml(url, '') match = re.compile('<a href="([^"]+)" class="video_thumb" title="([^"]+)">.+?<img height="165" width="285" src="([^"]+)"', re.DOTALL).findall(listhtml) for chanpage, name, img in match: name = utils.cleantext(name) utils.addDir(name, "http://hclips.com" + chanpage, 386, "http://hclips.com" + img, '') try: nextp=re.compile(r'<li class="next">\s+<a href="([^"]+)".*?>Next</a>', re.DOTALL | re.IGNORECASE).findall(listhtml) utils.addDir('Next Page', 'http://www.hclips.com' + nextp[0], 385,'') except: pass xbmcplugin.endOfDirectory(utils.addon_handle)
def ListSearch(url):
    """Search-result listing; newlines are stripped before matching."""
    html = utils.getHtml(url, '').replace('\n', '')
    rx = re.compile('bookmark">([^<]+)</a></h1>.*?<img src="([^"]+)".*?href="([^"]+)"')
    for name, img, videopage in rx.findall(html):
        utils.addDownLink(utils.cleantext(name), videopage, 422, img, '')
    try:
        nxt = re.compile('<link rel="next" href="(.+?)" />', re.DOTALL | re.IGNORECASE).findall(html)
        utils.addDir('Next Page', nxt[0], 425, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def WXFCat(url): cathtml = utils.getHtml(url, '') # # match = re.compile('<img width=.+?src="(.+?)".+?a href="(.+?)"\s+title="(.+?)".+?span class="nb_cat border.+?>(.+?)<', re.DOTALL | re.IGNORECASE).findall(cathtml) match = re.compile('<article id=.*?<a href="([^"]+)" title="([^"]+)".*?data-lazy-src="([^"]+)".*? alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(cathtml) # Current as of 19.02.23 for catpage, name, img, videos in match: # Current as of 19.02.23 # for img, catpage, name, videos in match: catpage = catpage + 'page/1/' name = utils.cleantext(name) name = name # + ' [COLOR deeppink]' + videos + '[/COLOR]' utils.addDir(name, catpage, 11, img, 1) xbmcplugin.endOfDirectory(utils.addon_handle)
def XTList(url, page=1):
    """XT video listing with sort filter applied and Next Page support.

    Returns None (after notifying the user) if the page cannot be fetched.
    """
    sort = getXTSortMethod()
    # Append the sort filter, respecting an existing query string.
    if re.search('\?', url, re.DOTALL | re.IGNORECASE):
        url = url + '&filtre=' + sort + '&display=extract'
    else:
        url = url + '?filtre=' + sort + '&display=extract'
    try:
        listhtml = utils.getHtml(url, '')
    except:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    match = re.compile('src="([^"]+?)" class="attachment.*?<a href="([^"]+)" title="([^"]+)".*?<div class="right">.<p>([^<]+)</p>', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, videopage, name, desc in match:
        name = utils.cleantext(name)
        desc = utils.cleantext(desc)
        utils.addDownLink(name, videopage, 23, img, desc)
    if re.search('<link rel="next"', listhtml, re.DOTALL | re.IGNORECASE):
        npage = page + 1
        url = url.replace('/page/' + str(page) + '/', '/page/' + str(npage) + '/')
        utils.addDir('Next Page (' + str(npage) + ')', url, 21, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def pl_channels(url):
    """porns.land series directory, alphabetized, with best-effort paging."""
    html = utils.getHtml(url, 'https://porns.land/')
    rx = re.compile('<div class="serie".*?href="([^"]+)".*?data-original="([^"]+)".*?<h2>([^<]+)<', re.DOTALL | re.IGNORECASE)
    for catpage, img, name in sorted(rx.findall(html), key=lambda e: e[2]):
        utils.addDir(utils.cleantext(name), catpage, 621, img, 1)
    try:
        nxt = re.compile('<a href="([^"]+)" data-ci-pagination-page="([^"]+)" rel="next"', re.DOTALL | re.IGNORECASE).findall(html)[0]
        utils.addDir('Next Page (' + nxt[1] + ')', nxt[0], 624, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Models(url):
    """hdzog model directory with best-effort Next Page.

    Fix: removed the Python 2 `print "next: ", ...` debug statement — it is a
    syntax error under Python 3 and served no purpose.
    """
    listhtml = utils.getHtml(url, '')
    match = re.compile('<a href="(.+?)" title="(.+?)">\n.+?<div class="thumb">\n.+?<img src="(.+?)"').findall(listhtml)
    for catpage, name, img in match:
        name = utils.cleantext(name)
        utils.addDir(name, catpage, 341, img, '')
    try:
        nextp = re.compile('<a href="(.+?)" title="Next Page" data-page-num.+?>Next page').findall(listhtml)
        utils.addDir('Next Page', 'http://www.hdzog.com' + nextp[0], 346, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Channels(url):
    """hdzog channel directory with best-effort Next Page.

    Fix: removed the Python 2 `print "next: ", ...` debug statement — it is a
    syntax error under Python 3 and served no purpose.
    """
    listhtml = utils.getHtml(url, '')
    match = re.compile('<A href="([^"]+)"[^<]+<[^<]+<img.*?src="([^"]+)" alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for catpage, img, name in match:
        name = utils.cleantext(name)
        utils.addDir(name, catpage, 341, img, '')
    try:
        nextp = re.compile('href="(/channels/[^"]+)" title="Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
        utils.addDir('Next Page', 'http://www.hdzog.com' + nextp[0], 345, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def TPNSearchList(url):
    """TPN search-result listing with best-effort Next Page.

    Fix: renamed the local variable `next` to `next_url` — it shadowed the
    `next()` builtin.
    """
    listhtml = utils.getHtml(url, '')
    match = re.compile('class="item">.*?<a href="([^"]+)".*?src="([^"]+)" alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videopage, img, name in match:
        name = utils.cleantext(name)
        utils.addDownLink(name, videopage, 122, img, '')
    try:
        nextp = re.compile('link rel="next" href="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
        next_url = nextp[0]
        utils.addDir('Next Page', next_url, 127, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def animeidhentai_list(url):
    """List hentai episodes; tags HD/FHD quality and uncensored releases."""
    listhtml = utils.getHtml(url, site.url)
    match = re.compile(
        r'<article.+?data-src="(.*?)".+?link-co">([^<]+).+?mgr(.+?)description\s*dn">(?:<p>)?([^<]+).+?href="([^"]+)',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, name, hd, plot, video in match:
        # Quality markup sits between the title and the description blocks.
        if '>hd<' in hd.lower():
            name = name + " [COLOR orange]HD[/COLOR]"
        elif '1080p' in hd.lower():
            name = name + " [COLOR orange]FHD[/COLOR]"
        if 'uncensored' in name.lower():
            name = name.replace(
                'Uncensored', '') + " [COLOR hotpink][I]Uncensored[/I][/COLOR]"
        site.add_download_link(utils.cleantext(name), video, 'animeidhentai_play', img, utils.cleantext(plot))
    next_page = re.compile('rel="next" href="([^"]+)"', re.DOTALL | re.IGNORECASE).search(listhtml)
    if next_page:
        site.add_dir('Next Page', next_page.group(1), 'animeidhentai_list', site.img_next)
    utils.eod()
def XTList(url, page=1):
    """XT listing variant that paginates from the pristine original URL,
    so the sort filter is not appended twice.

    Returns None if the page cannot be fetched.
    """
    original_url = str(url)
    sort = getXTSortMethod()
    if re.search('\?', url, re.DOTALL | re.IGNORECASE):
        url = url + '&filtre=' + sort + '&display=extract'
    else:
        url = url + '?filtre=' + sort + '&display=extract'
    try:
        listhtml = utils.getHtml(url, '')
    except:
        return None
    match = re.compile('src="([^"]+)" class="attachment-thumb_site.*?<a href="([^"]+)"[^<]+<span>([^<]+).*?">.*?<p>([^<]+)', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, videopage, name, desc in match:
        name = utils.cleantext(name)
        desc = utils.cleanhtml(desc)
        desc = utils.cleantext(desc)
        utils.addDownLink(name, videopage, 23, img, desc)
    if re.search('<link rel="next"', listhtml, re.DOTALL | re.IGNORECASE):
        npage = page + 1
        next_url = original_url.replace('/page/' + str(page) + '/', '/page/' + str(npage) + '/')
        utils.addDir('Next Page (' + str(npage) + ')', next_url, 21, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def WXFCat(url): cathtml = utils.getHtml(url, '') # # match = re.compile('<img width=.+?src="(.+?)".+?a href="(.+?)"\s+title="(.+?)".+?span class="nb_cat border.+?>(.+?)<', re.DOTALL | re.IGNORECASE).findall(cathtml) match = re.compile( '<article id=.*?<a href="([^"]+)" title="([^"]+)".*?data-lazy-src="([^"]+)".*? alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(cathtml) # Current as of 19.02.23 for catpage, name, img, videos in match: # Current as of 19.02.23 # for img, catpage, name, videos in match: catpage = catpage + 'page/1/' name = utils.cleantext(name) name = name # + ' [COLOR deeppink]' + videos + '[/COLOR]' utils.addDir(name, catpage, 11, img, 1) xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url, page=1): if addon.getSetting("chaturbate") == "true": clean_database(False) listhtml = utils._getHtml(url) match = re.compile(r'room_list_room".+?href="([^"]+).+?src="([^"]+).+?<div[^>]+>([^<]+)</div>.+?href[^>]+>([^<]+)<.+?age">([^<]+).+?title="([^"]+).+?location.+?>([^<]+).+?cams">([^<]+)', re.DOTALL | re.IGNORECASE).findall(listhtml) for videopage, img, status, name, age, subject, location, duration in match: name = utils.cleantext(name.strip()) age = utils.cleantext(age.strip()) subject = utils.cleantext(subject.strip()) + "[CR][COLOR deeppink]Location: [/COLOR]" + utils.cleantext(location.strip()) + "[CR]" + utils.cleantext(duration.strip()) status = utils.cleantext(status.replace("[CR]", "").strip()) name = name + " [COLOR deeppink][" + age + "][/COLOR] " + status videopage = bu[:-1] + videopage site.add_download_link(name, videopage, 'Playvid', img, subject, noDownload=True) nextp = re.compile(r'<a\s*href="([^"]+)"\s*class="next', re.DOTALL | re.IGNORECASE).search(listhtml) if nextp: page = page + 1 if page else 2 next = bu[:-1] + nextp.group(1) site.add_dir('Next Page (' + str(page) + ')', next, 'List', site.img_next, page) utils.eod()
def v7_tag(url):
    """Tag directory for vidz7; tags with zero videos are skipped."""
    taghtml = utils.getHtml(url, site.url)
    rx = re.compile(
        r'<li\s*class="\s*(?:popclick)?"><a\s*href="([^"]+)">([^<]+)</a><span\s*class="count">\(([^)]+)'
    )
    for tagpage, name, nr in rx.findall(taghtml):
        count = int(nr.replace(' ', ''))
        if count <= 0:
            continue
        label = '{0} [COLOR orange]{1} Videos[/COLOR]'.format(utils.cleantext(name), count)
        site.add_dir(label, tagpage + '?orderby=date', 'v7_list', '')
    utils.eod()
def List(url):
    """Video listing with a fallback regex for search-result markup.

    Returns None if the page cannot be fetched.
    """
    try:
        listhtml = utils.getHtml(url)
    except:
        utils.kodilog('site error')
        return None
    match = re.compile('class="item-thumbnail".+?href="([^"]+)">.+?srcset="(\S+).+?title="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
    if match:
        for videopage, img, name in match:
            name = utils.cleantext(name)
            utils.addDownLink(name, videopage, 312, img, '')
    else:
        # search result pages order the attributes differently
        match = re.compile('class="item-thumbnail".+?href="([^"]+)".+?title="([^"]+)".+?srcset="(\S+)', re.DOTALL | re.IGNORECASE).findall(listhtml)
        for videopage, name, img in match:
            name = utils.cleantext(name)
            utils.addDownLink(name, videopage, 312, img, '')
    try:
        next_page = re.compile('href="([^"]+)">»<').findall(listhtml)[0]
        page_nr = re.findall('\d+', next_page)[-1]
        utils.addDir('Next Page (' + page_nr + ')', next_page, 311, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def animeidhentai_list(url):
    """Episode listing; marks releases whose title or meta says 'uncensored'."""
    listhtml = utils.getHtml(url)
    rx = re.compile(r'<article.+?title">([^<]+).+?meta">(.+?)</div.+?src="([^"]+).+?href="([^"]+)', re.DOTALL | re.IGNORECASE)
    for name, other, img, video in rx.findall(listhtml):
        if 'uncensored' in other.lower() or 'uncensored' in name.lower():
            name = name.replace('Uncensored', '') + " [COLOR yellow][I]Uncensored[/I][/COLOR]"
        site.add_download_link(utils.cleantext(name), video, 'animeidhentai_play', img, name)
    nxt = re.compile(r'href="([^"\s]+)"\s*class="next', re.DOTALL | re.IGNORECASE).search(listhtml)
    if nxt:
        site.add_dir('Next Page', nxt.group(1), 'animeidhentai_list', site.img_next)
    utils.eod()
def List(url):
    """Video listing with a pagination link parsed from the navigation bar."""
    html = utils.getHtml(url, '')
    rx = re.compile(r'boxtitle">.+?href="([^"]+).+?title="([^"]+).+?src="([^"]+)', re.DOTALL | re.IGNORECASE)
    for videopage, name, img in rx.findall(html):
        clean = utils.cleantext(name)
        site.add_download_link(clean, videopage, 'Playvid', img, clean)
    found = re.compile(r'''navigation'>.+?rel="next"\s*href="([^"]+)''', re.DOTALL | re.IGNORECASE).search(html)
    if found:
        link = found.group(1)
        site.add_dir('Next Page... ({0})'.format(link.split('/')[-2]), link, 'List', site.img_next)
    utils.eod()
def XTList(url, page=1):
    """XT listing variant that captures the full right-hand description block.

    Returns None (after notifying the user) if the page cannot be fetched.
    NOTE(review): third near-duplicate XTList in this module — consider
    consolidating.
    """
    sort = getXTSortMethod()
    if re.search('\?', url, re.DOTALL | re.IGNORECASE):
        url = url + '&filtre=' + sort + '&display=extract'
    else:
        url = url + '?filtre=' + sort + '&display=extract'
    try:
        listhtml = utils.getHtml(url, '')
    except:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    match = re.compile(r'src="([^"]+?)" class="attachment.*?<a href="([^"]+)" title="([^"]+)".*?<div class="right">(.*?)</div>\s+</li>', re.DOTALL | re.IGNORECASE).findall(listhtml)
    for img, videopage, name, desc in match:
        name = utils.cleantext(name)
        desc = utils.cleanhtml(desc)
        desc = utils.cleantext(desc)
        utils.addDownLink(name, videopage, 23, img, desc)
    if re.search('<link rel="next"', listhtml, re.DOTALL | re.IGNORECASE):
        npage = page + 1
        url = url.replace('/page/' + str(page) + '/', '/page/' + str(npage) + '/')
        utils.addDir('Next Page (' + str(npage) + ')', url, 21, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
    """Model listing; derives the play id from the thumbnail URL.

    NOTE(review): the slice img[32:-17] and the '1'/'10' prefixing assume a
    fixed thumbnail URL layout — confirm against the live site before changing.
    """
    listhtml = utils.getHtml2(url)
    match = re.compile("model_detail=(.*?)&.*?img src=(.*?)jpg.*?</div>", re.DOTALL | re.IGNORECASE).findall(listhtml)
    for name, img in match:
        name = utils.cleantext(name)
        img = img + 'jpg'  # regex consumed the extension; restore it
        url = img[32:-17]
        if len(url) == 7:
            url = '10' + url
        else:
            url = '1' + url
        utils.addDownLink(name, url, 272, img, '', noDownload=True)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Categories(url):
    """Category directory built from the vintagetube JSON API."""
    jdata = json.loads(utils.getHtml(url, site.url))
    for cat in jdata["data"]:
        count = cat["videos"]
        title = cat["title"] if utils.PY3 else cat["title"].encode('utf8')
        name = utils.cleantext(title) + '[COLOR lightpink] ({})[/COLOR]'.format(count)
        caturl = 'https://api.vintagetube.xxx/api/v1/categories/{}?sort=latest&c=100&offset=0'.format(cat["slug"])
        site.add_dir(name, caturl, 'List', cat["thumb"])
    utils.eod()
def List(url): listhtml = utils.getHtml(url, '') match = re.compile(r'class="item.+?href="([^"]+).+?original="([^"]+)(.+?)le">\s*([^<]+).+?on">([^<]+)', re.DOTALL | re.IGNORECASE).findall(listhtml) for videopage, img, hd, name, duration in match: hd = 'HD' if 'class="is-hd"' in hd else '' name = utils.cleantext(name.strip()) site.add_download_link(name, videopage, 'Playvid', img, name, duration=duration, quality=hd) npage = re.search(r'class="next.+?href="([^"]+)', listhtml, re.DOTALL | re.IGNORECASE) if npage: purl = site.url + npage.group(1) site.add_dir('Next Page ({0})'.format(purl.split('/')[-2]), purl, 'List', site.img_next) utils.eod()
def List(url, page=0): try: postRequest = {'page' : str(page)} response = utils.postHtml(url, form_data=postRequest,headers={},compression=False) except: return None match = re.compile(r'<div class="video-item">[^"]+"/watch/([^"]+)"[^/]+/[^/]+/[^/]+/([^"]+)" alt="([^"]+)', re.DOTALL | re.IGNORECASE).findall(response) for video, img, name in match: name = utils.cleantext(name) img = "https:/" + img utils.addDownLink(name, video, 612, img, '') npage = page + 1 utils.addDir('Next Page (' + str(npage) + ')', url, 611, '', npage) xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url, page=0):
    """POST-paged video listing; labels carry the duration in hotpink.

    Returns None if the request fails.
    """
    try:
        response = utils.postHtml(url, form_data={'page': str(page)}, headers={}, compression=False)
    except:
        return None
    rx = re.compile(r'class="video-item">.+?href="([^"]+).+?src="([^"]+)"\s*alt="([^"]+).+?time">([^<]+)', re.DOTALL | re.IGNORECASE)
    for video, img, name, length in rx.findall(response):
        label = utils.cleantext(name) + ' [COLOR hotpink]' + length + '[/COLOR]'
        site.add_download_link(label, site.url + video, 'Playvid', img, '')
    site.add_dir('Next Page (' + str(page + 1) + ')', url, 'List', site.img_next, page + 1)
    utils.eod()
def Cat(url):
    """Category directory parsed item-by-item, sorted by raw item markup."""
    cathtml = utils.getHtml(url, '')
    for item in sorted(re.compile('class="item.+?</a>', re.DOTALL | re.IGNORECASE).findall(cathtml)):
        catpage = re.compile('href="([^"]+)').findall(item)[0]
        title = re.compile('title">([^<]+)').findall(item)[0]
        videos = re.compile('videos">([^<]+)').findall(item)[0]
        label = "{0} [COLOR deeppink][I]({1})[/I][/COLOR]".format(utils.cleantext(title.strip()), videos)
        img = '' if 'no image' in item else re.compile('src="([^"]+)').findall(item)[0]
        site.add_dir(label, catpage, 'List', img)
    utils.eod()
def List(url): html = utils.getHtml(url, '') match = re.compile(r'<div\s*class="entry-content">.*?lazy-src="([^"]+)".*?<a href="([^"]+)"\s*class="more-link">.+?<span\s*class="screen-reader-text">([^"]+)</span>', re.DOTALL | re.IGNORECASE).findall(html) for img, videopage, name in match: if 'ubiqfile' in name.lower(): continue name = utils.cleantext(name) site.add_download_link(name, videopage, 'Playvid', img, name) nextp = re.compile(r'<a\s*class="next.*?href="(.+?)">', re.DOTALL | re.IGNORECASE).search(html) if nextp: site.add_dir('Next Page', nextp.group(1), 'List', site.img_next) utils.eod()
def List(url): try: listhtml = utils.getHtml(url, '') except: return None match = re.compile('<div class="thumb">.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml) for videopage, img, name in match: name = utils.cleantext(name) utils.addDownLink(name, videopage, 142, img, '') try: nextp=re.compile('<a class="nextpostslink" rel="next" href="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml) utils.addDir('Next Page', nextp[0], 141,'') except: pass xbmcplugin.endOfDirectory(utils.addon_handle)
def Categories(url):
    """Collect categories from the first three pages, then sort by title."""
    rx = re.compile(r'<article.+?href="([^"]+).+?src="([^"]+).+?title">([^<]+)', re.DOTALL)
    for pg in range(1, 4):
        purl = url + 'page/{0}/'.format(pg) if pg > 1 else url
        for catpage, img, name in rx.findall(utils.getHtml(purl, '')):
            site.add_dir(utils.cleantext(name), catpage, 'List', img)
    xbmcplugin.addSortMethod(utils.addon_handle, xbmcplugin.SORT_METHOD_TITLE)
    utils.eod()
def List(url): listhtml = utils.getHtml(url, '') match = re.compile('<a href="(.+?)" title="(.+?)">\n.+?<img src="(.+?)".+?style="position').findall(listhtml) for videopage, name, img in match: print "Processing: " + name name = utils.cleantext(name) utils.addDownLink(name, videopage, 242, img, '') try: print "Adding next" nextp=re.compile("<span class='current'>[0-9]+</span><a href='(.+?)'", re.DOTALL | re.IGNORECASE).findall(listhtml) nextp = nextp[0] utils.addDir('Next Page', nextp, 241,'') except: pass xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url, page=0): try: postRequest = {'page' : str(page)} response = utils.postHtml(url, form_data=postRequest,headers={},compression=False) except: return None match = re.compile('<div class="video-item">.*?a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="video-time">([^<]+)', re.DOTALL | re.IGNORECASE).findall(response) for video, img, name, length in match: video = 'https://daftsex.com' + video name = '[COLOR hotpink]' + length + '[/COLOR] ' + utils.cleantext(name) utils.addDownLink(name, video, 612, img, '') npage = page + 1 utils.addDir('Next Page (' + str(npage) + ')', url, 611, '', npage) xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url, page=1):
    """JSON-API listing. `url` is '<siteurl>/<token>' where the token is
    either 'search.<query>', a '<c1>.<c2>' category pair, or a plain slug.

    Pages hold 60 items; a Next Page entry is added while items remain.
    """
    siteurl, url = url.rsplit('/', 1)
    siteurl += '/'
    apiurl = siteurl + 'api/json/videos/86400/str/{0}/60/{1}.{2}.{3}.all..{4}.json'
    surl = siteurl + 'api/videos.php?params=259200/str/relevance/60/search..{0}.all..&s={1}&sort=latest-updates&date=all&type=all&duration=all'
    if url.startswith('search.'):
        aurl = surl.format(page, url.split('search.')[-1])
    elif '.' in url:
        c1, c2 = url.split('.')
        aurl = apiurl.format('latest-updates', c1, c2, page, 'day')
    else:
        aurl = apiurl.format(url, '', '', page, 'day')
    jdata = json.loads(utils.getHtml(aurl, siteurl))
    if not jdata.get('videos'):
        utils.eod()
        return
    for item in jdata.get('videos'):
        name = item.get('title') if utils.PY3 else item.get('title').encode(
            'utf-8')
        duration = item.get('duration')
        hd = False
        # HD comes either from props.hd == "1" or an 'HD' category tag.
        if "props" in item.keys():
            if item["props"]:
                if "hd" in item["props"].keys():
                    if item["props"]["hd"] == "1":
                        hd = True
        if not hd and "categories" in item.keys():
            if "HD" in item["categories"].split(','):
                hd = True
        hd = 'HD' if hd else ''
        name = utils.cleantext(name)
        site.add_download_link(name, siteurl + item.get('video_id'), 'Playvid', item.get('scr'), name, duration=duration, quality=hd)
    if int(jdata.get('total_count')) - (60 * page) > 0:
        page += 1
        last_page = -(-int(jdata.get('total_count')) // 60)  # ceiling division
        site.add_dir('Next Page ({}/{})'.format(str(page), str(last_page)), siteurl + url, 'List', site.img_next, page)
    utils.eod()
def List(url): listhtml = utils.getHtml(url, '') match = re.compile(r'class="item.+?href="([^"]+).+?original="([^"]+).+?le">\s*([^<]+).+?on">([^<]+)', re.DOTALL | re.IGNORECASE).findall(listhtml) for videopage, img, name, duration in match: name = utils.cleantext(name.strip()) name += ' [COLOR deeppink]({0})[/COLOR]'.format(duration) site.add_download_link(name, videopage, 'Playvid', img, '') npage = re.search(r'class="next.+?href="([^"]+)', listhtml, re.DOTALL | re.IGNORECASE) if npage: purl = site.url + npage.group(1) site.add_dir('Next Page ({0})'.format(purl.split('/')[-2]), purl, 'List', site.img_next) utils.eod()
def Playvid(url, name, download=None):
    """Resolve and play a video: scrape the download links, let the user pick
    a quality, then hand the direct link to the player.

    url: video page URL.
    name: display title passed to the player.
    download: forwarded to utils.VideoPlayer (download instead of play).
    """
    vp = utils.VideoPlayer(name, download)
    vp.progress.update(25, "", "Loading video page", "")
    videopage = utils.getHtml(url, pdreferer, headers, data='')
    links = re.compile('<a href="([^"]+)" class="post_download_link clearfix">[^>]+>([^<]+)<', re.DOTALL | re.IGNORECASE).findall(videopage)
    sources = {}
    for videolink, resolution in links:
        sources[utils.cleantext(resolution)] = videolink
    # NOTE(review): sort key drops the last 3 chars of the quality label and
    # parses the rest as int ('UHD' labels rank highest) — assumes a fixed
    # label format; confirm against the site's link text.
    videourl = utils.selector('Select quality', sources, dont_ask_valid=True, sort_by=lambda x: 1081 if 'UHD' in x else int(x[:-3]), reverse=True)
    if not videourl:
        return
    videourl = utils.getVideoLink(videourl, url)
    vp.progress.update(75, "", "Loading video page", "")
    vp.play_from_direct_link(videourl)
def List(url, page=0): try: postRequest = {'page' : str(page)} response = utils.postHtml(url, form_data=postRequest,headers={},compression=False) except: return None match = re.compile('<div class="video-item">.*?a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="video-time">([^<]+)', re.DOTALL | re.IGNORECASE).findall(response) for video, img, name, length in match: video = 'https://daftsex.com' + video name = utils.cleantext(name) + ' [COLOR hotpink]' + length + '[/COLOR]' utils.addDownLink(name, video, 612, img, '') npage = page + 1 utils.addDir('Next Page (' + str(npage) + ')', url, 611, '', npage) xbmcplugin.endOfDirectory(utils.addon_handle)
def yourporn_list(url, page=None, section=None):
    # Video listing for yourporn. Two modes:
    #  * popular mode (section given): POSTs to the popular_append endpoint
    #    twice (offsets page and page+6) and concatenates the two HTML
    #    fragments;
    #  * normal mode: plain GET of a full listing page.
    # Returns None on any fetch failure.
    popular_mode = section if section else None
    try:
        if popular_mode and page:
            listhtml = utils.postHtml(url, compression=False, form_data={'period': 'week', 'popular_source': 'blogs', 'popular_mode': popular_mode, 'popular_off': page})
            # Second batch: the endpoint appears to page in steps of 6 items.
            page += 6
            listhtml += utils.postHtml(url, compression=False, form_data={'period': 'week', 'popular_source': 'blogs', 'popular_mode': popular_mode, 'popular_off': page})
        else:
            listhtml = utils.getHtml(url)
    except Exception as e:
        return None
    if popular_mode and page:
        # popular_append already returns just the item fragment.
        content = listhtml
    else:
        # Trim the page down to the posts container so the item regexes
        # don't pick up sidebar/footer thumbnails.
        content = re.compile('''<div id='(?:posts_container|search_container|topPosts_container)'.*?>(.*?)<div id=['"](?:center_control|footer)['"]>''', re.DOTALL | re.IGNORECASE).search(listhtml).group(1)
    # Large tiles: thumbnail src, link, title, duration.
    match_big = re.compile('''<div class='post_el'.*?<div class='vid_container'>.*? src='([^']+)'.*?href='([^']+)'.*?title='([^']+)'.*?<span class='duration.*?'>([^<]+)<''', re.DOTALL | re.IGNORECASE).findall(content)
    for img, video, name, duration in match_big:
        duration = duration.strip()
        # '??' presumably marks items without a playable video yet — skip.
        if duration == '??':
            continue
        name = utils.cleantext(name) + " [COLOR deeppink]" + duration + "[/COLOR]"
        utils.addDownLink(name, make_url(video), 652, make_url(img), '')
    # Small blog-style tiles. NOTE: the pattern deliberately contains a
    # literal " \n" between the href and src parts.
    match_small = re.compile('''<div class='blog_post_small'>.*?<div class='blog_post_small_title'>(.*?)</div>.*?href.*?href='([^']+)'.*? 
src='([^']+)'[^>]''', re.DOTALL | re.IGNORECASE).findall(content)
    for name, video, img in match_small:
        # Titles may contain markup; strip tags before cleaning.
        name = utils.cleantext(re.sub("<.*?>", '', name))
        utils.addDownLink(name, make_url(video), 652, make_url(img), '')
    if popular_mode:
        # Next offset: page was already bumped by 6 above, so this advances
        # past both fetched batches; the very first call starts at 12.
        page = page + 6 if page else 12
        utils.addDir('Next Page', 'https://yourporn.sexy/php/popular_append.php', 651, '', page, section=popular_mode)
    else:
        try:
            next_page = re.compile('''<a href='([^']+)' class='tdn'><div class='next''', re.DOTALL | re.IGNORECASE).search(content).group(1)
            next_page = make_url(next_page)
            utils.addDir('Next Page' , next_page, 651, '')
        except:
            # No next link on the last page — best-effort, ignore.
            pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
    """List live Chaturbate models scraped from the JSON blob embedded in the page.

    Defect fixed: ``age`` comes straight from ``json.loads`` and may be an
    int; concatenating it into the title string would raise TypeError —
    now coerced with ``str()``.
    """
    if utils.addon.getSetting("chaturbate") == "true":
        clean_database(False)
    data = utils._getHtml(url, site.url)
    # The model list is embedded in the page as "models': [...] ,'".
    data = re.compile(r"models':\s*(.*?),\s*'", re.DOTALL | re.IGNORECASE).findall(data)[0]
    data = re.sub(r'\s\s+', '', data)
    # Drop the trailing two characters and re-close the JSON array
    # (presumably strips a dangling ",]"-style tail — matches original).
    data = data[:-2] + ']'
    models = json.loads(data)
    for model in models:
        name = model.get('model_seo_name').replace('-', ' ').title()
        age = model.get('age')
        # On Python 2 the scraped strings must be encoded before display.
        subject = utils.cleantext(
            model.get('tagline') if utils.PY3 else model.get('tagline').
            encode('utf8'))
        if model.get('location'):
            subject += "[CR][CR][COLOR deeppink]Location: [/COLOR]{0}[CR][CR]".format(
                model.get('location') if utils.PY3 else model.get('location').
                encode('utf8'))
        if model.get('topic'):
            subject += utils.cleantext(
                model.get('topic') if utils.PY3 else model.get('topic').
                encode('utf8'))
        status = model.get('room_status')
        # str(age): the JSON value may be numeric.
        name = name + " [COLOR deeppink][" + str(age) + "][/COLOR] " + status
        mid = model.get('model_id')
        img = 'https://live-screencaps.vscdns.com/{0}-desktop.jpg'.format(mid)
        videourl = 'https://ws.vs3.com/chat/get-stream-urls.php?model_id={0}&video_host={1}'.format(
            mid, model.get('video_host'))
        site.add_download_link(name, videourl, 'Playvid', img, subject, noDownload=True)
    utils.eod()
def List(url):
    # Video listing with two pagination mechanisms: a plain next-page href,
    # or (when the href is an anchor "#...") an async get_block request
    # rebuilt from the block-id/parameters attributes.
    listhtml = utils.getHtml(url, site.url)
    # Restrict parsing to the region before the albums/tag boxes, if present.
    r = re.compile(r'<title>.+?(?:"list-albums"|"box\stag)', re.DOTALL | re.IGNORECASE).search(listhtml)
    if r:
        listhtml = r.group(0)
    match = re.compile(
        r'class="item.+?href="([^"]+).+?nal="([^"]+).+?le">\s*([^<]+).+?on">([^<]+)',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videopage, img, name, duration in match:
        name = utils.cleantext(name.strip())
        site.add_download_link(name, videopage, 'Playvid', img, name, duration=duration)
    nextp = re.compile(r'class="next"><a\s*href="([^"]+)', re.DOTALL | re.IGNORECASE).search(listhtml)
    if nextp:
        nextp = nextp.group(1)
        if nextp.startswith('#'):
            # Anchor-only link: reconstruct the async block request from the
            # data-block-id / data-parameters attributes.
            block, pars = re.compile(
                r'class="next">.+?block-id="([^"]+).+?parameters="([^"]+)',
                re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
            # Page number is the digits following "from" in the parameters.
            pno = re.compile(r'from[^\d]+(\d+)', re.IGNORECASE).findall(pars)[0]
            query = {
                'mode': 'async',
                'function': 'get_block',
                'block_id': block
            }
            # Parameters are ";"-separated "key:value" pairs; a "+" in the
            # key fans the same value out to several query keys.
            for par in pars.split(';'):
                par1, par2 = par.split(':')
                if '+' in par1:
                    for spar in par1.split('+'):
                        query.update({spar: par2})
                else:
                    query.update({par1: urllib_parse.unquote(par2)})
            nextp = "{0}?{1}".format(
                url.split('?')[0], urllib_parse.urlencode(query))
        else:
            # Regular relative/absolute href; page number is the last path segment.
            nextp = site.url[:-1] + nextp if 'http' not in nextp else nextp
            pno = nextp.split('/')[-2]
        site.add_dir('Next Page... ({0})'.format(pno), nextp, 'List', site.img_next)
    utils.eod()
def List(url):
    # Video listing with two pagination styles: a plain "/..."-relative next
    # href, or a data-block-id async request when the href is not relative.
    try:
        listhtml = utils.getHtml(url, '')
    except:
        return None
    match = re.compile(
        r'<a href="([^"]+)" class="item.+?vthumb=.+?thumb="([^"]+)".+?"duration">([^<]+)</div>.+?class="title">([^<]+)<',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for videopage, img, duration, name in match:
        name = utils.cleantext(name)
        site.add_download_link(name, videopage, 'Playvid', img, name, duration=duration)
    nextp = re.compile(r'class="next"><a href="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
    if nextp:
        # np/lp: next and last page numbers, used only for the label.
        np = re.compile(r':(\d+)">Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
        if np:
            np = np[0]
        else:
            np = ''
        lp = re.compile(r':(\d+)">Last', re.DOTALL | re.IGNORECASE).findall(listhtml)
        if lp:
            lp = '/' + lp[0]
        else:
            lp = ''
        nextp = nextp[0]
        if nextp.startswith('/'):
            # Site-relative href: prefix the host.
            nextp = site.url[:-1] + nextp
        else:
            # Otherwise rebuild the async get_block URL from the
            # data-block-id / data-parameters attributes.
            match = re.compile(
                r'class="next">.+?data-block-id="([^"]+)"\s+data-parameters="([^"]+)">Next<',
                re.DOTALL | re.IGNORECASE).findall(listhtml)
            if match:
                dbi, dp = match[0]
                # Parameters arrive as "key:value;..." — translate to a
                # query string and drop the "+from_albums" fan-out key.
                dp = dp.replace(':', '=').replace(';', '&').replace('+from_albums', '')
                nextp = '{0}?mode=async&function=get_block&block_id={1}&{2}'.format(
                    url.split('?mode')[0], dbi, dp)
        site.add_dir('Next Page ({}{})'.format(np, lp), nextp, 'List', site.img_next)
    utils.eod()
def List(url):
    """Video listing; notifies the user and aborts when the site is unreachable."""
    try:
        listhtml = utils.getHtml(url, '')
    except:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    item_re = re.compile(r"<a href='([^']+).*?src='([^']+)' id=\d+ alt='([^']+)'",
                         re.DOTALL | re.IGNORECASE)
    for videopage, img, name in item_re.findall(listhtml):
        utils.addDownLink(utils.cleantext(name), videopage, 322, img, '')
    try:
        # Best-effort: last page has no Next link, so the IndexError is ignored.
        nextp = re.compile('href="([^"]+)">Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
        utils.addDir('Next Page', nextp[0], 321, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def Actress(url):
    """Directory of actress pages, with a Next Page entry when one exists."""
    cathtml = utils.getHtml(url)
    actress_re = re.compile(
        r'class="tray-item-actress".+?href="([^"]+)".+?data-src="([^"]+)".+?actress-title">([^<]+)<',
        re.IGNORECASE | re.DOTALL)
    for caturl, img, name in actress_re.findall(cathtml):
        site.add_dir(utils.cleantext(name), caturl, 'List', img)
    nextp = re.compile('href="([^"]+)"><[^>]+>Next', re.DOTALL | re.IGNORECASE).search(cathtml)
    if nextp:
        # Page number is the tail of the "/pg-N" path segment.
        np = nextp.group(1)
        site.add_dir('Next Page (' + np.split('/pg-')[-1] + ')', np, 'Actress', site.img_next)
    utils.eod()
def PHList(url):
    """List PH videos from *url*; appends a Next Page entry when present."""
    listhtml = utils.getHtml(url, site.url)
    item_re = re.compile(
        r'anel-img">\s+<a href="([^"]+)">\s+<img.*?data-src="([^"]+)".*?alt="([^"]+)',
        re.DOTALL | re.IGNORECASE)
    for videopage, img, name in item_re.findall(listhtml):
        site.add_download_link(utils.cleantext(name), videopage, 'PHVideo', img, '')
    nextp = re.compile('<a href="([^"]+)"[^>]+>Next', re.DOTALL | re.IGNORECASE).search(listhtml)
    if nextp:
        site.add_dir('Next Page', nextp.group(1), 'PHList', site.img_next)
    utils.eod()
def PAQList(url, page=1, onelist=None):
    """Listing shared by the pornaq and porn00 hosts.

    Defects fixed:
    * the two host branches ran nearly identical code — porn00's pattern
      only differed by a literal space where pornaq's used ``\\s+``, and
      ``\\s+`` already matches that single space, so one pattern covers
      both hosts;
    * the next-page probe used a non-raw string containing ``\\d``, which
      triggers an invalid-escape warning on modern Python.

    When *onelist* is set, the caller supplies the target page via *page*
    and no Next Page entry is added.
    """
    if onelist:
        url = url.replace('page/1/', 'page/' + str(page) + '/')
    try:
        listhtml = utils.getHtml(url, '')
    except Exception:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    match = re.compile(
        r'<h2>\s+<a title="([^"]+)" href="([^"]+)".*?src="([^"]+)" class="attachment-primary-post-thumbnail',
        re.DOTALL | re.IGNORECASE).findall(listhtml)
    for name, videopage, img in match:
        name = utils.cleantext(name)
        utils.addDownLink(name, videopage, 62, img, '')
    if not onelist:
        # Offer the next page only while the paginator shows a current marker.
        if re.search(r"<span class='current'>\d+?</span><span>", listhtml, re.DOTALL | re.IGNORECASE):
            npage = page + 1
            url = url.replace('page/' + str(page) + '/', 'page/' + str(npage) + '/')
            utils.addDir('Next Page (' + str(npage) + ')', url, 61, '', npage)
    xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
    """Listing built from entry-content blocks; notifies when the site is down."""
    try:
        html = utils.getHtml(url, '')
    except:
        utils.notify('Oh oh', 'It looks like this website is down.')
        return None
    entry_re = re.compile(
        '<div class="entry-content">.*?<img src="([^"]+)".*?<a href="([^"]+)" class="more-link">.+?<span class="screen-reader-text">([^"]+)</span>',
        re.DOTALL | re.IGNORECASE)
    for img, videopage, name in entry_re.findall(html):
        utils.addDownLink(utils.cleantext(name), videopage, 422, img, '')
    try:
        # Best-effort next-page lookup; missing link on the last page is ignored.
        nextp = re.compile('<a class="next.*?href="(.+?)">', re.DOTALL | re.IGNORECASE).findall(html)
        utils.addDir('Next Page', nextp[0], 421, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
    """hdzog listing; the item pattern matches literal newlines in the markup."""
    try:
        listhtml = utils.getHtml(url, '')
    except:
        return None
    item_re = re.compile('<li>\n.+?<a href="(.+?)" title="(.+?)">\n.+?<div class="thumb thumb-paged" data-screen-main="1">\n\n.+?<img src="(.+?)"')
    for videopage, name, img in item_re.findall(listhtml):
        utils.addDownLink(utils.cleantext(name), videopage, 342, img, '')
    try:
        # Next link is host-relative; prefix it. Missing link is ignored.
        nextp = re.compile('<a href="(.+?)" title="Next Page" data-page-num.+?>Next page').findall(listhtml)
        utils.addDir('Next Page', 'http://www.hdzog.com' + nextp[0], 341, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)
def PHList(url):
    """PH listing variant that tolerates fetch and paging failures."""
    try:
        listhtml = utils.getHtml(url, '')
    except:
        return None
    item_re = re.compile(
        r'anel-img">\s+<a href="([^"]+)">\s+<img.*?data-src="([^"]+)".*?alt="([^"]+)',
        re.DOTALL | re.IGNORECASE)
    for videopage, img, name in item_re.findall(listhtml):
        utils.addDownLink(utils.cleantext(name), videopage, 72, img, '')
    try:
        # Best-effort: on the last page there is no Next link to find.
        nextp = re.compile('<a href="([^"]+)"[^>]+>Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
        utils.addDir('Next Page', nextp[0], 71, '')
    except:
        pass
    xbmcplugin.endOfDirectory(utils.addon_handle)