def __init__(self, params):
    """Favorites handler: dispatch on ``params["mode"]``.

    Modes:
      * ``main``            - build the static All/Movies/TVshows/Episodes sub-menu
      * ``add_favorite``    - INSERT one favorite row (sub_site, content, url, params)
      * ``delete_favorite`` - DELETE one favorite row and refresh the container
      * ``list_favorites``  - SELECT stored favorites and list them
      * ``clear_favorites`` - after a yes/no prompt, DELETE per-sub_site rows or DROP the table

    NOTE(review): the original file was collapsed onto single physical lines;
    the block nesting below was reconstructed and should be confirmed against
    upstream (in particular which statements sit inside the success branches).
    """
    site = self.__module__
    addon = Addon()
    common = addon.common
    mode = params["mode"]
    # Ensure the favorites table exists / is up to date before touching it.
    common.update_favorites_db()
    if mode == "main":
        # Static menu of favorite views; language ids 30884-30888 are the labels.
        __item_list_ = [
            {
                "site": site,
                "mode": "list_favorites",
                "title": addon.language(30884, True),
                "content": "all",
                "sub_site": params["sub_site"],
                "cover_url": addon.image("all.png"),
                "backdrop_url": addon.art(),
                "type": 3,
            },
            {
                "site": site,
                "mode": "list_favorites",
                "title": addon.language(30885, True),
                "content": "movies",
                "sub_site": params["sub_site"],
                "cover_url": addon.image("movies.png"),
                "backdrop_url": addon.art(),
                "type": 3,
            },
            {
                "site": site,
                "mode": "list_favorites",
                "title": addon.language(30886, True),
                "content": "tvshows",
                "sub_site": params["sub_site"],
                "cover_url": addon.image("tvshows.png"),
                "backdrop_url": addon.art(),
                "type": 3,
            },
            {
                "site": site,
                "mode": "list_favorites",
                "title": addon.language(30888, True),
                "content": "episodes",
                "sub_site": params["sub_site"],
                "cover_url": addon.image("scenes.png"),
                "backdrop_url": addon.art(),
                "type": 3,
            },
        ]
        addon.add_items(__item_list_)
        addon.end_of_directory()
    elif mode == "add_favorite":
        # The real item dict arrives serialized in params["__params_"];
        # str_update() rebuilds it into an AddonDict.
        params = AddonDict(common.addon_type()).str_update(params["__params_"])
        execute = "INSERT INTO " + common.fav_db_table + " (sub_site, content, url, __params_) VALUES (?, ?, ?, ?)"
        # Whole params dict is stored stringified so the item can be rebuilt on listing.
        inserted = common.db.execute(execute, (params["sub_site"], params["content"], params["url"], str(params)))
        if common.to_bool(inserted):
            # presumably 1 == newly inserted, 2 == already present — TODO confirm
            # against common.db.execute's contract.
            if inserted == 1:
                addon.alert(
                    str(
                        addon.language(30891, True)
                        + " "
                        + params["title"].decode("ascii", "ignore")  # Py2: strip non-ascii for display
                        + " "
                        + addon.language(30893, True)
                    )
                )
            if inserted == 2:
                addon.alert(str(params["title"].decode("ascii", "ignore") + " " + addon.language(30890, True)))
    elif mode == "delete_favorite":
        params = AddonDict(common.addon_type()).str_update(params["__params_"])
        execute = "DELETE FROM " + common.fav_db_table + " WHERE sub_site=? AND content=? AND url=?"
        deleted = common.db.execute(execute, (params["sub_site"], params["content"], params["url"]))
        if common.to_bool(deleted):
            addon.alert(
                str(
                    addon.language(30892, True)
                    + " "
                    + params["title"].decode("ascii", "ignore")
                    + " "
                    + addon.language(30894, True)
                )
            )
            # NOTE(review): refresh placement inside the success branch is
            # reconstructed from the collapsed source — confirm upstream.
            xbmc.executebuiltin("Container.Refresh")
    elif mode == "list_favorites":
        # "all" ignores the content column; otherwise filter on it too.
        if params["content"] == "all":
            sql_params = (params["sub_site"],)
            execute = "SELECT * FROM " + common.fav_db_table + " WHERE sub_site=?"
        else:
            sql_params = (params["sub_site"], params["content"])
            execute = "SELECT * FROM " + common.fav_db_table + " WHERE sub_site=? AND content=?"
        selected = common.db.fetchall(execute, sql_params)
        item_list = []
        if selected:
            # Row layout: (id, site, content, url, stringified-params).
            for this_id, site, content, url, params in selected:
                params = AddonDict(common.addon_type()).str_update(params)
                params["context"] = 4  # context-menu flavor for favorite entries
                item_list.extend([params])
        if item_list:
            addon.add_items(item_list)
        addon.end_of_directory()
    elif mode == "clear_favorites":
        """ Prompt user for confirmation prior to clearing all favorites / removing favorites table """
        # No sub_site -> wipe everything by dropping the table; otherwise
        # delete only that sub_site's rows.
        if not params["sub_site"]:
            execute = "DROP TABLE " + common.fav_db_table
            sql_params = ""
        else:
            execute = "DELETE FROM " + common.fav_db_table + " WHERE sub_site=?"
            sql_params = (params["sub_site"],)
        clear_favs = xbmcgui.Dialog().yesno(
            common.addon_name + " - " + addon.language(30895, True),
            " ",
            addon.language(30896, True),
            nolabel=addon.language(30899, True),
            yeslabel=addon.language(30898, True),
        )
        if common.to_bool(clear_favs):
            cleared = common.db.execute(execute, sql_params)
            if common.to_bool(cleared):
                # NOTE(review): SQLite's VACUUM statement does not take a table
                # name ("VACUUM <table>" is invalid SQL) — verify what
                # common.db.execute does with this, it may silently fail.
                common.db.execute("VACUUM " + common.fav_db_table)
                addon.alert(str(addon.language(30897, True)))
                xbmc.executebuiltin("Container.Refresh")
def __init__(self, params):
    """chaturbate.com site handler: dispatch on ``params['mode']``.

    Modes:
      * ``main``      - top menu (featured + by-gender/age/region/status)
      * ``bygender`` / ``byage`` / ``byregion`` / ``bystatus`` - static category menus
      * ``list``      - scrape a cam-listing page (with prev/next/goto paging)
      * ``play``      - extract the stream src from the room page and play it

    NOTE(review): original file was collapsed onto single physical lines;
    nesting below is reconstructed and should be confirmed against upstream.
    """
    import re
    from addon import Addon
    from addondict import AddonDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    base_url = 'https://chaturbate.com'
    home_url = base_url
    false_positives = ['#']  # hrefs that must never be treated as pages
    if mode == 'main':
        # NOTE(review): `image` is not defined in this block — presumably a
        # module-level global holding the addon's image directory; verify.
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30021),
                'content': '',
                'url': home_url,
                'cover_url': a.image('featuredcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'bygender',
                'title': a.language(30017),
                'content': '',
                'cover_url': a.image('bygender.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'byage',
                'title': a.language(30018),
                'content': '',
                'cover_url': a.image('byage.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'byregion',
                'title': a.language(30019),
                'content': '',
                'cover_url': a.image('byregion.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'bystatus',
                'title': a.language(30020),
                'content': '',
                'cover_url': a.image('bystatus.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        # Shared favorites/history and extended menu entries.
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'bygender':
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30022),
                'content': '',
                'url': base_url + '/female-cams/',
                'cover_url': a.image('femalecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30023),
                'content': '',
                'url': base_url + '/male-cams/',
                'cover_url': a.image('malecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30024),
                'content': '',
                'url': base_url + '/couple-cams/',
                'cover_url': a.image('couplecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30025),
                'content': '',
                'url': base_url + '/transsexual-cams/',
                'cover_url': a.image('transcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'byage':
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30026),
                'content': '',
                'url': base_url + '/teen-cams/',
                'cover_url': a.image('teencams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30027),
                'content': '',
                'url': base_url + '/18to21-cams/',
                'cover_url': a.image('18to21cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30028),
                'content': '',
                'url': base_url + '/20to30-cams/',
                'cover_url': a.image('20to30cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30029),
                'content': '',
                'url': base_url + '/30to50-cams/',
                'cover_url': a.image('30to50cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30030),
                'content': '',
                'url': base_url + '/mature-cams/',
                'cover_url': a.image('maturecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'byregion':
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30031),
                'content': '',
                'url': base_url + '/north-american-cams/',
                'cover_url': a.image('north-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30032),
                'content': '',
                'url': base_url + '/other-region-cams/',
                'cover_url': a.image('other-regioncams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30033),
                'content': '',
                'url': base_url + '/euro-russian-cams/',
                'cover_url': a.image('euro-russiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30034),
                'content': '',
                'url': base_url + '/philippines-cams/',
                'cover_url': a.image('philippinescams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30035),
                'content': '',
                'url': base_url + '/asian-cams/',
                'cover_url': a.image('asiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30036),
                'content': '',
                'url': base_url + '/south-american-cams/',
                'cover_url': a.image('south-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'bystatus':
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30037),
                'content': '',
                'url': base_url + '/exhibitionist-cams/',
                'cover_url': a.image('exhibitionistcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30038),
                'content': '',
                'url': base_url + '/hd-cams/',
                'cover_url': a.image('hdcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        # "goto": ask the user for a page number, then rewrite the page= query.
        if params.get('content', '') == 'goto':
            last_item = re.search('page=([0-9]+)', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000  # unknown page count: allow a large upper bound
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('page=[0-9]+', 'page=' + str(item), params['url']).replace(' ', '+')
            else:
                exit(1)  # user cancelled the dialog — abort the whole invocation
        html = a.get_page(params['url'])
        # Only parse the listing container, not the whole page.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'c-1 endless_page_template'}))
        item_list = []
        # These keys seed every generated item via AddonDict below.
        params['mode'] = 'play'
        params['content'] = 'episodes'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = ''
        params['sub_site'] = site
        if soup:
            ul = soup.find('ul', {'class': 'list'})
            if ul:
                addondict = AddonDict(0).update(params)
                for item in ul.findAll('li'):
                    _dict = addondict.copy()
                    clip_link = item.find('a')
                    if clip_link:
                        url = clip_link.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        # Per-cam metadata scraped from the details block.
                        ctitle = ''
                        cage = ''
                        cname = ''
                        ccams = ''
                        details = item.find('div', {'class': 'details'})
                        if details:
                            temp = details.find('a')
                            if temp:
                                cname = str(temp.contents[0])
                            temp = details.find(
                                'span', {'class': re.compile('age.*')})
                            if temp:
                                cage = temp.string.encode('utf-8')
                            temp = details.find('li', {'class': 'cams'})
                            if temp:
                                ccams = str(temp.contents[0])
                            temp = details.find('li', {'title': True})
                            if temp:
                                ctitle = temp.get('title').encode('UTF-8')
                        # Only list entries that yielded a performer name.
                        if cname:
                            usetitle = '%s [%syr, %s] %s' % (cname, cage, ccams, ctitle)
                            _dict['title'] = usetitle
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            img = item.find('img')
                            if img:
                                img = img.get('src')
                                if img.startswith('//'):
                                    img = 'http:' + img  # protocol-relative thumb
                            else:
                                img = ''
                            _dict['cover_url'] = a.image(img)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            item_list.extend([_dict])
        # Pagination links live in a separate ul.paging block.
        pages = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'ul', {'class': 'paging'}))
        if pages:
            previouspage = pages.find(
                'a', {'class': re.compile('prev.*')})
            nextpage = pages.find('a', {'class': re.compile('next.*')})
            lastpage = pages.find('span', {'class': 'endless_separator'})
            if lastpage:
                # Last page is the first anchor after the "..." separator.
                lastpage = lastpage.findNext('a')
            if previouspage:
                previouspage = previouspage.get('href').replace(' ', '+')
                if previouspage != '#':  # '#' marks a disabled paging link
                    if not previouspage.startswith('http://'):
                        previouspage = base_url + previouspage
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': previouspage,
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if nextpage:
                nextpage = nextpage.get('href').replace(' ', '+')
                if nextpage != '#':
                    if not nextpage.startswith('http://'):
                        nextpage = base_url + nextpage
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': nextpage,
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if lastpage:
                lastpage = lastpage.get('href').replace(' ', '+')
                if lastpage != '#':
                    if not lastpage.startswith('http://'):
                        lastpage = base_url + lastpage
                    # content='goto' makes the next invocation prompt for a page number.
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': lastpage,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        # The stream URL is embedded in inline JS: html += "src='...'".
        link = re.search('html \+= "src=\'(.+?)\'', html)
        if link:
            from playback import Playback
            Playback().play_this(link.group(1), params['title'], params['cover_url'], a.common.usedirsources())
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """xtheatre.net site handler: dispatch on ``params['mode']``.

    Modes:
      * ``main``       - top menu (all videos / categories / search)
      * ``categories`` - scrape the category grid with per-category video counts
      * ``list``       - scrape a video-listing page (search/goto paging supported)
      * ``play``       - collect candidate embed/iframe sources and let the user choose

    NOTE(review): original file was collapsed onto single physical lines;
    nesting below is reconstructed and should be confirmed against upstream.
    NOTE(review): false_positives lists watchxxxhd.net URLs although this
    handler scrapes xtheatre.net — looks copy/pasted; verify it still filters
    anything meaningful.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    home_url = 'http://xtheatre.net/'
    search_url = home_url + '?s='
    false_positives = [
        'http://watchxxxhd.net/watch-full-movies-hd/',
        'http://watchxxxhd.net',
        'http://watchxxxhd.net/category/movies/',
        'http://watchxxxhd.net/category/ategorized222/',
        'http://watchxxxhd.net/watch-full-movies-hd/'
    ]
    if mode == 'main':
        # NOTE(review): `image` is not defined in this block — presumably a
        # module-level global; verify.
        item_list = [
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url + '?filtre=date&cat=0',
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url + 'categories/',
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            },
            {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }
        ]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'ul', {'class': 'listing-cat'}))
        item_list = []
        if soup:
            for item in soup.findAll('li'):
                if item:
                    if item.a.get('href') not in false_positives:
                        # Video count badge, e.g. "12 videos" -> "12".
                        try:
                            vidcount = item.findAll(
                                'span', {'class': 'nb_cat border-radius-5'
                                         })[0].string.encode('UTF-8')
                            vidcount = re.sub('\svideo[s]*', '', vidcount)
                        except:
                            vidcount = '0'
                        # Skip empty categories.
                        if vidcount and vidcount != '0':
                            # Thumb: prefer the lazy-load attribute, fall back to src.
                            img = item.find('img')
                            if img:
                                try:
                                    img = img.get('data-lazy-src')
                                except:
                                    try:
                                        img = img.get('src')
                                    except:
                                        img = ''
                            if not img:
                                img = ''
                            title = item.a.get('title').encode(
                                'UTF-8') + ' (%s)' % vidcount
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.a.get('href'),
                                'content': '',
                                'title': title,
                                'cover_url': a.image(img, image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)  # user cancelled — abort
        elif params.get('content', '') == 'goto':
            last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000  # unknown page count: large upper bound
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(
            html, parseOnlyThese=SoupStrainer(
                'ul', {'class': 'listing-videos listing-extract'}))
        item_list = []
        # Seed keys copied into every generated item.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll(
                    'li', {'class': 'border-radius-5 box-shadow'}):
                if item:
                    if item.a.get('href') not in false_positives:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.a.get('href')
                        _dict['title'] = item.a.get('title').encode(
                            'UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        img = item.find('img')
                        if img:
                            try:
                                img = img.get('data-lazy-src')
                            except:
                                try:
                                    img = img.get('src')
                                except:
                                    img = ''
                        if not img:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site
                        plot = item.find('div', {'class': 'right'})
                        if plot:
                            plot = plot.p.contents[0].encode('utf-8')
                            _dict['plot'] = plot
                            _dict['plotoutline'] = plot
                        item_list.extend([_dict])
        # Pagination: find last / previous / next page links.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'pagination'}))
        last_item = False
        if soup:
            for item in soup.findAll('a'):
                if (item.string.encode('UTF-8') == 'Last »') or (item.get('class') == 'last'):
                    last_item = item.get('href')
                    break
            if last_item is False:
                # No explicit "last" link: fall through to the final
                # 'inactive' anchor (loop var keeps the last element).
                for last_item in soup.findAll('a', {'class': 'inactive'}):
                    pass
                if last_item:
                    last_item = last_item.get('href')
            item = soup.find('span', {'class': 'current'})
            if item:
                if item.parent:
                    item = item.parent
                # Siblings of the current page hold prev/next anchors.
                if item.previousSibling:
                    if item.previousSibling.find('a'):
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': item.previousSibling.a.get('href'),
                            'content': params['content'],
                            'title': a.language(30017, True),
                            'cover_url': a.image('previous.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                if item.nextSibling:
                    if item.nextSibling.find('a'):
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': item.nextSibling.a.get('href'),
                            'content': params['content'],
                            'title': a.language(30018, True),
                            'cover_url': a.image('next.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
        if last_item:
            # content='goto' triggers the page-number prompt next time.
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'url': last_item,
                'content': 'goto',
                'title': a.language(30019, True),
                'cover_url': a.image('goto.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'video-embed'}))
        item_list = []
        if soup:
            # The regex matches obfuscated tag names like s_c_r_i_p_t as well
            # as plain script tags.
            for script in soup.findAll(re.compile('s_*c_*r_*i_*p_*t')):
                item = ''
                if script.get('src'):
                    if 'http://videomega.tv/validatehash.php' in script[
                            'src']:
                        item = script['src']
                    elif 'ref=' in script.get('src'):
                        temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                         script.get('src'))
                        if temp:
                            item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                1)
                    xbmcdict = XBMCDict(0).update(params)
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
            # Direct iframe embeds.
            if soup.find('iframe', src=True):
                item = ''
                for iframe in soup.findAll('iframe', src=True):
                    if iframe.get('data-lazy-src'):
                        item = iframe.get('data-lazy-src')
                        # Some lazy srcs wrap the real URL in ...old=<url>.
                        r = re.search('.+old=(.+)$', item)
                        if r:
                            item = r.group(1)
                    else:
                        item = iframe.get('src').replace('\\', '')
                    xbmcdict = XBMCDict(0).update(params)
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
            # Fallback: iframes inside the video-infos description block.
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
                'div', {'id': 'video-infos'}))
            if soup:
                item = ''
                for p in soup.findAll('p'):
                    if p.iframe:
                        item = p.iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """beeg.com site handler (JSON API, not HTML scraping): dispatch on ``params['mode']``.

    Modes:
      * ``main``       - top menu (recent / categories / long videos / search)
      * ``categories`` - list popular + nonpopular tags from the API
      * ``list``       - list videos for an API URL, with computed prev/next/last paging
      * ``play``       - re-fetch the listing, find the video by id, pick best quality

    NOTE(review): original file was collapsed onto single physical lines;
    nesting below is reconstructed and should be confirmed against upstream.
    """
    import re
    import json
    import urllib
    from addon import Addon
    from addondict import AddonDict
    a = Addon()
    site = self.__module__
    mode = params['mode']
    api_version = 'v5'
    recent_url = 'http://beeg.com/api/%s/index/main/0/pc' % api_version
    # '%20' is interpolated so the literal itself isn't eaten by %-formatting.
    long_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=long%svideos' % (api_version, '%20')
    search_url = 'http://beeg.com/api/%s/index/search/0/pc?query=' % api_version
    tag_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=' % api_version
    img_url = 'http://img.beeg.com/236x177/%s.jpg'  # thumb URL keyed by video id
    data_markers = 'data=pc.US'  # substituted into {DATA_MARKERS} in stream URLs
    if mode == 'main':
        # NOTE(review): `image` is not defined in this block — presumably a
        # module-level global; verify.
        item_list = [
            {'site': site, 'mode': 'list', 'title': a.language(30003), 'content': '',
             'url': recent_url, 'cover_url': a.image('recent.png', image),
             'backdrop_url': a.art(), 'type': 3},
            {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
             'url': recent_url, 'cover_url': a.image('categories.png', image),
             'backdrop_url': a.art(), 'type': 3},
            {'site': site, 'mode': 'list', 'title': a.language(30039), 'content': '',
             'url': long_url, 'cover_url': a.image('longvideos.png', image),
             'backdrop_url': a.art(), 'type': 3},
            {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
             'url': search_url, 'cover_url': a.image('search.png', image),
             'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        data = json.loads(html)
        item_list = []
        tags = data.get('tags', None)
        if tags:
            popular = tags.get('popular', None)
            if popular:
                for item in popular:
                    # Tag slugs look like "name-count"; keep only the name part.
                    url_item = re.search('(.+?)-', str(item))
                    if url_item:
                        url_item = url_item.group(1)
                    else:
                        url_item = item
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': tag_url + url_item, 'content': '',
                                       'title': str(item).capitalize(),
                                       'cover_url': a.image(image, image),
                                       'backdrop_url': a.art(), 'type': 3}])
            nonpopular = tags.get('nonpopular', None)
            if nonpopular:
                for item in nonpopular:
                    url_item = re.search('(.+?)-', str(item))
                    if url_item:
                        url_item = url_item.group(1)
                    else:
                        url_item = item
                    # NOTE(review): only the nonpopular branch URL-quotes the
                    # tag; the popular branch does not — possibly an oversight.
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': tag_url + urllib.quote(url_item), 'content': '',
                                       'title': str(item).capitalize(),
                                       'cover_url': a.image(image, image),
                                       'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            item = a.search_input()
            if item:
                params['url'] = search_url + item.replace(' ', '+')
            else:
                exit(1)  # user cancelled — abort
        elif params.get('content', '') == 'goto':
            # Page index is embedded in the API path: .../<page>/pc.
            last_item = re.search('/([0-9]+)/pc', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('/[0-9]+/pc', '/' + str(item) + '/pc', params['url']).replace(' ', '+')
            else:
                exit(1)
        html = a.get_page(params['url'])
        item_list = []
        data = json.loads(html)
        allvideos = []
        videos = data.get('videos', None)
        if videos:
            for video in videos:
                nt_name = video.get('nt_name', '').encode('utf-8', 'ignore')
                ps_name = video.get('ps_name', '').encode('utf-8', 'ignore')
                atitle = video.get('title', '').encode('utf-8', 'ignore')
                vid_id = video.get('id', '').encode('utf-8', 'ignore')
                # 'na' is the API's placeholder for "not available".
                if nt_name.lower() == 'na':
                    nt_name = ''
                if ps_name.lower() == 'na':
                    ps_name = ''
                # NOTE(review): appended unconditionally, so an empty ps_name
                # yields "title - "; placement reconstructed — confirm upstream.
                atitle = '%s - %s' % (atitle, ps_name)
                if nt_name:
                    atitle += ' (%s)' % nt_name
                if vid_id:
                    allvideos.append([vid_id, atitle, video])
        if allvideos:
            # Seed keys copied into every generated item.
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '480'
            params['sub_site'] = site
            addondict = AddonDict(0).update(params)
            for number, name, idata in allvideos:
                _dict = addondict.copy()
                _dict['title'] = name
                _dict['tvshowtitle'] = _dict['title']
                _dict['originaltitle'] = _dict['title']
                _dict['cover_url'] = a.image(img_url % number)
                _dict['thumb_url'] = _dict['cover_url']
                _dict['poster'] = _dict['cover_url']
                # Playback re-fetches this listing URL and matches on 'count'.
                _dict['url'] = params['url']
                _dict['count'] = number
                item_list.extend([_dict])
        # Compute prev/next/last page URLs from the API's page count.
        pages = data.get('pages', 0)
        if pages != 0:
            pages -= 1  # API count -> highest zero-based page index
        page = re.search('/([0-9]+)/pc', params['url'])
        if page:
            page = int(page.group(1))
        else:
            page = 0
        previouspage = None
        nextpage = None
        lastpage = None
        if page > 0:
            previouspage = re.sub('/[0-9]+/pc', '/' + str(page - 1) + '/pc', params['url'])
        if pages > 1:
            lastpage = re.sub('/[0-9]+/pc', '/' + str(pages) + '/pc', params['url'])
        if page < pages:
            nextpage = re.sub('/[0-9]+/pc', '/' + str(page + 1) + '/pc', params['url'])
        if previouspage:
            item_list.extend([{'site': site, 'mode': 'list', 'url': previouspage,
                               'content': params['content'],
                               'title': a.language(30017, True),
                               'cover_url': a.image('previous.png', image),
                               'backdrop_url': a.art(), 'type': 3}])
        if nextpage:
            item_list.extend([{'site': site, 'mode': 'list', 'url': nextpage,
                               'content': params['content'],
                               'title': a.language(30018, True),
                               'cover_url': a.image('next.png', image),
                               'backdrop_url': a.art(), 'type': 3}])
        if lastpage:
            # content='goto' triggers the page-number prompt next time.
            item_list.extend([{'site': site, 'mode': 'list', 'url': lastpage,
                               'content': 'goto',
                               'title': a.language(30019, True),
                               'cover_url': a.image('goto.png', image),
                               'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        # Re-fetch the listing and locate the selected video by its id.
        html = a.get_page(params['url'])
        data = json.loads(html)
        video = None
        videos = data.get('videos', None)
        if videos:
            for vid in videos:
                if vid.get('id', None) == params['count']:
                    video = vid
                    break
        if video:
            img = img_url % video.get('id')
            name = params['title']
            # Quality fallback chain: 720p -> 480p -> 240p.
            url = video.get('720p', None)
            if not url:
                url = video.get('480p', None)
            if not url:
                url = video.get('240p', None)
            if url:
                url = 'http:' + re.sub('\{DATA_MARKERS\}', data_markers, url)
                from playback import Playback
                Playback().play_this(url, name, img, a.common.usedirsources())
            else:
                a.alert(a.language(30904, True), sound=False)
def __init__(self, __params_):
    """watchxxxfree.com site handler: dispatch on ``__params_['mode']``.

    Modes:
      * ``main``       - top menu (recent / categories / top stars / search)
      * ``categories`` - scrape the category grid with per-category video counts
      * ``topstars``   - scrape the pornstar tag cloud
      * ``list``       - scrape a video-listing page (search/goto paging supported)
      * ``play``       - collect candidate embed/iframe sources and let the user choose

    Uses the file's dunder-ish ``__name_`` local-variable convention.
    NOTE(review): original file was collapsed onto single physical lines;
    nesting below is reconstructed and should be confirmed against upstream.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    __site_ = self.__module__
    __mode_ = __params_['mode']
    __home_url_ = 'http://www.watchxxxfree.com/'
    __search_url_ = __home_url_ + '?s='
    # Hrefs that are navigation chrome, not real categories/videos.
    __false_positives_ = ['http://www.watchxxxfree.com/watch-full-movies-hd/',
                          'http://www.watchxxxfree.com',
                          'http://www.watchxxxfree.com/category/movies/',
                          'http://www.watchxxxfree.com/category/ategorized222/',
                          'http://www.watchxxxfree.com/watch-full-movies-hd/']
    if __mode_ == 'main':
        # NOTE(review): `image` is not defined in this block — presumably a
        # module-level global; verify.
        __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30003),
                         'content': 'movies', 'url': __home_url_ + '?filtre=date&cat=0',
                         'cover_url': a.image('recent.png', image),
                         'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'categories', 'title': a.language(30005),
                         'content': 'movies', 'url': __home_url_ + 'categories/',
                         'cover_url': a.image('categories.png', image),
                         'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'topstars', 'title': a.language(30015),
                         'content': 'movies', 'url': __home_url_ + 'top-pornstars/',
                         'cover_url': a.image('toppornstars.png', image),
                         'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30004),
                         'content': 'search', 'url': __search_url_,
                         'cover_url': a.image('search.png', image),
                         'backdrop_url': a.art(), 'type': 3}]
        __item_list_.extend(a.favs_hist_menu(__site_))
        __item_list_.extend(a.extended_menu())
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'categories':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'listing-cat'}))
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('li'):
                if __item_:
                    if __item_.a.get('href') not in __false_positives_:
                        # Video count badge, e.g. "12 videos" -> "12".
                        try:
                            __vidcount_ = __item_.findAll('span', {'class': 'nb_cat border-radius-5'})[0].string.encode('UTF-8')
                            __vidcount_ = re.sub('\svideo[s]*', '', __vidcount_)
                        except:
                            __vidcount_ = '0'
                        # Skip empty categories.
                        if __vidcount_ and __vidcount_ != '0':
                            __title_ = __item_.a.get('title').encode('UTF-8') + ' (%s)' % __vidcount_
                            __item_list_.extend([{'site': __site_, 'mode': 'list',
                                                  'url': __item_.a.get('href'),
                                                  'content': __params_['content'],
                                                  'title': __title_,
                                                  'cover_url': a.image(__item_.img.get('src'), image),
                                                  'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'topstars':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'wp-tag-cloud'}))
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('li'):
                if __item_:
                    if __item_.a.get('href') not in __false_positives_:
                        # Topic count from the anchor title, e.g. "8 topics" -> "8".
                        try:
                            __vidcount_ = __item_.a.get('title')
                            __vidcount_ = re.sub('\stopic[s]*', '', __vidcount_)
                        except:
                            __vidcount_ = '0'
                        if __vidcount_ and __vidcount_ != '0':
                            __title_ = __item_.a.string.encode('UTF-8') + ' (%s)' % __vidcount_
                            __item_list_.extend([{'site': __site_, 'mode': 'list',
                                                  'url': __item_.a.get('href'),
                                                  'content': __params_['content'],
                                                  'title': __title_,
                                                  'cover_url': a.image(),
                                                  'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'list':
        if __params_['content'] == 'search':
            __item_ = a.search_input()
            if __item_:
                __params_['url'] = __search_url_ + __item_
            else:
                exit(1)  # user cancelled — abort
        elif __params_['content'] == 'goto':
            __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
            if __last_item_:
                __last_item_ = int(__last_item_.group(1))
            else:
                __last_item_ = 10000  # unknown page count: large upper bound
            __item_ = a.page_input(__last_item_)
            if __item_:
                __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
            else:
                exit(1)
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'listing-videos listing-tube'}))
        __item_list_ = []
        # Seed keys copied into every generated item.
        __params_['mode'] = 'play'
        __params_['content'] = 'movies'
        __params_['type'] = 0
        __params_['context'] = 0
        __params_['duration'] = '7200'
        if __soup_:
            __xbmcdict_ = XBMCDict(0).update(__params_)
            for __item_ in __soup_.findAll('li', {'class': 'border-radius-5 box-shadow'}):
                if __item_:
                    if __item_.a.get('href') not in __false_positives_:
                        __dict_ = __xbmcdict_.copy()
                        # NOTE(review): this inspects the title inherited from
                        # the seed dict *before* this item's own title is set —
                        # looks like a latent bug; confirm intended behavior.
                        if 'full movie' not in __dict_['title'].lower() and 'xtheatre' not in __dict_['title'].lower():
                            __dict_['duration'] = '1500'
                            __dict_['content'] = 'episodes'
                        __dict_['url'] = __item_.a.get('href')
                        __dict_['title'] = __item_.a.get('title').encode('UTF-8')
                        __dict_['tvshowtitle'] = __dict_['title']
                        __dict_['originaltitle'] = __dict_['title']
                        __dict_['cover_url'] = a.image(__item_.img.get('data-lazy-src'))
                        __dict_['thumb_url'] = __dict_['cover_url']
                        __dict_['poster'] = __dict_['cover_url']
                        __dict_['sub_site'] = __site_
                        __item_list_.extend([__dict_])
        # Pagination: find last / previous / next page links.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'pagination'}))
        __last_item_ = False
        if __soup_:
            for __item_ in __soup_.findAll('a'):
                if (__item_.string.encode('UTF-8') == 'Last »') or (__item_.get('class') == 'last'):
                    __last_item_ = __item_.get('href')
                    break
            if __last_item_ is False:
                # No explicit "last" link: keep the final 'inactive' anchor.
                for __last_item_ in __soup_.findAll('a', {'class': 'inactive'}):
                    pass
                if __last_item_:
                    __last_item_ = __last_item_.get('href')
            __item_ = __soup_.find('span', {'class': 'current'})
            if __item_:
                if __item_.parent:
                    __item_ = __item_.parent
                # Siblings of the current page hold prev/next anchors.
                if __item_.previousSibling:
                    if __item_.previousSibling.find('a'):
                        __item_list_.extend([{'site': __site_, 'mode': 'list',
                                              'url': __item_.previousSibling.a.get('href'),
                                              'content': __params_['content'],
                                              'title': a.language(30017, True),
                                              'cover_url': a.image('previous.png', image),
                                              'backdrop_url': a.art(), 'type': 3}])
                if __item_.nextSibling:
                    if __item_.nextSibling.find('a'):
                        __item_list_.extend([{'site': __site_, 'mode': 'list',
                                              'url': __item_.nextSibling.a.get('href'),
                                              'content': __params_['content'],
                                              'title': a.language(30018, True),
                                              'cover_url': a.image('next.png', image),
                                              'backdrop_url': a.art(), 'type': 3}])
        if __last_item_:
            # content='goto' triggers the page-number prompt next time.
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_,
                                  'content': 'goto', 'title': a.language(30019, True),
                                  'cover_url': a.image('goto.png', image),
                                  'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'play':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'video-embed'}))
        __item_list_ = []
        if __soup_:
            # videomega script embeds.
            for __script_ in __soup_.findAll('script'):
                __item_ = ''
                if __script_.get('src'):
                    if 'http://videomega.tv/validatehash.php' in __script_['src']:
                        __item_ = __script_['src']
                    elif 'ref=' in __script_.get('src'):
                        __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __script_.get('src'))
                        if __temp_:
                            __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                    __xbmcdict_ = XBMCDict(0).update(__params_)
                    if __item_:
                        __dict_ = __xbmcdict_.copy()
                        __dict_['url'] = __item_
                        __item_list_.extend([__dict_])
            # Direct iframe embeds.
            if __soup_.find('iframe', src=True):
                __item_ = ''
                for __iframe_ in __soup_.findAll('iframe', src=True):
                    if __iframe_.get('data-lazy-src'):
                        __item_ = __iframe_.get('data-lazy-src')
                    else:
                        __item_ = __iframe_.get('src').replace('\\', '')
                    __xbmcdict_ = XBMCDict(0).update(__params_)
                    if __item_:
                        __dict_ = __xbmcdict_.copy()
                        __dict_['url'] = __item_
                        __item_list_.extend([__dict_])
            # Fallback: iframes inside the video-infos description block.
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'video-infos'}))
            if __soup_:
                __item_ = ''
                for __p_ in __soup_.findAll('p'):
                    if __p_.iframe:
                        __item_ = __p_.iframe.get('src')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
        if __item_list_:
            from playback import Playback
            Playback().choose_sources(__item_list_)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Directory/playback handler for pornhardx.com.

    Dispatches on params['mode']:
      'main'       -- build the static top-level menu for this site
      'categories' -- scrape category links from the navigation bar
      'list'       -- scrape a video listing page (handles 'search' and
                      'goto' content first), then pagination links
      'play'       -- collect candidate stream URLs and hand them to Playback

    NOTE(review): `image` is resolved from an enclosing/module scope (image
    base passed to a.image()) -- not defined in this method; confirm at the
    file level.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    # URL layout of the site.
    home_url = 'http://pornhardx.com/'
    movies_url = home_url + 'category/full-movie/'
    scenes_url = home_url + 'video/'
    search_url = home_url + '?s='
    # Links present in the markup that must never be listed as content.
    false_positives = ['http://pornhardx.com/video',
                       'http://pornhardx.com/video/?order=viewed',
                       'http://pornhardx.com/video/?order=liked',
                       'http://pornhardx.com/']
    if mode == 'main':
        # Static menu: all videos, recent, categories, search, then the
        # shared favorites/history and extended menu entries.
        item_list = []
        item_list.extend([{'site': site, 'mode': 'list',
                           'title': a.language(30006), 'content': '',
                           'url': scenes_url,
                           'cover_url': a.image('all.png', image),
                           'backdrop_url': a.art(), 'type': 3}])
        item_list.extend([{'site': site, 'mode': 'list',
                           'title': a.language(30003), 'content': '',
                           'url': home_url,
                           'cover_url': a.image('recent.png', image),
                           'backdrop_url': a.art(), 'type': 3}])
        item_list.extend([{'site': site, 'mode': 'categories',
                           'title': a.language(30005), 'content': '',
                           'url': scenes_url,
                           'cover_url': a.image('categories.png', image),
                           'backdrop_url': a.art(), 'type': 3}])
        item_list.extend([{'site': site, 'mode': 'list',
                           'title': a.language(30004), 'content': 'search',
                           'url': search_url,
                           'cover_url': a.image('search.png', image),
                           'backdrop_url': a.art(), 'type': 3}])
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'navigation-wrapper'}))
        item_list = []
        if soup:
            for item in soup.findAll('a', {'href': True}):
                if item:
                    if item.get('href') not in false_positives:
                        # Browsing full movies: keep only full-movie category
                        # links; otherwise keep only non-movie links.
                        if 'full-movie' in params['url']:
                            if movies_url != item.get('href') and 'full-movie' in item.get('href'):
                                item_list.extend([{'site': site, 'mode': 'list',
                                                   'url': item.get('href'), 'content': '',
                                                   'title': item.contents[0].encode('UTF-8'),
                                                   'cover_url': a.image(image, image),
                                                   'backdrop_url': a.art(), 'type': 3}])
                        elif 'full-movie' not in item.get('href'):
                            item_list.extend([{'site': site, 'mode': 'list',
                                               'url': item.get('href'), 'content': '',
                                               'title': item.contents[0].encode('UTF-8'),
                                               'cover_url': a.image(image, image),
                                               'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt for a search term; abort the directory on cancel.
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # Prompt for a page number, bounded by the last known page.
            last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('/page/[0-9]+/',
                                       '/page/' + str(item) + '/',
                                       params['url'])
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
        item_list = []
        # Listed entries will be played when selected; default to movie
        # metadata and downgrade to episode/scene below when applicable.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll(
                    'div',
                    {'class': re.compile('.*(?:col-xs-6 item|post type-post status-publish).*')}):
                if item:
                    if item.a.get('href') not in false_positives:
                        _dict = xbmcdict.copy()
                        if 'full-movie' not in params['url']:
                            _dict['duration'] = '1500'
                            _dict['content'] = 'episodes'
                        # Title/link live in h3 on listing pages, h2 on
                        # blog-style pages.
                        if item.h3:
                            _dict['url'] = item.h3.a.get('href')
                            if item.h3.a.contents:
                                _dict['title'] = item.h3.a.contents[0].encode('UTF-8')
                            else:
                                _dict['title'] = 'Untitled'
                        elif item.h2:
                            _dict['url'] = item.h2.a.get('href')
                            if item.h2.a.contents:
                                _dict['title'] = item.h2.a.contents[0].encode('UTF-8')
                            else:
                                _dict['title'] = 'Untitled'
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        _dict['cover_url'] = a.image(item.img.get('src'))
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site
                        item_list.extend([_dict])
        # Pagination: try the bootstrap 'pagination' list first, then the
        # 'pager' list as a fallback.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'ul', {'class': 'pagination'}))
        if soup.li:
            item = soup.find('a', {'class': 'prev page-numbers'})
            if item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': item.get('href'),
                                   'content': params['content'],
                                   'title': a.language(30017, True),
                                   'cover_url': a.image(image, image),
                                   'backdrop_url': a.art(), 'type': 3}])
            item = soup.find('a', {'class': 'next page-numbers'})
            if item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': item.get('href'),
                                   'content': params['content'],
                                   'title': a.language(30018, True),
                                   'cover_url': a.image(image, image),
                                   'backdrop_url': a.art(), 'type': 3}])
                if len(soup.findAll('a')) > 2:
                    # Entry before the 'next' link is the last page number.
                    last_item = soup.find('a', {'class': 'next page-numbers'}).parent.previousSibling.a.get('href')
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': last_item, 'content': 'goto',
                                       'title': a.language(30019, True),
                                       'cover_url': a.image(image, image),
                                       'backdrop_url': a.art(), 'type': 3}])
        else:
            item = soup.find('span', {'class': 'page-numbers current'})
            if item:
                if len(soup.findAll('a')) > 2:
                    last_item = soup.find('span', {'class': 'page-numbers current'}).parent.previousSibling.a.get('href')
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': last_item, 'content': 'goto',
                                       'title': a.language(30019, True),
                                       'cover_url': a.image('goto.png', image),
                                       'backdrop_url': a.art(), 'type': 3}])
            else:
                soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
                    'ul', {'class': 'pager'}))
                item = soup.find('li', {'class': 'previous'})
                if item:
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': item.previousSibling.get('href'),
                                       'content': params['content'],
                                       'title': a.language(30017, True),
                                       'cover_url': a.image('previous.png', image),
                                       'backdrop_url': a.art(), 'type': 3}])
                item = soup.find('li', {'class': 'next'})
                if item:
                    # NOTE(review): also reads previousSibling here (same as
                    # the 'previous' branch) -- confirm against site markup.
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': item.previousSibling.get('href'),
                                       'content': params['content'],
                                       'title': a.language(30018, True),
                                       'cover_url': a.image('next.png', image),
                                       'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
        item = ''
        item_list = []
        if soup:
            # 1) Flash players: pull the proxied link out of FlashVars.
            for item in soup.findAll('param', {'name': 'FlashVars'}):
                item = item.get('value')
                item = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?', item)
                if item:
                    if item not in item_list:
                        item = item.group(1)
                    else:
                        item = ''
                else:
                    item = ''
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
            item = ''
            # 2) HTML5 <video>: collect <source> children plus any direct src.
            for item in soup.findAll('video'):
                for source in soup.findAll('source'):
                    src = source.get('src')
                    if src:
                        xbmcdict = XBMCDict(0).update(params)
                        if item and ('..' not in src):
                            _dict = xbmcdict.copy()
                            try:
                                _dict['src_title'] = source.get('data-res') + 'p'
                            except:
                                pass
                            _dict['url'] = src
                            item_list.extend([_dict])
                try:
                    src = item.get('src')
                    if src:
                        xbmcdict = XBMCDict(0).update(params)
                        if item and ('..' not in src):
                            _dict = xbmcdict.copy()
                            try:
                                _dict['src_title'] = source.get('data-res') + 'p'
                            except:
                                pass
                            _dict['url'] = src
                            item_list.extend([_dict])
                except:
                    pass
            # 3) videomega script embeds.
            for script in soup.findAll('script'):
                item = ''
                if script.get('src'):
                    if 'http://videomega.tv/validatehash.php' in script['src']:
                        item = script['src']
                    elif 'ref=' in script.get('src'):
                        temp = re.search('.*ref=[\'"](.+?)[\'"]', script.get('src'))
                        if temp:
                            item = 'http://videomega.tv/iframe.php?ref=' + temp.group(1)
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
            # 4) Generic iframes (videomega special-cased, others passed through).
            for iframe in soup.findAll('iframe'):
                item = ''
                if iframe.get('src'):
                    if 'http://videomega.tv/validatehash.php' in iframe['src']:
                        item = iframe['src']
                    elif 'ref=' in iframe.get('src'):
                        temp = re.search('.*ref=[\'"](.+?)[\'"]', iframe.get('src'))
                        if temp:
                            item = 'http://videomega.tv/iframe.php?ref=' + temp.group(1)
                    else:
                        item = iframe.get('src')
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            # Nothing playable found.
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Directory/playback handler for freeomovie.com.

    Dispatches on params['mode']: 'main' (static menu), 'categories'
    (taxonomy list), 'list' (video listing + wp-pagenavi pagination) and
    'play' (multi-part pages, direct links, iframes, 'countrytabs' mirrors).

    NOTE(review): `image` comes from an enclosing/module scope (base used
    by a.image()); confirm at file level.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    # URL layout of the site.
    home_url = 'http://www.freeomovie.com/'
    movies_url = home_url + 'category/full-movie/'
    scenes_url = home_url + 'category/clips/'
    search_url = home_url + '/?s='
    # Category links that are already offered by the static menu.
    false_positives = ['http://www.freeomovie.com/category/full-movie/',
                       'http://www.freeomovie.com/category/clips/']
    if mode == 'main':
        # Static menu: all / movies / scenes / categories / search.
        item_list = [{'site': site, 'mode': 'list',
                      'title': a.language(30006), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('all.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30001), 'content': '',
                      'url': movies_url,
                      'cover_url': a.image('movies.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30002), 'content': '',
                      'url': scenes_url,
                      'cover_url': a.image('scenes.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'categories',
                      'title': a.language(30005), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('categories.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30004), 'content': 'search',
                      'url': search_url,
                      'cover_url': a.image('search.png', image),
                      'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'multi-column-taxonomy-list'}))
        item_list = []
        if soup:
            for item in soup.findAll('a'):
                if item:
                    if item.get('href') not in false_positives:
                        item_list.extend([{'site': site, 'mode': 'list',
                                           'url': item.get('href'), 'content': '',
                                           'title': item.string.encode('UTF-8'),
                                           'cover_url': a.image(image, image),
                                           'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt for a search term; abort directory on cancel.
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # Prompt for a page number, bounded by the last known page.
            last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('/page/[0-9]+/',
                                       '/page/' + str(item) + '/',
                                       params['url'])
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'content'}))
        item_list = []
        # Entries play on selection; movie defaults, scene overrides below.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', {'class': 'postbox'}):
                if item:
                    if item.h2.a.get('href') not in false_positives:
                        _dict = xbmcdict.copy()
                        if scenes_url in params['url']:
                            _dict['duration'] = '1500'
                            _dict['content'] = 'episodes'
                        _dict['url'] = item.h2.a.get('href')
                        _dict['title'] = item.h2.a.get('title').encode('UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        _dict['cover_url'] = a.image(item.img.get('src'))
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site
                        item_list.extend([_dict])
        # wp-pagenavi pagination block.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'wp-pagenavi'}))
        last_item = False
        if soup:
            for item in soup.findAll('a', href=True):
                if item:
                    if item.get('class') == 'previouspostslink':
                        item_list.extend([{'site': site, 'mode': 'list',
                                           'url': item.get('href'),
                                           'content': params['content'],
                                           'title': a.language(30017, True),
                                           'cover_url': a.image('previous.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if item.get('class') == 'nextpostslink':
                        item_list.extend([{'site': site, 'mode': 'list',
                                           'url': item.get('href'),
                                           'content': params['content'],
                                           'title': a.language(30018, True),
                                           'cover_url': a.image('next.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if item.get('class') == 'last':
                        last_item = item.get('href')
            if not last_item:
                # No explicit 'last' link: infer it from link positions.
                try:
                    if not soup.find('a', {'class': 'nextpostslink'}):
                        last_item = soup.findAll('a', href=True)[-1].get('href')
                    else:
                        last_item = soup.findAll('a', href=True)[-2].get('href')
                except:
                    pass
            if last_item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': last_item, 'content': 'goto',
                                   'title': a.language(30019, True),
                                   'cover_url': a.image('goto.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        item_list = []
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'videosection'}))
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            # Multi-part listings: consecutive <li class="pgN"> entries of
            # the same class form one multi-part item; a class change
            # flushes the accumulated parts and starts a new item.
            pages = soup.findAll('li', {'class': re.compile('pg.')})
            if pages:
                old_li = pages[0].get('class')
                _dict = xbmcdict.copy()
                _dict['multi-part'] = True
                parts = []
                for li in pages:
                    if old_li != li.get('class'):
                        _dict['parts'] = parts
                        item_list.extend([_dict])
                        _dict = xbmcdict.copy()
                        _dict['multi-part'] = True
                        old_li = li.get('class')
                        parts = []
                    url = re.search('.+myurl=(.+)', li.a.get('href'), re.IGNORECASE)
                    if url:
                        url = url.group(1)
                        parts.extend([url])
                if parts:
                    # Flush the final accumulated group.
                    _dict['parts'] = parts
                    item_list.extend([_dict])
            # Single external link (skip ad network links).
            alink = soup.find('a', {'target': '_blank'})
            if alink:
                alink = alink.get('href')
                if 'main.exoclick.com' not in alink:
                    _dict = xbmcdict.copy()
                    _dict['url'] = alink
                    item_list.extend([_dict])
            # Embedded iframes (skip ad network frames).
            iframes = soup.findAll('iframe', {'src': True})
            if iframes:
                for iframe in iframes:
                    iframe = iframe.get('src')
                    if 'main.exoclick.com' not in iframe:
                        _dict = xbmcdict.copy()
                        _dict['url'] = iframe
                        item_list.extend([_dict])
        if not item_list:
            # Fallback: mirror tabs ('countrytabs') carry myURL[]= links.
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
                'ul', {'id': 'countrytabs'}))
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for index, items in enumerate(soup.findAll('a', href=True)):
                    item = ''
                    if not items.get('id') == 'jpg':
                        item = items.get('href')
                        item = re.search('.*myURL\[\]=(.+)$', item, re.DOTALL)
                        if item:
                            item = re.sub('&tab=[0-9]+', '', item.group(1))
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        _dict['count'] = index
                        item_list.extend([_dict])
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            # Nothing playable found.
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Directory/playback handler for playporn.to.

    Dispatches on params['mode']: 'main' (static menu), 'sub'
    (all/categories submenu), 'category' (nav-bar category list), 'list'
    (thumb listing + pagination) and 'play' (sources hidden inside an HTML
    comment in the 'loopedSlider' div).

    Fixes vs. previous revision:
      * The comment-unescape step used literal '<'/'>' as the re.sub
        patterns, making both substitutions no-ops; the player markup is
        entity-escaped inside the HTML comment, so the patterns must be
        the entities '&lt;' / '&gt;'.
      * The 'category' branch hard-coded 'site': 'playporn' where every
        other menu dict uses the `site` variable.

    NOTE(review): `image` comes from an enclosing/module scope (base used
    by a.image()); confirm at file level.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    # URL layout of the site.
    home_url = 'http://playporn.to/'
    search_url = home_url + '?submit=Search&s='
    movies_url = home_url + 'category/xxx-movie-stream/'
    scenes_url = home_url + 'category/xxx-clips-scenes-stream/'
    # Known broken/off-template entry that must never be listed.
    false_positives = ['http://playporn.to/deutsche-milfs-anonym-sex/']
    if mode == 'main':
        # Static menu: recent / movies / scenes / search.
        item_list = [{'site': site, 'mode': 'list',
                      'title': a.language(30003), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('recent.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'sub',
                      'title': a.language(30001), 'content': '',
                      'url': movies_url,
                      'cover_url': a.image('movies.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'sub',
                      'title': a.language(30002), 'content': '',
                      'url': scenes_url,
                      'cover_url': a.image('scenes.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30004), 'content': 'search',
                      'url': search_url,
                      'cover_url': a.image('search.png', image),
                      'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'sub':
        # Submenu for a section: list everything, or browse by category.
        item_list = [{'site': site, 'mode': 'list',
                      'title': a.language(30006), 'content': '',
                      'url': params['url'],
                      'cover_url': a.image('all.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'category',
                      'title': a.language(30005), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('categories.png', image),
                      'backdrop_url': a.art(), 'type': 3}]
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'category':
        # Nav bar holds one <ul> per section; index 1 = movies, 2 = scenes.
        index = 1
        if 'scenes' in params['url'].lower():
            index = 2
        html = a.get_page(home_url)
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('ul', 'nav fl'))
        item_list = []
        for item in soup.findAll('ul')[index].findAll({'a': True}):
            item_list.extend([{'site': site, 'mode': 'list',
                               'url': item.get('href'), 'content': '',
                               'title': item.contents[0].encode('UTF-8'),
                               'cover_url': a.image(image, image),
                               'backdrop_url': a.art(), 'type': 3}])
        if item_list:
            a.add_items(item_list)
            a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt for a search term; abort directory on cancel.
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # Prompt for a page number, bounded by the last known page.
            last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('/page/[0-9]+/',
                                       '/page/' + str(item) + '/',
                                       params['url'])
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
        item_list = []
        # Entries play on selection; movie defaults, scene overrides below.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        xbmcdict = XBMCDict(0).update(params)
        for item in soup.findAll('div', 'photo-thumb-image'):
            if not item.a.get('href') in false_positives:
                _dict = xbmcdict.copy()
                if 'scenes' in params['url']:
                    _dict['duration'] = '2700'
                    _dict['content'] = 'episodes'
                _dict['url'] = item.a.get('href')
                _dict['title'] = item.a.get('title').encode('UTF-8')
                _dict['tvshowtitle'] = _dict['title']
                _dict['originaltitle'] = _dict['title']
                _dict['cover_url'] = a.image(item.img.get('src'))
                _dict['thumb_url'] = _dict['cover_url']
                _dict['poster'] = _dict['cover_url']
                _dict['sub_site'] = site
                item_list.extend([_dict])
        # Pagination lives in the 'more_entries' div.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', 'more_entries'))
        if soup:
            item = soup.find('a', 'previouspostslink')
            if item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': item.get('href'),
                                   'content': params['content'],
                                   'title': a.language(30017, True),
                                   'cover_url': a.image('previous.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
            item = soup.find('a', 'nextpostslink')
            if item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': item.get('href'),
                                   'content': params['content'],
                                   'title': a.language(30018, True),
                                   'cover_url': a.image('next.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
            item = soup.find('a', 'last')
            if item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': item.get('href'), 'content': 'goto',
                                   'title': a.language(30019, True),
                                   'cover_url': a.image('goto.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
        if item_list:
            a.add_items(item_list)
            a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'loopedSlider'}))
        # The player markup is hidden, entity-escaped, inside an HTML comment.
        soup = soup.find(text=lambda text: isinstance(text, Comment))
        if soup:
            # Unescape the comment body so it can be re-parsed as HTML.
            # (Previously the patterns were literal '<'/'>', a no-op.)
            soup = re.sub('&lt;', '<', soup.encode('utf-8'))
            soup = re.sub('&gt;', '>', soup)
            soup = BeautifulSoup(soup, parseOnlyThese=SoupStrainer(
                'div', 'video'))
            if soup:
                item_list = []
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('iframe'):
                    _dict = xbmcdict.copy()
                    _dict['url'] = item.get('src').replace(
                        'http://playporn.to/stream/all/?file=',
                        '').encode('UTF-8')
                    # Rewrite known hoster embed URLs to direct page URLs.
                    if 'flashx.tv' in _dict['url'].lower():
                        item = re.search('hash=(.+?)&', _dict['url'])
                        if item:
                            _dict['url'] = ('http://flashx.tv/video/'
                                            + item.group(1) + '/')
                    elif 'played.to' in _dict['url'].lower():
                        item = re.search('embed-([a-zA-Z0-9]+?)-.+?html',
                                         _dict['url'])
                        if item:
                            _dict['url'] = 'http://played.to/' + item.group(1)
                    item_list.extend([_dict])
                if item_list:
                    from playback import Playback
                    Playback().choose_sources(item_list)
                else:
                    a.alert(a.language(30904, True), sound=False)
            else:
                a.alert(a.language(30904, True), sound=False)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Directory/playback handler for filmikz.ch (genre 14 listing).

    Dispatches on params['mode']: 'main' (static menu), 'list' (table-based
    listing with 'pg=' offset pagination) and 'play' (per-host buttons that
    pop up frame pages; multi-part entries are grouped per host).

    NOTE(review): `image` comes from an enclosing/module scope (base used
    by a.image()); confirm at file level.
    """
    import re
    from addon import Addon
    from addondict import AddonDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    # URL layout: everything hangs off the genre-14 index page.
    base_url = 'http://filmikz.ch'
    home_url = base_url + '/index.php?genre=14'
    search_url = home_url + '&search='
    false_positives = ['#']
    if mode == 'main':
        # Static menu: all + search.
        item_list = [{'site': site, 'mode': 'list',
                      'title': a.language(30006), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('all.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30004), 'content': 'search',
                      'url': search_url,
                      'cover_url': a.image('search.png', image),
                      'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt for a search term; abort directory on cancel.
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # 'pg=' is an item offset (10 per page), so convert offset ->
            # page number for the prompt and back again afterwards.
            last_item = re.search('pg=([0-9]+)', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            last_item = int(last_item / 10)
            item = a.page_input(last_item)
            if item:
                item = str(int(item) * 10)
                params['url'] = re.sub('pg=[0-9]+', 'pg=' + str(item),
                                       params['url']).replace(' ', '+')
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'td', {'width': '490'}))
        item_list = []
        # Entries play on selection; movie defaults.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        params['sub_site'] = site
        if soup:
            addondict = AddonDict(0).update(params)
            for item in soup.findAll('table', {'width': '100%',
                                               'height': '155'}):
                _dict = addondict.copy()
                ahref = item.find('a', {'href': True})
                if ahref:
                    url = ahref.get('href')
                    if not url.startswith('http://'):
                        url = base_url + url
                    _dict['url'] = url
                data = item.find('strong')
                # NOTE(review): rstrip(' XXX :') strips the CHARACTER SET
                # {' ', 'X', ':'} from the right, not the literal suffix --
                # titles ending in 'X' lose those characters too; confirm
                # this is acceptable before changing.
                _dict['title'] = str(data.contents[0]).rstrip(' XXX :')
                _dict['tvshowtitle'] = _dict['title']
                _dict['originaltitle'] = _dict['title']
                img = item.find('img')
                if img:
                    img = img.get('src')
                    if not img.startswith('http://'):
                        img = base_url + '/' + img
                else:
                    img = ''
                _dict['cover_url'] = a.image(img)
                _dict['thumb_url'] = _dict['cover_url']
                _dict['poster'] = _dict['cover_url']
                # Optional cast line doubles as the plot text.
                cast = item.find('p', text=re.compile('[Ss]tarring:.+'))
                if cast:
                    _dict['plot'] = str(cast)
                    _dict['plotoutline'] = _dict['plot']
                    cast = re.search('[Ss]tarring:\s*(.+?)\s*\.+', str(cast))
                    if cast:
                        cast = cast.group(1)
                        _dict['cast'] = cast.split(', ')
                item_list.extend([_dict])
        # Pagination table: prev / next / last links identified by label.
        pages = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'table', {'width': '250'}))
        if pages:
            previouspage = None
            nextpage = None
            lastpage = None
            for ahref in pages.findAll('a', {'href': True}):
                astr = ahref.string.encode('utf-8')
                if astr == '‹‹ ':
                    previouspage = base_url + '/' + ahref.get('href')
                elif astr == '››':
                    nextpage = base_url + '/' + ahref.get('href')
                elif astr == ' Last ':
                    lastpage = base_url + '/' + ahref.get('href')
            # Suppress 'goto' when there are fewer than two pages.
            last_item = re.search('pg=(-*[0-9]+)', str(lastpage))
            if last_item:
                last_item = int(last_item.group(1))
                if last_item < 10:
                    lastpage = None
            if previouspage:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': previouspage,
                                   'content': params['content'],
                                   'title': a.language(30017, True),
                                   'cover_url': a.image('previous.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
            if nextpage:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': nextpage,
                                   'content': params['content'],
                                   'title': a.language(30018, True),
                                   'cover_url': a.image('next.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
            if lastpage:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': lastpage, 'content': 'goto',
                                   'title': a.language(30019, True),
                                   'cover_url': a.image('goto.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
        item_list = []
        # Hosts whose buttons are known to be dead/undesirable.
        _bad_hosts = ['NowDownload', 'ePornik']
        if soup:
            buttons = soup.findAll('input', {'type': 'button',
                                             'onclick': True})
            if buttons:
                addondict = AddonDict(0).update(params)
                # Pass 1: single-file buttons (no 'Part N' in the label).
                # Each button's popUp() target page holds a <frame> whose
                # src is the actual stream page.
                for button in buttons:
                    value = button.get('value')
                    newhost = re.search('.+?-([a-zA-Z]+)', value)
                    if newhost:
                        newhost = newhost.group(1)
                    else:
                        newhost = ''
                    if newhost not in _bad_hosts:
                        item = button.get('onclick')
                        item = re.sub('javascript:popUp\([\'"](.+?)[\'"]\);*',
                                      '\g<01>', item)
                        item = base_url + item
                        value = button.get('value')
                        if not re.search('[Pp]art ', value):
                            try:
                                thtml = a.get_page(item)
                                tsoup = BeautifulSoup(thtml)
                                source = tsoup.find('frame')
                                if source:
                                    source = source.get('src')
                                    if 'ads.php' not in source:
                                        _dict = addondict.copy()
                                        _dict['url'] = source
                                        item_list.extend([_dict])
                            except:
                                continue
                # Pass 2: 'Part N' buttons, grouped into one multi-part
                # item per host; a host change flushes the current group.
                parts = []
                oldhost = ''
                _dict = addondict.copy()
                _dict['multi-part'] = True
                for button in buttons:
                    value = button.get('value')
                    newhost = re.search('.+?-([a-zA-Z]+)', value)
                    if newhost:
                        newhost = newhost.group(1)
                    else:
                        newhost = ''
                    if newhost not in _bad_hosts:
                        item = button.get('onclick')
                        item = re.sub('javascript:popUp\([\'"](.+?)[\'"]\);*',
                                      '\g<01>', item)
                        item = base_url + item
                        if re.search('[Pp]art ', value):
                            if oldhost != newhost:
                                if oldhost != '':
                                    _dict['parts'] = parts
                                    item_list.extend([_dict])
                                    _dict = addondict.copy()
                                    _dict['multi-part'] = True
                                    parts = []
                                oldhost = newhost
                            try:
                                thtml = a.get_page(item)
                                tsoup = BeautifulSoup(thtml)
                                source = tsoup.find('frame')
                                if source:
                                    source = source.get('src')
                                    if 'ads.php' not in source:
                                        parts.extend([source])
                            except:
                                continue
                if parts:
                    # Flush the final host's group.
                    _dict['parts'] = parts
                    item_list.extend([_dict])
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            # Nothing playable found.
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Directory/playback handler for yespornplease.com.

    Dispatches on params['mode']: 'main' (static menu), 'categories'
    (category index), 'list' (video grid with prev/next/last pagination)
    and 'play' (flash player: video_url pulled from the flashvars param).

    NOTE(review): `image` comes from an enclosing/module scope (base used
    by a.image()); confirm at file level.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__
    mode = params['mode']
    # URL layout of the site.
    base_url = 'http://yespornplease.com'
    home_url = base_url + '/index.php'
    popular_url = base_url + '/index.php?p=1&m=today'
    search_url = base_url + '/search.php?q='
    false_positives = ['']
    if mode == 'main':
        # Static menu: all / popular today / categories / search.
        item_list = [{'site': site, 'mode': 'list',
                      'title': a.language(30006), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('all.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30016), 'content': '',
                      'url': popular_url,
                      'cover_url': a.image('popular.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'categories',
                      'title': a.language(30005), 'content': '',
                      'url': home_url,
                      'cover_url': a.image('categories.png', image),
                      'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list',
                      'title': a.language(30004), 'content': 'search',
                      'url': search_url,
                      'cover_url': a.image('search.png', image),
                      'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'categories'}))
        item_list = []
        if soup:
            for item in soup.findAll('a'):
                if item:
                    item_list.extend([{'site': site, 'mode': 'list',
                                       'url': item.get('href').replace(' ', '+'),
                                       'content': '',
                                       'title': item.string.encode('UTF-8'),
                                       'cover_url': a.image(image, image),
                                       'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt for a search term; abort directory on cancel.
            item = a.search_input()
            if item:
                params['url'] = search_url + item.replace(' ', '+')
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # Prompt for a page number ('p=' query parameter).
            last_item = re.search('p=([0-9]+)', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub('p=[0-9]+', 'p=' + str(item),
                                       params['url']).replace(' ', '+')
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'videos'}))
        item_list = []
        # Entries play on selection; per-item duration parsed below.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', {'class': 'video-preview'}):
                if item:
                    _dict = xbmcdict.copy()
                    temp = item.find('div', {'class': 'jcarousel'}).a
                    if temp:
                        temp = temp.get('href')
                        if not temp.startswith('http://'):
                            temp = base_url + temp
                        _dict['url'] = temp
                    _dict['title'] = item.find(
                        'div', {'class': 'preview-title'}).get('title').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    temp = item.find('div', {'class': 'jcarousel'}).img.get('src')
                    if temp.startswith('//'):
                        # Protocol-relative thumbnail URL.
                        temp = 'http:' + temp
                    _dict['cover_url'] = a.image(temp)
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    # Convert the H:MM:SS badge to seconds.
                    temp = item.find(
                        'div', {'class': 'preview-info-box length'}).b.string
                    if temp:
                        temp = re.search('([0-9]+):([0-9]+):([0-9]+)', temp)
                        _dict['duration'] = str((int(temp.group(1)) * 60 * 60)
                                                + (int(temp.group(2)) * 60)
                                                + int(temp.group(3)))
                    _dict['sub_site'] = site
                    item_list.extend([_dict])
        # Prev/next links; search pages reuse search.php hrefs which must be
        # rewritten to index.php when browsing the index.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
        if soup.find('a', {'id': 'prev-page'}):
            item = soup.find('a', {'id': 'prev-page'}).get('href').replace(' ', '+')
            if not item.startswith('http://'):
                item = base_url + item
            if 'index.php' in params['url']:
                item = item.replace('search.php', 'index.php')
            item_list.extend([{'site': site, 'mode': 'list', 'url': item,
                               'content': params['content'],
                               'title': a.language(30017, True),
                               'cover_url': a.image('previous.png', image),
                               'backdrop_url': a.art(), 'type': 3}])
        if soup.find('a', {'id': 'next-page'}):
            item = soup.find('a', {'id': 'next-page'}).get('href').replace(' ', '+')
            if 'index.php' in params['url']:
                item = item.replace('search.php', 'index.php')
            if not item.startswith('http://'):
                item = base_url + item
            item_list.extend([{'site': site, 'mode': 'list', 'url': item,
                               'content': params['content'],
                               'title': a.language(30018, True),
                               'cover_url': a.image('next.png', image),
                               'backdrop_url': a.art(), 'type': 3}])
        # Last pagination anchor (iterated in reverse, first hit wins)
        # becomes the 'goto page' target.
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'id': 'pagination'}))
        last_item = False
        if soup:
            for item in reversed(soup.findAll('a')):
                last_item = item.get('href')
                if not last_item.startswith('http://'):
                    last_item = base_url + last_item
                break
            if last_item:
                item_list.extend([{'site': site, 'mode': 'list',
                                   'url': last_item, 'content': 'goto',
                                   'title': a.language(30019, True),
                                   'cover_url': a.image('goto.png', image),
                                   'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'object', {'id': 'videoContainer'}))
        item_list = []
        if soup:
            # Stream URL is the video_url flashvar of the player object.
            item = soup.find('param', {'name': 'flashvars'})
            item = re.search('.*?video_url=(.+?)&.*?', str(item))
            if item:
                item = item.group(1)
            xbmcdict = XBMCDict(0).update(params)
            if item:
                _dict = xbmcdict.copy()
                _dict['url'] = item
                item_list.extend([_dict])
        else:
            # NOTE(review): when no player object is found this alerts here
            # AND again below (item_list stays empty) -- double alert;
            # confirm intended before changing.
            a.alert(a.language(30904, True), sound=False)
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Site scraper entry point (qwertty.net): dispatches on params['mode'].

    Modes: 'main' builds the static menu, 'categories' scrapes genre links,
    'list' renders a listing page (with search/goto input and DLE-style
    pagination), 'play' resolves a listing entry to a playable source URL.
    """
    # Local imports: each site handler pulls in its own dependencies.
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    site = self.__module__  # module name doubles as the site identifier
    mode = params['mode']
    home_url = 'http://qwertty.net'
    # DLE search endpoint; the user's query string is appended to this URL.
    search_url = home_url + '/index.php?do=search&subaction=search&full_search=0&search_start=0&result_from=1&story='
    false_positives = ['']  # NOTE(review): declared but never consulted in this handler
    if mode == 'main':
        # Static top-level menu: all items, categories, search.
        item_list = [{'site': site, 'mode': 'list', 'title': a.language(30006), 'content': '', 'url': home_url, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '', 'url': home_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                     {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search', 'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
        item_list.extend(a.favs_hist_menu(site))
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'categories':
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'navi-wrap'}))
        item_list = []
        if soup:
            # Every anchor in the navigation wrap is a category link.
            for item in soup.findAll('a'):
                if item:
                    item_list.extend([{'site': site, 'mode': 'list', 'url': home_url + item.get('href'), 'content': '', 'title': item.string.encode('UTF-8'), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'list':
        if params.get('content', '') == 'search':
            # Prompt the user for a search phrase; abort the directory if cancelled.
            item = a.search_input()
            if item:
                params['url'] = search_url + item
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            # Work out the highest known page number to bound the page prompt.
            if 'do=search' in params['url']:
                last_item = re.search('search_start=([0-9]+)', params['url'])
            else:
                last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                last_item = 10000  # fallback bound when no page marker is present
            item = a.page_input(last_item)
            if item:
                if 'do=search' in params['url']:
                    # Search pagination carries both search_start (page) and
                    # result_from (first result index = page * 10 + 1).
                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + str(item), params['url'])
                    params['url'] = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(str(item)) * 10 + 1), page)
                else:
                    params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
            else:
                exit(1)
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'id': 'dle-content'}))
        item_list = []
        # Every listed entry plays directly; preset the shared fields once.
        params['mode'] = 'play'
        params['content'] = 'movies'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '7200'
        if soup:
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', {'class': 'short-item'}):
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item.a.get('href')
                    _dict['title'] = item.a.img.get('alt').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    # Full-size art lives outside the /thumbs directory.
                    item = home_url + item.a.img.get('src').replace('/thumbs', '')
                    _dict['cover_url'] = a.image(item)
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    _dict['sub_site'] = site
                    item_list.extend([_dict])
        # Second pass over the same HTML for the pager ("Back" / "Next" / last page).
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'bottom-nav'}))
        if soup:
            # Index of the final pager anchor; that one becomes the "go to page" target.
            last_item = len(soup.findAll('a', href=True)) - 1
            for index, item in enumerate(soup.findAll('a', href=True)):
                page = ''
                if item:
                    if index == 0 and item.string.encode('UTF-8') != 'Back':
                        last_item -= 1
                    if item.string.encode('UTF-8') == 'Back':
                        if item.get('href') == '#':
                            # Search pagers navigate via list_submit(n) onclick handlers.
                            temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                            if temp:
                                page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                page = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                        else:
                            page = item.get('href')
                        if page:
                            item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': params['content'], 'title': a.language(30017, True), 'cover_url': a.image('previous.png', image), 'backdrop_url': a.art(), 'type': 3}])
                    if item.string.encode('UTF-8') == 'Next':
                        if item.get('href') == '#':
                            temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                            if temp:
                                page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                page = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                        else:
                            page = item.get('href')
                        if page:
                            item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': params['content'], 'title': a.language(30018, True), 'cover_url': a.image('next.png', image), 'backdrop_url': a.art(), 'type': 3}])
                    if index == last_item:
                        if item.get('href') == '#':
                            temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                            if temp:
                                page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                page = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                        else:
                            page = item.get('href')
                        if page:
                            item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': 'goto', 'title': a.language(30019, True), 'cover_url': a.image('goto.png', image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()
    elif mode == 'play':
        # The description block's first anchor is the hosted-video link.
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'full-text clearfix desc-text'}))
        item = soup.find('a')
        item_list = []
        xbmcdict = XBMCDict(0).update(params)
        if item:
            _dict = xbmcdict.copy()
            _dict['url'] = item.get('href')
            item_list.extend([_dict])
        else:
            a.alert(a.language(30904, True), sound=False)
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, __params_):
    """Site scraper entry point (pornhardx.com): dispatches on __params_['mode'].

    Modes: 'main' static menu, 'categories' genre links (split between
    full-movie and clip categories), 'list' listing pages with pagination,
    'play' source resolution via four fallbacks: flash player params,
    HTML5 <video> sources, videomega script embeds, and iframe embeds.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    __site_ = self.__module__  # module name doubles as the site identifier
    __mode_ = __params_['mode']
    __home_url_ = 'http://pornhardx.com/'
    __movies_url_ = __home_url_ + 'category/full-movie/'
    __scenes_url_ = __home_url_ + 'video/'
    __search_url_ = __home_url_ + '?s='
    # Site-chrome links that must never be emitted as categories or items.
    __false_positives_ = ['http://pornhardx.com/video', 'http://pornhardx.com/video/?order=viewed',
                          'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/']
    if __mode_ == 'main':
        # Static top-level menu: all, recent, categories, search.
        __item_list_ = []
        __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies', 'url': __scenes_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3}])
        __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30003), 'content': 'movies', 'url': __home_url_, 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(), 'type': 3}])
        __item_list_.extend([{'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies', 'url': __scenes_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3}])
        __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search', 'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}])
        __item_list_.extend(a.favs_hist_menu(__site_))
        __item_list_.extend(a.extended_menu())
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'categories':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'navigation-wrapper'}))
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('a', {'href': True}):
                if __item_:
                    if __item_.get('href') not in __false_positives_:
                        # Browsing full movies: keep only full-movie sub-categories;
                        # otherwise keep only non-full-movie (clip) categories.
                        if 'full-movie' in __params_['url']:
                            if __movies_url_ != __item_.get('href') and 'full-movie' in __item_.get('href'):
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': __item_.contents[0].encode('UTF-8'), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                        elif 'full-movie' not in __item_.get('href'):
                            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': __item_.contents[0].encode('UTF-8'), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'list':
        if __params_['content'] == 'search':
            # Prompt for a search phrase; abort the directory if cancelled.
            __item_ = a.search_input()
            if __item_:
                __params_['url'] = __search_url_ + __item_
            else:
                exit(1)
        elif __params_['content'] == 'goto':
            # Bound the page prompt by the page number visible in the URL.
            __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
            if __last_item_:
                __last_item_ = int(__last_item_.group(1))
            else:
                __last_item_ = 10000
            __item_ = a.page_input(__last_item_)
            if __item_:
                __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
            else:
                exit(1)
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
        __item_list_ = []
        # Every listed entry plays directly; preset the shared fields once.
        __params_['mode'] = 'play'
        __params_['content'] = 'movies'
        __params_['type'] = 0
        __params_['context'] = 0
        __params_['duration'] = '7200'
        if __soup_:
            __xbmcdict_ = XBMCDict(0).update(__params_)
            for __item_ in __soup_.findAll('div', {'class': re.compile('.*(?:col-xs-6 item|post type-post status-publish).*')}):
                if __item_:
                    if __item_.a.get('href') not in __false_positives_:
                        __dict_ = __xbmcdict_.copy()
                        # Clip listings (anything outside full-movie) are shorter
                        # and are classified as episodes.
                        if 'full-movie' not in __params_['url']:
                            __dict_['duration'] = '1500'
                            __dict_['content'] = 'episodes'
                        # Title/link live in an h3 (grid layout) or h2 (post layout).
                        if __item_.h3:
                            __dict_['url'] = __item_.h3.a.get('href')
                            if __item_.h3.a.contents:
                                __dict_['title'] = __item_.h3.a.contents[0].encode('UTF-8')
                            else:
                                __dict_['title'] = 'Untitled'
                        elif __item_.h2:
                            __dict_['url'] = __item_.h2.a.get('href')
                            if __item_.h2.a.contents:
                                __dict_['title'] = __item_.h2.a.contents[0].encode('UTF-8')
                            else:
                                __dict_['title'] = 'Untitled'
                        __dict_['tvshowtitle'] = __dict_['title']
                        __dict_['originaltitle'] = __dict_['title']
                        __dict_['cover_url'] = a.image(__item_.img.get('src'))
                        __dict_['thumb_url'] = __dict_['cover_url']
                        __dict_['poster'] = __dict_['cover_url']
                        __dict_['sub_site'] = __site_
                        __item_list_.extend([__dict_])
        # Pagination: WordPress themes here use either ul.pagination or ul.pager.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'pagination'}))
        if __soup_.li:
            __item_ = __soup_.find('a', {'class': 'prev page-numbers'})
            if __item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': a.language(30017, True), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            __item_ = __soup_.find('a', {'class': 'next page-numbers'})
            if __item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': a.language(30018, True), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                if len(__soup_.findAll('a')) > 2:
                    # The sibling before "next" is the highest numbered page.
                    __last_item_ = __soup_.find('a', {'class': 'next page-numbers'}).parent.previousSibling.a.get('href')
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto', 'title': a.language(30019, True), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            else:
                # No "next" anchor: on the last page the current page marker's
                # previous sibling holds the highest page link.
                __item_ = __soup_.find('span', {'class': 'page-numbers current'})
                if __item_:
                    if len(__soup_.findAll('a')) > 2:
                        __last_item_ = __soup_.find('span', {'class': 'page-numbers current'}).parent.previousSibling.a.get('href')
                        __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto', 'title': a.language(30019, True), 'cover_url': a.image('goto.png', image), 'backdrop_url': a.art(), 'type': 3}])
        else:
            # Fallback pager layout: prev/next anchors inside li.previous / li.next.
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'pager'}))
            __item_ = __soup_.find('li', {'class': 'previous'})
            if __item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.previousSibling.get('href'), 'content': __params_['content'], 'title': a.language(30017, True), 'cover_url': a.image('previous.png', image), 'backdrop_url': a.art(), 'type': 3}])
            __item_ = __soup_.find('li', {'class': 'next'})
            if __item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.previousSibling.get('href'), 'content': __params_['content'], 'title': a.language(30018, True), 'cover_url': a.image('next.png', image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'play':
        __html_ = a.get_page(__params_['url'])
        # Fallback 1: flash player object; the stream URL hides in FlashVars.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('object', {'id': re.compile('flashplayer.+')}))
        __item_ = ''
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('param', {'name': 'FlashVars'}):
                __item_ = __item_.get('value')
                __item_ = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?', __item_)
                if __item_:
                    # NOTE(review): this membership test compares a regex match
                    # object against a list of dicts, so it is effectively
                    # always true -- confirm the intended dedup behavior.
                    if __item_ not in __item_list_:
                        __item_ = __item_.group(1)
                    else:
                        __item_ = ''
                else:
                    __item_ = ''
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __item_list_.extend([__dict_])
        # Fallback 2: HTML5 <video> element with one or more <source> children.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('video'))
        __item_ = ''
        if __soup_:
            for __item_ in __soup_.findAll('source'):
                __src_ = __item_.get('src')
                if __src_:
                    __xbmcdict_ = XBMCDict(0).update(__params_)
                    if __item_ and ('..' not in __src_):
                        __dict_ = __xbmcdict_.copy()
                        try:
                            # data-res carries the vertical resolution, e.g. '720' -> '720p'.
                            __dict_['src_title'] = __item_.get('data-res') + 'p'
                        except:
                            pass
                        __dict_['url'] = __src_
                        __item_list_.extend([__dict_])
        # Fallback 3: videomega script/iframe embeds inside the player wrapper.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'videoWrapper player'}))
        __item_ = ''
        if __soup_:
            for __script_ in __soup_.findAll('script'):
                __item_ = ''
                if __script_.get('src'):
                    if 'http://videomega.tv/validatehash.php' in __script_['src']:
                        __item_ = __script_['src']
                    elif 'ref=' in __script_.get('src'):
                        __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __script_.get('src'))
                        if __temp_:
                            __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __item_list_.extend([__dict_])
            for __iframe_ in __soup_.findAll('iframe'):
                __item_ = ''
                if __iframe_.get('src'):
                    if 'http://videomega.tv/validatehash.php' in __iframe_['src']:
                        __item_ = __iframe_['src']
                    elif 'ref=' in __iframe_.get('src'):
                        __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __iframe_.get('src'))
                        if __temp_:
                            __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                    else:
                        __item_ = __iframe_.get('src')
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __item_list_.extend([__dict_])
        # Fallback 4: iframe embeds inside the small-player layout.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': re.compile('player player-small.*')}))
        __item_ = ''
        if __soup_:
            for __iframe_ in __soup_.findAll('iframe'):
                __item_ = ''
                if __iframe_.get('src'):
                    if 'http://videomega.tv/validatehash.php' in __iframe_['src']:
                        __item_ = __iframe_['src']
                    elif 'ref=' in __iframe_.get('src'):
                        __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __iframe_.get('src'))
                        if __temp_:
                            __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                    else:
                        __item_ = __iframe_.get('src')
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __item_list_.extend([__dict_])
        if __item_list_:
            from playback import Playback
            Playback().choose_sources(__item_list_)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, __params_):
    """Site scraper entry point (yespornplease.com): dispatches on __params_['mode'].

    Modes: 'main' static menu, 'categories' category links, 'list' listing
    pages (search/goto input, prev/next/goto pagination), 'play' extracts
    the stream URL from the flash player's flashvars.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    __site_ = self.__module__  # module name doubles as the site identifier
    __mode_ = __params_['mode']
    __base_url_ = 'http://yespornplease.com'
    __home_url_ = __base_url_ + '/index.php'
    __popular_url_ = __base_url_ + '/index.php?p=1&m=today'
    __search_url_ = __base_url_ + '/search.php?q='
    __false_positives_ = ['']  # NOTE(review): declared but never consulted in this handler
    if __mode_ == 'main':
        # Static top-level menu: all, popular today, categories, search.
        __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies', 'url': __home_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30016), 'content': 'movies', 'url': __popular_url_, 'cover_url': a.image('popular.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies', 'url': __home_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search', 'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
        __item_list_.extend(a.favs_hist_menu(__site_))
        __item_list_.extend(a.extended_menu())
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'categories':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'categories'}))
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('a'):
                if __item_:
                    # Spaces in category URLs must be '+'-encoded for the site.
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href').replace(' ', '+'), 'content': __params_['content'], 'title': __item_.string.encode('UTF-8'), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'list':
        if __params_['content'] == 'search':
            # Prompt for a search phrase; abort the directory if cancelled.
            __item_ = a.search_input()
            if __item_:
                __params_['url'] = __search_url_ + __item_.replace(' ', '+')
            else:
                exit(1)
        elif __params_['content'] == 'goto':
            # Bound the page prompt by the 'p=' page number in the URL.
            __last_item_ = re.search('p=([0-9]+)', __params_['url'])
            if __last_item_:
                __last_item_ = int(__last_item_.group(1))
            else:
                __last_item_ = 10000
            __item_ = a.page_input(__last_item_)
            if __item_:
                __params_['url'] = re.sub('p=[0-9]+', 'p=' + str(__item_), __params_['url']).replace(' ', '+')
            else:
                exit(1)
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'videos'}))
        __item_list_ = []
        # Every listed entry plays directly; preset the shared fields once.
        __params_['mode'] = 'play'
        __params_['content'] = 'movies'
        __params_['type'] = 0
        __params_['context'] = 0
        __params_['duration'] = '7200'
        if __soup_:
            __xbmcdict_ = XBMCDict(0).update(__params_)
            for __item_ in __soup_.findAll('div', {'class': 'video-preview'}):
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __temp_ = __item_.find('div', {'class': 'jcarousel'}).a
                    if __temp_:
                        __temp_ = __temp_.get('href')
                        if not __temp_.startswith('http://'):
                            __temp_ = __base_url_ + __temp_
                        __dict_['url'] = __temp_
                    __dict_['title'] = __item_.find('div', {'class': 'preview-title'}).get('title').encode('UTF-8')
                    __dict_['tvshowtitle'] = __dict_['title']
                    __dict_['originaltitle'] = __dict_['title']
                    # Thumbnails may be protocol-relative ('//...').
                    __temp_ = __item_.find('div', {'class': 'jcarousel'}).img.get('src')
                    if __temp_.startswith('//'):
                        __temp_ = 'http:' + __temp_
                    __dict_['cover_url'] = a.image(__temp_)
                    __dict_['thumb_url'] = __dict_['cover_url']
                    __dict_['poster'] = __dict_['cover_url']
                    # Convert the 'HH:MM:SS' length badge into seconds.
                    __temp_ = __item_.find('div', {'class': 'preview-info-box length'}).b.string
                    if __temp_:
                        __temp_ = re.search('([0-9]+):([0-9]+):([0-9]+)', __temp_)
                        __dict_['duration'] = str((int(__temp_.group(1)) * 60 * 60) + (int(__temp_.group(2)) * 60) + int(__temp_.group(3)))
                    __dict_['sub_site'] = __site_
                    __item_list_.extend([__dict_])
        # Pagination: explicit prev-page / next-page anchors in the body.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('body'))
        if __soup_.find('a', {'id': 'prev-page'}):
            __item_ = __soup_.find('a', {'id': 'prev-page'}).get('href').replace(' ', '+')
            if not __item_.startswith('http://'):
                __item_ = __base_url_ + __item_
            # Pager links point at search.php even when browsing the index.
            if 'index.php' in __params_['url']:
                __item_ = __item_.replace('search.php', 'index.php')
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_, 'content': __params_['content'], 'title': a.language(30017, True), 'cover_url': a.image('previous.png', image), 'backdrop_url': a.art(), 'type': 3}])
        if __soup_.find('a', {'id': 'next-page'}):
            __item_ = __soup_.find('a', {'id': 'next-page'}).get('href').replace(' ', '+')
            if 'index.php' in __params_['url']:
                __item_ = __item_.replace('search.php', 'index.php')
            if not __item_.startswith('http://'):
                __item_ = __base_url_ + __item_
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_, 'content': __params_['content'], 'title': a.language(30018, True), 'cover_url': a.image('next.png', image), 'backdrop_url': a.art(), 'type': 3}])
        # The last anchor of the pagination block is the highest page ("go to page").
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'pagination'}))
        __last_item_ = False
        if __soup_:
            for __item_ in reversed(__soup_.findAll('a')):
                __last_item_ = __item_.get('href')
                if not __last_item_.startswith('http://'):
                    __last_item_ = __base_url_ + __last_item_
                break
        if __last_item_:
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto', 'title': a.language(30019, True), 'cover_url': a.image('goto.png', image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'play':
        # The stream URL is the 'video_url' field of the flash player's flashvars.
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('object', {'id': 'videoContainer'}))
        __item_list_ = []
        if __soup_:
            __item_ = __soup_.find('param', {'name': 'flashvars'})
            __item_ = re.search('.*?video_url=(.+?)&.*?', str(__item_))
            if __item_:
                __item_ = __item_.group(1)
            __xbmcdict_ = XBMCDict(0).update(__params_)
            if __item_:
                __dict_ = __xbmcdict_.copy()
                __dict_['url'] = __item_
                __item_list_.extend([__dict_])
        else:
            a.alert(a.language(30904, True), sound=False)
        if __item_list_:
            from playback import Playback
            Playback().choose_sources(__item_list_)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Favorites manager: dispatches on params['mode'].

    Modes:
      main            - static sub-menu (all / movies / tvshows / episodes)
      add_favorite    - insert the serialized item dict into the favorites table
      delete_favorite - remove the favorite matching (sub_site, content, url)
      list_favorites  - list stored favorites, optionally filtered by content
      clear_favorites - after user confirmation, drop the whole table (no
                        sub_site given) or delete one sub-site's rows, then
                        VACUUM the database to reclaim space
    """
    site = self.__module__
    addon = Addon()
    common = addon.common
    mode = params['mode']
    # Ensure the favorites table exists / is migrated before any query runs.
    common.update_favorites_db()
    if mode == 'main':
        __item_list_ = [{'site': site, 'mode': 'list_favorites', 'title': addon.language(30884, True),
                         'content': 'all', 'sub_site': params['sub_site'],
                         'cover_url': addon.image('all.png'), 'backdrop_url': addon.art(), 'type': 3},
                        {'site': site, 'mode': 'list_favorites', 'title': addon.language(30885, True),
                         'content': 'movies', 'sub_site': params['sub_site'],
                         'cover_url': addon.image('movies.png'), 'backdrop_url': addon.art(), 'type': 3},
                        {'site': site, 'mode': 'list_favorites', 'title': addon.language(30886, True),
                         'content': 'tvshows', 'sub_site': params['sub_site'],
                         'cover_url': addon.image('tvshows.png'), 'backdrop_url': addon.art(), 'type': 3},
                        {'site': site, 'mode': 'list_favorites', 'title': addon.language(30888, True),
                         'content': 'episodes', 'sub_site': params['sub_site'],
                         'cover_url': addon.image('scenes.png'), 'backdrop_url': addon.art(), 'type': 3}]
        addon.add_items(__item_list_)
        addon.end_of_directory()
    elif mode == 'add_favorite':
        # The favorited item's full dict travels serialized in '__params_'.
        params = AddonDict(common.addon_type()).str_update(params['__params_'])
        execute = 'INSERT INTO ' + common.fav_db_table + ' (sub_site, content, url, __params_) VALUES (?, ?, ?, ?)'
        inserted = common.db.execute(execute, (params['sub_site'], params['content'], params['url'], str(params)))
        if common.to_bool(inserted):
            # Wrapper return codes: 1 == newly inserted, 2 == already present
            # (presumed from usage here -- TODO confirm against the db helper).
            if inserted == 1:
                addon.alert(str(addon.language(30891, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30893, True)))
            if inserted == 2:
                addon.alert(str(params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30890, True)))
    elif mode == 'delete_favorite':
        params = AddonDict(common.addon_type()).str_update(params['__params_'])
        execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=? AND url=?'
        deleted = common.db.execute(execute, (params['sub_site'], params['content'], params['url']))
        if common.to_bool(deleted):
            addon.alert(str(addon.language(30892, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30894, True)))
        xbmc.executebuiltin('Container.Refresh')
    elif mode == 'list_favorites':
        if params['content'] == 'all':
            sql_params = (params['sub_site'],)
            execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=?'
        else:
            sql_params = (params['sub_site'], params['content'])
            execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=?'
        selected = common.db.fetchall(execute, sql_params)
        item_list = []
        if selected:
            # Row layout: (id, sub_site, content, url, serialized item dict).
            # Use row-local names so the outer 'site'/'params' are not clobbered.
            for _row_id, _row_site, _row_content, _row_url, _row_params in selected:
                fav_params = AddonDict(common.addon_type()).str_update(_row_params)
                fav_params['context'] = 4  # favorites-flavored context menu
                item_list.append(fav_params)
        if item_list:
            addon.add_items(item_list)
        # Always close the directory, even when there are no favorites yet.
        addon.end_of_directory()
    elif mode == 'clear_favorites':
        # Prompt the user for confirmation prior to clearing favorites.
        if not params['sub_site']:
            # No sub-site given: wipe everything by dropping the whole table.
            execute = 'DROP TABLE ' + common.fav_db_table
            sql_params = ''  # falsy placeholder -- the db wrapper treats it as "no parameters"
        else:
            execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=?'
            sql_params = (params['sub_site'],)
        clear_favs = xbmcgui.Dialog().yesno(
            common.addon_name + ' - ' + addon.language(30895, True), ' ',
            addon.language(30896, True),
            nolabel=addon.language(30899, True), yeslabel=addon.language(30898, True))
        if common.to_bool(clear_favs):
            cleared = common.db.execute(execute, sql_params)
            if common.to_bool(cleared):
                # BUGFIX: SQLite's VACUUM statement takes no table name --
                # 'VACUUM <table>' raises an error, so the database file was
                # never actually compacted. Plain 'VACUUM' rebuilds the whole
                # database and reclaims the freed pages.
                common.db.execute('VACUUM')
                addon.alert(str(addon.language(30897, True)))
            xbmc.executebuiltin('Container.Refresh')
def __init__(self, __params_):
    """Site scraper entry point (freeomovie.com): dispatches on __params_['mode'].

    Modes: 'main' static menu, 'categories' taxonomy links, 'list' listing
    pages (search/goto input, wp-pagenavi pagination), 'play' source
    resolution from the mirror-tab list with a videosection fallback.
    """
    import re
    from addon import Addon
    from addondict import AddonDict as XBMCDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment
    a = Addon()
    __site_ = self.__module__  # module name doubles as the site identifier
    __mode_ = __params_['mode']
    __home_url_ = 'http://www.freeomovie.com/'
    __movies_url_ = __home_url_ + 'category/full-movie/'
    __scenes_url_ = __home_url_ + 'category/clips/'
    # NOTE(review): __home_url_ already ends with '/' so this yields
    # 'http://www.freeomovie.com//?s=' (double slash) -- confirm the site
    # tolerates it before changing.
    __search_url_ = __home_url_ + '/?s='
    # Category links that are top-level sections, not browsable categories.
    __false_positives_ = ['http://www.freeomovie.com/category/full-movie/', 'http://www.freeomovie.com/category/clips/']
    if __mode_ == 'main':
        # Static top-level menu: all, movies, scenes, categories, search.
        __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies', 'url': __home_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30001), 'content': 'movies', 'url': __movies_url_, 'cover_url': a.image('movies.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30002), 'content': 'movies', 'url': __scenes_url_, 'cover_url': a.image('scenes.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies', 'url': __home_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                        {'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search', 'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
        __item_list_.extend(a.favs_hist_menu(__site_))
        __item_list_.extend(a.extended_menu())
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'categories':
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'multi-column-taxonomy-list'}))
        __item_list_ = []
        if __soup_:
            for __item_ in __soup_.findAll('a'):
                if __item_:
                    if __item_.get('href') not in __false_positives_:
                        __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': __item_.string.encode('UTF-8'), 'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'list':
        if __params_['content'] == 'search':
            # Prompt for a search phrase; abort the directory if cancelled.
            __item_ = a.search_input()
            if __item_:
                __params_['url'] = __search_url_ + __item_
            else:
                exit(1)
        elif __params_['content'] == 'goto':
            # Bound the page prompt by the page number visible in the URL.
            __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
            if __last_item_:
                __last_item_ = int(__last_item_.group(1))
            else:
                __last_item_ = 10000
            __item_ = a.page_input(__last_item_)
            if __item_:
                __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
            else:
                exit(1)
        __html_ = a.get_page(__params_['url'])
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'content'}))
        __item_list_ = []
        # Every listed entry plays directly; preset the shared fields once.
        __params_['mode'] = 'play'
        __params_['content'] = 'movies'
        __params_['type'] = 0
        __params_['context'] = 0
        __params_['duration'] = '7200'
        if __soup_:
            __xbmcdict_ = XBMCDict(0).update(__params_)
            for __item_ in __soup_.findAll('div', {'class': 'postbox'}):
                if __item_:
                    if __item_.h2.a.get('href') not in __false_positives_:
                        __dict_ = __xbmcdict_.copy()
                        # Clips are shorter and classified as episodes.
                        if __scenes_url_ in __params_['url']:
                            __dict_['duration'] = '1500'
                            __dict_['content'] = 'episodes'
                        __dict_['url'] = __item_.h2.a.get('href')
                        __dict_['title'] = __item_.h2.a.get('title').encode('UTF-8')
                        __dict_['tvshowtitle'] = __dict_['title']
                        __dict_['originaltitle'] = __dict_['title']
                        __dict_['cover_url'] = a.image(__item_.img.get('src'))
                        __dict_['thumb_url'] = __dict_['cover_url']
                        __dict_['poster'] = __dict_['cover_url']
                        __dict_['sub_site'] = __site_
                        __item_list_.extend([__dict_])
        # Pagination via the wp-pagenavi widget.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'wp-pagenavi'}))
        __last_item_ = False
        if __soup_:
            for __item_ in __soup_.findAll('a', href=True):
                if __item_:
                    if __item_.get('class') == 'previouspostslink':
                        __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': a.language(30017, True), 'cover_url': a.image('previous.png', image), 'backdrop_url': a.art(), 'type': 3}])
                    if __item_.get('class') == 'nextpostslink':
                        __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'], 'title': a.language(30018, True), 'cover_url': a.image('next.png', image), 'backdrop_url': a.art(), 'type': 3}])
                    if __item_.get('class') == 'last':
                        __last_item_ = __item_.get('href')
            if not __last_item_:
                # No explicit "last" anchor: fall back to positional lookup --
                # the final anchor (or the one before "next") is the last page.
                try:
                    if not __soup_.find('a', {'class': 'nextpostslink'}):
                        __last_item_ = __soup_.findAll('a', href=True)[-1].get('href')
                    else:
                        __last_item_ = __soup_.findAll('a', href=True)[-2].get('href')
                except:
                    pass
            if __last_item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto', 'title': a.language(30019, True), 'cover_url': a.image('goto.png', image), 'backdrop_url': a.art(), 'type': 3}])
        a.add_items(__item_list_)
        a.end_of_directory()
    elif __mode_ == 'play':
        __html_ = a.get_page(__params_['url'])
        # Primary source: the mirror tab list; each tab's href embeds the
        # target in a 'myURL[]=' query argument.
        __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'id': 'countrytabs'}))
        __item_list_ = []
        if __soup_:
            for __index_, __items_ in enumerate(__soup_.findAll('a', href=True)):
                __item_ = ''
                # Tabs with id 'jpg' are screenshot links, not video mirrors.
                if not __items_.get('id') == 'jpg':
                    __item_ = __items_.get('href')
                    __item_ = re.search('.*myURL\[\]=(.+)$', __item_, re.DOTALL)
                    if __item_:
                        __item_ = re.sub('&tab=[0-9]+', '', __item_.group(1))
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __dict_['count'] = __index_  # preserve the tab/mirror order
                    __item_list_.extend([__dict_])
            if __item_list_:
                from playback import Playback
                Playback().choose_sources(__item_list_)
            else:
                # Fallback: anchors and iframes inside the videosection block.
                __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'videosection'}))
                if __soup_:
                    for __items_ in __soup_.findAll('a', href=True):
                        __item_ = __items_.get('href')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
                    for __items_ in __soup_.findAll('iframe', src=True):
                        __item_ = __items_.get('src')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
                else:
                    a.alert(a.language(30904, True), sound=False)
                if __item_list_:
                    from playback import Playback
                    Playback().choose_sources(__item_list_)
                else:
                    a.alert(a.language(30904, True), sound=False)
        else:
            a.alert(a.language(30904, True), sound=False)
def __init__(self, params):
    """Dispatch handler for the urbanhentai.com site plugin.

    params is the routing dict built by the plugin framework; the only key
    always present is 'mode' ('main' | 'categories' | 'list' | 'play').
    'url' / 'content' are read by the non-main modes.  All output goes
    through the Addon() helper (directory items or direct playback);
    nothing is returned.
    """
    import re
    import urllib2
    from addon import Addon
    from addondict import AddonDict
    from BeautifulSoup import BeautifulSoup, SoupStrainer

    a = Addon()
    site = self.__module__
    mode = params['mode']
    base_url = 'http://urbanhentai.com'
    home_url = base_url
    search_url = base_url + '/?s='
    # hrefs to skip when scraping category links (anchor-only links)
    false_positives = ['#']

    if mode == 'main':
        # Static top-level menu: all videos / categories / search, plus the
        # shared favourites-history and extended menus.
        item_list = [
            {'site': site, 'mode': 'list', 'title': a.language(30006),
             'content': '', 'url': home_url,
             'cover_url': a.image('all.png', image),
             'backdrop_url': a.art(), 'type': 3},
            {'site': site, 'mode': 'categories', 'title': a.language(30005),
             'content': '', 'url': home_url,
             'cover_url': a.image('categories.png', image),
             'backdrop_url': a.art(), 'type': 3},
            {'site': site, 'mode': 'list', 'title': a.language(30004),
             'content': 'search', 'url': search_url,
             'cover_url': a.image('search.png', image),
             'backdrop_url': a.art(), 'type': 3},
        ]
        item_list.extend(a.favs_hist_menu(site))
        item_list.extend(a.extended_menu())
        a.add_items(item_list)
        a.end_of_directory()

    elif mode == 'categories':
        # Category links live under the site's menu item with this fixed id.
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'li', {'id': 'menu-item-4538'}))
        item_list = []
        if soup:
            genre_list = soup.find('ul', {'class': 'sub-menu'})
            if genre_list:
                # NOTE(review): iterates soup.findAll('a'), not
                # genre_list.findAll('a') — genre_list is only used as a
                # presence check.  Preserved as-is; verify intent.
                for item in soup.findAll('a'):
                    if item.get('href') not in false_positives:
                        item_list.extend([{
                            'site': site, 'mode': 'list',
                            'url': item.get('href').replace(' ', '+'),
                            'content': '',
                            'title': item.string.encode('UTF-8'),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()

    elif mode == 'list':
        # 'search' prompts the user for a query; 'goto' prompts for a page
        # number and rewrites the /page/N/ component of the URL.
        if params.get('content', '') == 'search':
            item = a.search_input()
            if item:
                params['url'] = search_url + item.replace(' ', '+')
            else:
                exit(1)
        elif params.get('content', '') == 'goto':
            last_item = re.search('/page/([0-9]+)/', params['url'])
            if last_item:
                last_item = int(last_item.group(1))
            else:
                # No page marker found: fall back to a large upper bound.
                last_item = 10000
            item = a.page_input(last_item)
            if item:
                params['url'] = re.sub(
                    '/page/[0-9]+/', '/page/' + str(item) + '/',
                    params['url']).replace(' ', '+')
            else:
                exit(1)

        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': re.compile('loop-content.*')}))
        item_list = []
        # Defaults applied to every scraped episode entry.
        params['mode'] = 'play'
        params['content'] = 'episodes'
        params['type'] = 0
        params['context'] = 0
        params['duration'] = '1500'
        params['sub_site'] = site
        if soup:
            addondict = AddonDict(0).update(params)
            for item in soup.findAll('div', {'id': re.compile('post-[0-9]+')}):
                _dict = addondict.copy()
                clip_link = item.find('a', {'class': 'clip-link'})
                if clip_link:
                    url = clip_link.get('href')
                    if not url.startswith('http://'):
                        url = base_url + url
                    _dict['url'] = url
                    try:
                        _dict['title'] = clip_link.get('title').encode('UTF-8')
                    except Exception:
                        # Missing/undecodable title attribute: fall back to
                        # the entry heading text.
                        data = item.find('h2', {'class': 'entry-title'})
                        if data:
                            _dict['title'] = str(data.a.contents[0])
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    img = item.find('img')
                    if img:
                        img = img.get('src')
                        if img.startswith('//'):
                            # Protocol-relative thumbnail URL.
                            img = 'http:' + img
                    else:
                        img = ''
                    _dict['cover_url'] = a.image(img)
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    item_list.extend([_dict])

        # Pagination controls (previous / next / go-to-page).
        pages = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': 'wp-pagenavi'}))
        if pages:
            previouspage = pages.find('a', {'class': 'previouspostslink'})
            nextpage = pages.find('a', {'class': 'nextpostslink'})
            lastpage = pages.find('a', {'class': 'last'})
            if previouspage:
                previouspage = previouspage.get('href').replace(' ', '+')
                item_list.extend([{
                    'site': site, 'mode': 'list', 'url': previouspage,
                    'content': params['content'],
                    'title': a.language(30017, True),
                    'cover_url': a.image('previous.png', image),
                    'backdrop_url': a.art(), 'type': 3}])
            if nextpage:
                nextpage = nextpage.get('href').replace(' ', '+')
                item_list.extend([{
                    'site': site, 'mode': 'list', 'url': nextpage,
                    'content': params['content'],
                    'title': a.language(30018, True),
                    'cover_url': a.image('next.png', image),
                    'backdrop_url': a.art(), 'type': 3}])
            if lastpage:
                lastpage = lastpage.get('href').replace(' ', '+')
                item_list.extend([{
                    'site': site, 'mode': 'list', 'url': lastpage,
                    'content': 'goto',
                    'title': a.language(30019, True),
                    'cover_url': a.image('goto.png', image),
                    'backdrop_url': a.art(), 'type': 3}])
        a.add_items(item_list)
        a.end_of_directory()

    elif mode == 'play':
        # The player URL is embedded as a JS `file: '...'` assignment inside
        # the entry-content div.
        html = a.get_page(params['url'])
        soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer(
            'div', {'class': re.compile('entry-content.*')}))
        item_list = []
        if soup:
            item = re.search(r'file\s*:\s*[\'"](.+?)[\'"]',
                             str(soup.contents[0]))
            if item:
                item = item.group(1)
                if base_url in item:
                    # Same-site link: resolve any redirect to get the final
                    # media URL.
                    try:
                        opener = urllib2.build_opener()
                        # BUGFIX: the original assigned opener.addheaders
                        # three times, each assignment replacing the list, so
                        # only the Accept header was ever sent.  Set all
                        # three headers in a single list.
                        opener.addheaders = [
                            ('User-agent',
                             'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/45.0.2454.101 Safari/537.36'),
                            ('Referer', params['url']),
                            ('Accept',
                             'text/html,application/xhtml+xml,'
                             'application/xml;q=0.9,image/webp,*/*;q=0.8'),
                        ]
                        urllib2.install_opener(opener)
                        item = urllib2.urlopen(item).geturl()
                    except urllib2.HTTPError as e:
                        # An HTTP error that still redirected somewhere new
                        # gives us a usable URL; otherwise give up.
                        if item != e.geturl():
                            item = e.geturl()
                        else:
                            item = None
                if item:
                    addondict = AddonDict(0).update(params)
                    _dict = addondict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
        if item_list:
            from playback import Playback
            Playback().choose_sources(item_list)
        else:
            a.alert(a.language(30904, True), sound=False)