def sources(self, url, hostDict, hostprDict):
    """Collect hoster links listed in the page's player tab."""
    found = []
    try:
        if url == None:
            return found
        page = client.request(url + self.video_tab)
        # Entries live as <li> items inside the player list.
        listing = client.parseDOM(page, 'ul', attrs={'class': 'player_ul'})[0]
        for item in client.parseDOM(listing, 'li'):
            try:
                label = client.parseDOM(item, 'a')[0]
                href = client.parseDOM(item, 'a', ret='href')[0]
                host, lang, info, quality = self.get_info_from_desc(label)
                found.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': href,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        return found
    except:
        return found
def search(self, query_bases, options):
    """Try every (query_base, option) combination against the search
    endpoint and return the raw HTML of the first matching result page.

    Returns the page content (str) or None when no combination matched.
    """
    attempt = 0
    for option in options:
        for query_base in query_bases:
            # Strip characters the site rejects, then '+'-join the words.
            q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
            q = q.replace("  ", " ").replace(" ", "+")
            log_utils.log("RLSSCN query : " + q)
            url = self.search_link % (q)
            html = requests.get(url)
            log_utils.log("RLSSCN try test " + str(attempt) + " - html : " + str(html))
            if html.status_code == 200:
                log_utils.log("RLSSCN test " + str(attempt) + " Ok")
                url = client.parseDOM(html.content, "h2", attrs={"class": "title"})
                url = client.parseDOM(url, "a", ret='href')
                log_utils.log("RLSSCN test " + str(attempt) + " : " + str(url))
                if len(url) > 0:
                    html = requests.get(url[0])
                    if html.status_code == 200:
                        return html.content
            else:
                # FIX: previously read result.status_code where `result` was
                # always None (AttributeError) and concatenated the int status
                # code directly into the log string (TypeError).
                log_utils.log("RLSSCN test " + str(attempt) + " return code : "
                              + str(html.status_code) + "- next test " + str(attempt + 1))
            attempt += 1
    return None
def sources(self, url, hostDict, hostprDict):
    """Return the single iframe source from the page's video player section."""
    found = []
    try:
        if url == None:
            return found
        page = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        player = client.parseDOM(page, 'section', attrs={'id': 'video_player'})[0]
        link = client.parseDOM(player, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return found
        # A 'Z lektorem' badge marks the voice-over version.
        info = 'Lektor' if 'Z lektorem' in client.parseDOM(player, 'span') else None
        found.append({
            'source': host,
            'quality': source_utils.check_sd_url(link),
            'language': 'pl',
            'url': link,
            'info': info,
            'direct': False,
            'debridonly': False
        })
        return found
    except:
        return found
def sources(self, url, hostDict, hostprDict):
    """Decode the obfuscated script payload on the active tab and return the
    single hoster link it hides."""
    found = []
    try:
        if url == None:
            return found
        page = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
        info = self.get_lang_by_type(client.parseDOM(page, 'title')[0])
        active_tab = client.parseDOM(page, 'div', attrs={'class': 'tab-pane active'})[0]
        script = client.parseDOM(active_tab, 'script')[0]
        # The payload is the first double-quoted string inside the script.
        payload = script.split('"')[1]
        decoded = self.shwp(payload)
        link = client.parseDOM(decoded, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(link, hostDict)
        if not valid:
            return found
        found.append({
            'source': host,
            'quality': source_utils.check_sd_url(link),
            'language': 'pl',
            'url': link,
            'info': info,
            'direct': False,
            'debridonly': False
        })
        return found
    except:
        return found
def search(self, title, localtitle, year):
    """Search the site for a movie by its localized title.

    Returns [absolute_url, matched_anchor_text] on success, else None.
    """
    try:
        # Python 2 hack kept from the original: force utf-8 default encoding.
        import sys
        reload(sys)
        sys.setdefaultencoding('utf8')
        # FIX: the cleaned-title variables were assigned twice; the first
        # cleantitle.get() values and the `title`-based word list were dead.
        words = cleantitle.query(localtitle).split(' ')
        query = self.search_link % urllib.quote_plus(cleantitle.query(localtitle))
        url = urlparse.urljoin(self.base_link, query)
        result = client.request(url)
        result = client.parseDOM(result, 'div', attrs={'class': 'row search-results'})
        results = client.parseDOM(
            result, 'div',
            attrs={'class': 'item-detail-bigblock title title-bigblock'})
        for result in results:
            movieneourl = client.parseDOM(result, 'a', ret='href')[0]
            result = client.parseDOM(result, 'a')[0]
            # Accept the first entry containing any title word plus the year.
            for word in words:
                if word in result and year in result:
                    return [urlparse.urljoin(self.base_link, movieneourl), result]
    except Exception as e:
        # FIX: py2-only `except Exception, e` / print-statement replaced with
        # syntax valid on both Python 2 and 3.
        print(str(e))
        return
def search(self, localtitle, year, search_type):
    """Return the unique exact-title match in the given result section, or
    fall back to disambiguating several candidates by year."""
    try:
        wanted = cleantitle.get(localtitle)
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(localtitle)))
        page = client.request(query)
        section = client.parseDOM(page, 'div', attrs={'id': search_type})
        captions = client.parseDOM(section, 'figcaption')
        labels = client.parseDOM(section, 'figcaption', ret='title')
        matches = []
        for idx in range(len(labels)):
            href = client.parseDOM(captions[idx], 'a', ret='href')[0]
            if cleantitle.get(labels[idx]) == wanted:
                matches.append(href)
        if len(matches) == 1:
            return matches[0]
        return self.findMatchByYear(year, matches)
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    """Locate a movie's /watching.html URL, preferring a year-exact match
    and falling back to the first alias match without a year."""
    try:
        normalized = cleantitle.normalize(title)
        search_url = urlparse.urljoin(self.base_link,
                                      self.search_link % cleantitle.geturl(normalized))
        page = client.request(search_url, headers=headers, timeout='15')
        items = client.parseDOM(page, 'div', attrs={'class': 'ml-item'})
        pairs = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        # (href, label, [year]) where year comes from "(YYYY" in the label.
        results = [(href, label, re.findall('\((\d{4})', label)) for href, label in pairs]
        try:
            dated = [(href, label, years[0]) for href, label, years in results
                     if len(years) > 0]
            url = [href for href, label, y in dated
                   if self.matchAlias(label, aliases) and (year == y)][0]
        except:
            url = None
        if (url == None):
            url = [href for href, label, years in results
                   if self.matchAlias(label, aliases)][0]
        return urlparse.urljoin(self.base_link, '%s/watching.html' % url)
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a TV-show title to the site's URL slug (last path segment of
    # the matched show page). Returns the slug or None on any failure.
    try:
        t = cleantitle.get(tvshowtitle)
        q = urllib.quote_plus(cleantitle.query(tvshowtitle))
        p = urllib.urlencode({'term': q})
        # First attempt: the site's JSON autocomplete endpoint.
        r = client.request(self.search_link, post=p, XHR=True)
        try:
            r = json.loads(r)
        except:
            r = None
        # NOTE(review): this unconditional reset discards the JSON result and
        # forces the HTML fallback below on every call — looks deliberate
        # (autocomplete disabled?) but worth confirming.
        r = None
        if r:
            r = [(i['seo_url'], i['value'], i['label']) for i in r
                 if 'value' in i and 'label' in i and 'seo_url' in i]
        else:
            # HTML search fallback fetched through the proxy helper.
            r = proxy.request(self.search_link_2 % q, 'tv shows')
            r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title'),
                  client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
            # Year = last 4-digit run found in the anchor text.
            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
        # Keep only entries whose cleaned title and year both match.
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        url = r[0][0]
        url = proxy.parse(url)
        url = url.strip('/').split('/')[-1]
        url = url.encode('utf-8')
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('XWatchSeries - Exception: \n' + str(failure))
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve an episode link: search for "<show>-season-N", pick the page
    # whose title matches, then pick the anchor labelled with the episode
    # number from the details block. Returns the href or None.
    try:
        if url == None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url[
            'episode'] = premiered, season, episode
        try:
            clean_title = cleantitle.geturl(
                url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(
                self.base_link,
                self.search_link % clean_title.replace('-', '+'))
            search_results = self.scraper.get(search_url).content
            parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
            parsed = [(client.parseDOM(i, 'a', ret='href'),
                       re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
            parsed = [
                (i[0][0], i[1][0]) for i in parsed
                if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)
            ]
            # On success `url` becomes the matched season-page href.
            url = parsed[0][0]
        except:
            # NOTE(review): if the search fails, `url` is still the query
            # dict from above and the scraper.get(url) below will fail into
            # the outer except, returning None.
            pass
        data = self.scraper.get(url).content
        data = client.parseDOM(data, 'div', attrs={'id': 'details'})
        data = zip(client.parseDOM(data, 'a'),
                   client.parseDOM(data, 'a', ret='href'))
        # Anchors are labelled with the bare episode number, e.g. "5".
        url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
        return url[0][1]
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve an episode to its site-relative URL by matching the episode
    # name and/or air date on the show's episode listing page.
    try:
        if url == None:
            return
        url = '%s/serie/%s' % (self.base_link, url)
        r = proxy.request(url, 'tv shows')
        r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})
        t = cleantitle.get(title)
        # Per <li>: (href list, episode-name list, yyyy-mm-dd date matches).
        r = [(client.parseDOM(i, 'a', ret='href'),
              client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}),
              re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
        # Normalize to (href, name-or-None, date-or-None). The two list
        # concatenations intentionally partition entries; page order is lost.
        r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
        r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
        r = [(i[0][0], i[1], i[2]) for i in r if i[0]]
        # Prefer exact name+date match; then name only; then date only.
        # Ambiguity (more than one candidate) is treated as failure.
        url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if not url:
            url = [i for i in r if t == cleantitle.get(i[1])]
        if len(url) > 1 or not url:
            url = [i for i in r if premiered == i[2]]
        if len(url) > 1 or not url:
            raise Exception()
        url = url[0][0]
        url = proxy.parse(url)
        # Strip scheme/host; keep the path only.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        failure = traceback.format_exc()
        log_utils.log('XWatchSeries - Exception: \n' + str(failure))
        return
def links(self, url):
    # Follow the post's "money" link and harvest the single-file download
    # links, handling password-protected posts. Returns a list of hoster
    # URLs, or None (falls through the bare except) on failure.
    urls = []
    try:
        if url is None:
            return
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
        r = client.parseDOM(r, 'a', ret='href')
        # First anchor whose href contains 'money' is the links page.
        r1 = [(i) for i in r if 'money' in i][0]
        r = client.request(r1)
        r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]
        if 'enter the password' in r:
            # Password-protected post: submit the known site password and
            # re-fetch the page with the returned cookie.
            plink = client.parseDOM(r, 'form', ret='action')[0]
            post = {'post_password': '******', 'Submit': 'Submit'}
            send_post = client.request(plink, post=post, output='cookie')
            link = client.request(r1, cookie=send_post)
        else:
            link = client.request(r1)
        # The "Single" section's table rows hold the actual links.
        link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
        link = client.parseDOM(link, 'a', ret='href')
        # Target URL is carried in the last query-string parameter value.
        link = [(i.split('=')[-1]) for i in link]
        for i in link:
            urls.append(i)
        return urls
    except:
        pass
def searchMovie(self, title, year, aliases, headers):
    """Find a movie page URL by scanning search results and confirming the
    release year via the per-item info endpoint.

    Returns the matching href, or None when nothing matches.
    """
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        r = self.s.get(url, headers=headers).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        # (href, title, numeric item id pulled from the href)
        r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
        # FIX: `url` was only bound inside the loop, so an empty result list
        # raised NameError, and the old fallback indexed a `results` list
        # that was never populated (IndexError). Both paths effectively
        # returned None via the bare except — now that is explicit.
        url = None
        for i in r:
            try:
                # Confirm the release year on the item's info endpoint.
                info = client.request(
                    urlparse.urljoin(self.base_link, self.info_link % i[2]),
                    headers=headers, timeout='15')
                y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
                if self.matchAlias(i[1], aliases) and (year == y):
                    url = i[0]
                    break
            except:
                pass
        return url
    except:
        return
def get_from_main_player(self, result, sources):
    """Build source dicts from the main player iframes, pairing each link
    with its translation-type label from the player navigation bar."""
    quality = 'SD'
    # The 'calidad2' badge marks HD, but only trust it for the first player.
    if not sources and client.parseDOM(result, 'span', attrs={'class': 'calidad2'}):
        quality = 'HD'
    player = client.parseDOM(result, 'div', attrs={'id': 'player2'})
    links = client.parseDOM(player, 'iframe', ret='src')
    nav = client.parseDOM(result, 'div', attrs={'class': 'player_nav'})
    labels = client.parseDOM(nav, 'a')
    collected = []
    for idx, link in enumerate(links):
        if not self.url_not_on_list(link, sources):
            continue
        lang, info = self.get_lang_by_type(labels[idx])
        host = link.split("//")[-1].split("/")[0]
        collected.append({
            'source': host,
            'quality': quality,
            'language': lang,
            'url': link,
            'info': info,
            'direct': False,
            'debridonly': False
        })
    return collected
def do_search(self, title, local_title, year, video_type):
    """Search the site and return the absolute URL of the first result of
    the requested type whose name and (optional) year match."""
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        search_url = search_url % urllib.quote_plus(cleantitle.query(title))
        html = client.request(search_url)
        for row in client.parseDOM(html, 'div', attrs={'class': 'item'}):
            # Skip entries of the wrong kind (movie vs. show).
            row_type = client.parseDOM(row, 'div', attrs={'class': 'typepost'})[0]
            if row_type != video_type:
                continue
            names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0].split('/')
            year_found = client.parseDOM(row, 'span', attrs={'class': 'year'})
            titles = [cleantitle.get(i) for i in [title, local_title]]
            if self.name_matches(names, titles, year) and (
                    not year_found or year_found[0] == year):
                href = client.parseDOM(row, 'a', ret='href')[0]
                return urlparse.urljoin(self.base_link, href)
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Find the href of a specific SxE entry on the show page."""
    try:
        if url == None:
            return
        html = client.request(url)
        # The page's HTML is broken (unclosed div), so the DOM parser cannot
        # be used for the season lists — fall back to regex.
        season_blocks = re.findall('<ul class="episodios">(.*?)</ul>', html,
                                   re.MULTILINE | re.DOTALL)
        for block in season_blocks:
            for entry in re.findall('<li>(.*?)</li>', block,
                                    re.MULTILINE | re.DOTALL):
                numbering = client.parseDOM(
                    entry, 'div', attrs={'class': 'numerando'})[0].split('x')
                season_found = numbering[0].strip()
                episode_found = numbering[1].strip()
                # Whole block belongs to one season; bail out on mismatch.
                if season_found != season:
                    break
                if episode_found == episode:
                    return client.parseDOM(entry, 'a', ret='href')[0]
    except:
        return
def search(self, localtitle, year, search_type):
    """POST a title search and return the href whose label "Name (Year)"
    matches both the cleaned title and the year."""
    try:
        post_url = urlparse.urljoin(self.base_link, self.search_link)
        html = client.request(post_url, redirect=False,
                              post={
                                  'q': cleantitle.query(localtitle),
                                  'sb': ''
                              })
        wanted = cleantitle.get(localtitle)
        for item in client.parseDOM(html, 'div', attrs={'class': 'small-item'}):
            label = client.parseDOM(item, 'a')[1]
            # Year sits between parentheses in the label text.
            found_year = label[label.find("(") + 1:label.find(")")]
            href = client.parseDOM(item, 'a', ret='href')[1]
            # search_type distinguishes movie vs. series URLs.
            if search_type not in href:
                continue
            if cleantitle.get(label) == wanted and found_year == year:
                return href
    except:
        return
def search(self, localtitle, year):
    """Return the first search-result article whose cleaned title matches,
    accepting the year only when both sides provide one."""
    try:
        wanted = cleantitle.get(localtitle)
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(localtitle)))
        page = client.request(query)
        for article in client.parseDOM(page, 'article'):
            heading = client.parseDOM(article, 'h3')[0]
            href = client.parseDOM(heading, 'a', ret='href')[0]
            name = cleantitle.get(client.parseDOM(heading, 'a')[0])
            year_found = client.parseDOM(article, 'span', attrs={'class': 'dtyear'})
            if year_found:
                year_found = year_found[0]
            # Year check is best-effort: missing year on either side passes.
            if name == wanted and (not year_found or not year or year_found == year):
                return href
    except:
        return
def get_rows(self, r, search_type):
    """Return the item blocks under the section whose headline equals
    search_type; None when no such section exists."""
    for section in client.parseDOM(r, 'div', attrs={'class': 'col-sm-12'}):
        headline = client.parseDOM(section, 'h2', attrs={'class': 'headline'})
        if headline and headline[0] == search_type:
            return client.parseDOM(section, 'div',
                                   attrs={'class': 'item-block clearfix'})
def findMatchByYear(self, year, urls):
    """Fetch each candidate page and return the first whose heading carries
    the requested year in parentheses."""
    for candidate in urls:
        html = client.request(candidate)
        heading = client.parseDOM(html, 'h1')[0]
        anchor = client.parseDOM(heading, 'a')[0]
        found_year = anchor[anchor.find("(") + 1:anchor.find(")")]
        if found_year == year:
            return candidate
def sources(self, url, hostDict, hostprDict):
    # Build the source list: first the "main" player resolved via POST,
    # then additional mirrors fetched from the secondary endpoint.
    # `url` here is a dict prepared by the provider: {'url', 'post', 'more'}.
    try:
        sources = []
        try:
            search_url = url['url']
            post = url['post']
            referer = urlparse.urljoin(self.film_web, post['urlstrony'])
            result = client.request(search_url, post=post, referer=referer)
            # The endpoint answers with a bare hoster URL; anything else
            # means the main player is unavailable.
            if not result.startswith('http'):
                return sources
            valid, host = source_utils.is_host_valid(result, hostDict)
            # NOTE(review): q is computed but the first source hardcodes
            # '720p' — confirm whether that is intentional.
            q = source_utils.check_sd_url(result)
            info = ''
            if 'lektor' in result:
                info = 'Lektor'
            if 'napisy' in result:
                info = 'Napisy'
            first_found = {
                'source': host,
                'quality': '720p',
                'language': 'pl',
                'url': result,
                'info': info,
                'direct': False,
                'debridonly': False
            }
            # NOTE(review): called with the still-empty `sources`, so the
            # detected 'Lektor'/'Napisy' info above is overwritten.
            first_found['info'] = self.get_info_from_others(sources)
            sources.append(first_found)
        except:
            pass
        search_more_post = url['more']
        #search_url = urlparse.urljoin(self.base_link, self.search_more)
        result = client.request(self.base_link2, post=search_more_post)
        provider = client.parseDOM(result, 'option', ret='value')
        links = client.parseDOM(result, 'div', ret='data')
        wersja = client.parseDOM(result, 'div', attrs={'class': 'wersja'})
        #result = dom_parser.parse_dom(result, 'a')
        counter = 0
        for link in links:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                # NOTE(review): counter is not advanced on skip, so wersja
                # labels can drift out of step with their links — verify.
                continue
            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': wersja[counter],
                'direct': False,
                'debridonly': False
            })
            counter += 1
        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    """Resolve playable links for a movie or episode.

    Returns a list of source dicts (direct gvideo links or recognised
    hoster links); empty list on failure.
    """
    try:
        sources = []
        if url == None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval on provider-supplied data — consider ast.literal_eval.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            ep = data['episode']
            url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']),
                int(data['season']), ep)
            # FIX: the resolved URL was assigned to an unused variable `r`,
            # so `url` was never None and the searchShow fallback below was
            # unreachable. Capture the redirect target in `url` instead.
            url = client.request(url, headers=headers, timeout='10', output='geturl')
            if url == None:
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
        else:
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        if url == None:
            raise Exception()
        r = client.request(url, headers=headers, timeout='10')
        r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
        if 'tvshowtitle' in data:
            ep = data['episode']
            links = client.parseDOM(r, 'a', attrs={'episode-data': ep},
                                    ret='player-data')
        else:
            links = client.parseDOM(r, 'a', ret='player-data')
        for link in links:
            if '123movieshd' in link or 'seriesonline' in link:
                # These mirrors embed direct Google-video redirector URLs.
                r = client.request(link, headers=headers, timeout='10')
                r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                for i in r:
                    try:
                        sources.append({'source': 'gvideo',
                                        'quality': directstream.googletag(i)[0]['quality'],
                                        'language': 'en', 'url': i,
                                        'direct': True, 'debridonly': False})
                    except:
                        pass
            else:
                try:
                    # Host = last two labels of the link's domain.
                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse.urlparse(link.strip().lower()).netloc)[0]
                    if not host in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD',
                                    'language': 'en', 'url': link,
                                    'direct': False, 'debridonly': False})
                except:
                    pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('Series9 - Exception: \n' + str(failure))
        return sources
def sources(self, url, hostDict, hostprDict):
    # Scrape hoster links: first the Base64-obfuscated main iframe, then the
    # list of alternative "server_line" entries. Best-effort throughout —
    # each stage is wrapped in its own try so partial results still return.
    try:
        sources = []
        if url == None:
            return sources
        html = self.scraper.get(url).content
        try:
            # Main player is injected via document.write(Base64.decode("...")).
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
            try:
                # Host = last two labels of the iframe's domain.
                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': url.replace('\/', '/'),
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        except:
            pass
        # Alternative mirrors: one div.server_line per hoster.
        parsed = client.parseDOM(html, 'div', {'class': 'server_line'})
        parsed = [(client.parseDOM(i, 'a', ret='href')[0],
                   client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0])
                  for i in parsed]
        if parsed:
            for i in parsed:
                try:
                    # Label like "Server openload" → bare host name.
                    host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                    url = i[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    # Skip the generic "other" bucket.
                    if 'other' in host:
                        continue
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
        return sources
    except:
        return
def resolve(self, url):
    """Follow the site's gate page (submit link + computed cookie) and
    return the final embedded iframe src, or None on failure."""
    try:
        resp = client.request(url, output='extended')
        # resp[0] = body, resp[4] = response cookie material.
        target = client.parseDOM(resp[0], 'a', attrs={'class':'submit'}, ret='href')[0]
        cookie = self.crazy_cookie_hash(resp[4])
        page = client.request(target, cookie=cookie)
        return client.parseDOM(page, 'iframe', ret='src')[0]
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Return the href of the episode item whose text contains 'sXXeYY'."""
    try:
        if url == None:
            return
        tag = 's%02de%02d' % (int(season), int(episode))
        html = client.request(url)
        items = client.parseDOM(html, 'li', attrs={'class': 'episode'})
        match = [i for i in items if tag in i][0]
        href = client.parseDOM(match, 'a', ret='href')[0]
        return href.encode('utf-8')
    except:
        return
def sources(self, url, hostDict, hostprDict):
    # Search the site's feed for the requested episode and extract
    # debrid-only hoster links plus release name/size metadata.
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
        query = '%s S%02dE%02d' % (data['tvshowtitle'], int(
            data['season']), int(data['episode']))
        # Strip characters the search endpoint rejects.
        query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
        url = self.search_link % urllib.quote_plus(query)
        r = urlparse.urljoin(self.base_link, url)
        r = client.request(r)
        r = client.parseDOM(r, 'item')
        # NOTE(review): only the first <item>'s title is checked and only
        # r[0] is scanned for links; later feed items are ignored — confirm
        # this matches the feed layout.
        title = client.parseDOM(r, 'title')[0]
        if hdlr in title:
            # (release name, size text, hoster href) triples.
            r = re.findall(
                '<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3',
                r[0], re.DOTALL)
            for name, size, url in r:
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    # Normalize "GiB"/"MiB" to "GB"/"MB", convert MB → GB.
                    size = re.sub('i', '', size)
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass
                info = ' | '.join(info)
                valid, host = source_utils.is_host_valid(url, hostDict)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('SeriesCR - Exception: \n' + str(failure))
        return sources
def searchShow(self, title, season, aliases, headers):
    """Find the page URL for "<title> Season <n>" in the search results.

    `season` is compared as a string. Returns the href or None.
    """
    try:
        title = cleantitle.normalize(title)
        search = '%s Season %01d' % (title, int(season))
        url = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
        r = self.s.get(url, headers=headers).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
        # FIX: the season was captured with (\d), so "Season 10"+ could never
        # match and, worse, its first digit could falsely match season 1.
        # (\d+) captures the full season number.
        r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
        # i[2] is (show name, season number string).
        url = [i[0] for i in r
               if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
        return url
    except:
        return
def resolve(self, url):
    """Resolve a site-hosted page to its embedded player URL; foreign URLs
    pass through unchanged."""
    if self.base_link in url:
        try:
            # Preferred path: Base64 payload written via document.write.
            html = client.request(url)
            encoded = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
            markup = base64.b64decode(encoded)
            url = client.parseDOM(markup, 'iframe', ret='src')[0]
        except:
            # Fallback: plain anchor inside the player container.
            html = client.request(url)
            player = client.parseDOM(html, 'div', attrs={'class': 'player'})
            url = client.parseDOM(player, 'a', ret='href')[0]
    return url
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Walk the season rows and return the href of the matching episode
    number, or None when not found."""
    page_url = urlparse.urljoin(self.base_link, url)
    html = client.request(page_url)
    for row in client.parseDOM(html, 'li', attrs={'class': 'active'}):
        # Row header looks like "Season N"; compare the number as a string.
        season_label = client.parseDOM(row, 'span')[0]
        if season_label.split(' ')[1] != season:
            continue
        for entry in client.parseDOM(row, 'li'):
            # Episode anchors look like "Episode N".
            ep_no = client.parseDOM(entry, 'a')[0].split(' ')[1]
            if ep_no == episode:
                return client.parseDOM(entry, 'a', ret='href')[0]
    return None
def sources(self, url, hostDict, hostprDict):
    # Collect hoster links from a show page fetched through the proxy; each
    # link carries its real target base64-encoded in the 'r' query param.
    try:
        sources = []
        if url == None:
            return sources
        url = urlparse.urljoin(self.base_link, url)
        r = proxy.request(url, 'tv shows')
        links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
        # De-duplicate while preserving page order.
        links = [x for y,x in enumerate(links) if x not in links[:y]]
        for i in links:
            try:
                url = i
                url = proxy.parse(url)
                # Real target is base64 in the 'r' query parameter.
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                url = url.decode('base64')  # Python 2-only str codec
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                # Host = last two labels of the target's domain; unknown
                # hosts are skipped via the raised exception.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict:
                    raise Exception()
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('XWatchSeries - Exception: \n' + str(failure))
        return sources
def resolve(self, url):
    """Decode the Base64 document.write payload on site-hosted pages and
    return the embedded iframe src; foreign URLs pass through unchanged."""
    if self.base_link in url:
        page = self.scraper.get(url).content
        token = re.findall('document.write\(Base64.decode\("(.+?)"\)', page)[0]
        markup = base64.b64decode(token)
        url = client.parseDOM(markup, 'iframe', ret='src')[0]
    return url