def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        content = oRequest.request()

        quality = dom_parser.parse_dom(content, 'div', attrs={'class': 'tabformat'})

        for quali in quality:
            if len(quality) > 1:
                oRequest = cRequestHandler(urlparse.urljoin(self.base_link, dom_parser.parse_dom(quali, 'a')[0].attrs['href']))
                oRequest.removeBreakLines(False)
                oRequest.removeNewLines(False)
                content = oRequest.request()
            self.__getRelease(sources, content, hostDict)

        self.__getRelease(sources, content, hostDict)

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def resolve(self, url):
    try:
        h = urlparse.urlparse(url.strip().lower()).netloc

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0]

        u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r)
        u = [client.replaceHTMLCodes(i) for i in u]
        u = [i for i in u if i.startswith('http') and h not in i]

        url = u[-1].encode('utf-8')

        if 'bit.ly' in url:
            oRequest = cRequestHandler(url)
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            oRequest.request()
            url = oRequest.getHeaderLocationUrl()
        elif 'nullrefer.com' in url:
            url = url.replace('nullrefer.com/?', '')

        return url
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagResolve)
        return
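# Hedged sketch (not part of the addon; assumes the "requests" library) of what
# getHeaderLocationUrl above amounts to for a bit.ly shortlink: request the URL
# without following redirects and read the Location header.
import requests

def resolve_shortlink_sketch(short_url):
    response = requests.get(short_url, allow_redirects=False)
    return response.headers.get('Location', short_url)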
def __search(self, imdb):
    try:
        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, self.search_link % imdb))
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        r = re.findall(r'linkto\".href=\"(.*?)\"\>', r)

        url = None
        if len(r) > 1:
            for i in r:
                oRequest = cRequestHandler(urlparse.urljoin(self.base_link, i))
                oRequest.removeBreakLines(False)
                oRequest.removeNewLines(False)
                data = oRequest.request()
                data = re.compile('(imdbid\s*[=|:]\s*"%s"\s*,)' % imdb, re.DOTALL).findall(data)
                if len(data) >= 1:
                    url = i
        elif len(r) > 0:
            url = r[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, imdb)
        except:
            return
        return
def __search(self, imdb, isMovieSearch):
    try:
        oRequest = cRequestHandler(self.base_link)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        sHtmlContent = oRequest.request()

        pattern = '<meta name="csrf-token" content="([^"]+)">'
        string = str(sHtmlContent)
        token = re.compile(pattern, flags=re.I | re.M).findall(string)
        if len(token) == 0:
            return  # No entry found?

        # first iteration of session object to be parsed for search
        oRequest = cRequestHandler(self.search % imdb)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        oRequest.addHeaderEntry('X-CSRF-TOKEN', token[0])
        oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
        sHtmlContent = oRequest.request()

        content = json.loads(sHtmlContent)
        if isMovieSearch:
            returnObjects = content["movies"]
        else:
            returnObjects = content["series"]
        return returnObjects
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, imdb)
        except:
            return
        return
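# Hedged illustration (standalone, not the addon's request handler; assumes the
# "requests" library) of the CSRF/XHR search pattern used above: fetch the page,
# read the csrf-token <meta> tag, then repeat the request as an XHR carrying
# the X-CSRF-TOKEN header.
import re
import requests

def csrf_xhr_search_sketch(base_url, search_url):
    session = requests.Session()
    html = session.get(base_url).text
    token = re.search(r'<meta name="csrf-token" content="([^"]+)"', html)
    if not token:
        return None  # no token on the page, nothing to search with
    headers = {'X-CSRF-TOKEN': token.group(1),
               'X-Requested-With': 'XMLHttpRequest'}
    return session.get(search_url, headers=headers).json()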
def resolve(self, url):
    try:
        if 'google' in url:
            return url

        url, id, controlId, movieSearch = url

        oRequest = cRequestHandler(url)
        content = oRequest.request()
        token = re.findall("_token':'(.*?)'", content)[0]

        link = urlparse.urljoin(self.base_link, self.link_url_movie if movieSearch else self.link_url)

        oRequest = cRequestHandler(link)
        oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
        oRequest.addParameters('_token', token)
        oRequest.addParameters('PartID', id)
        oRequest.addParameters('ControlID', controlId)
        oRequest.setRequestType(1)
        result = oRequest.request()

        if 'false' in result:
            return
        else:
            return dom_parser.parse_dom(result, 'iframe')[0].attrs['src']
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagResolve)
        return
def clearCacheAll(self):
    control.idle()

    yes = control.yesnoDialog("Sind Sie sicher?", '', '')  # "Are you sure?"
    if not yes:
        return

    cache.cache_clear_all()

    from resources.lib.modules.handler.requestHandler import cRequestHandler
    cRequestHandler('dummy').clearCache()

    control.infoDialog("Vorgang abgeschlossen", sound=True, icon='INFO')  # "Operation completed"
def __getlinks(self, e, h, url, key):
    try:
        url = url + '/stream'
        params = {'e': e, 'h': h, 'lang': 'de', 'q': '', 'grecaptcha': key}

        oRequest = cRequestHandler(url[:-7])
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        csrf = dom_parser.parse_dom(r, "meta", attrs={"name": "csrf-token"})[0].attrs["content"]

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        oRequest.addHeaderEntry('X-CSRF-TOKEN', csrf)
        oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
        oRequest.addParameters('e', e)
        oRequest.addParameters('h', h)
        oRequest.addParameters('lang', 'de')
        oRequest.addParameters('q', '')
        oRequest.addParameters('grecaptcha', key)
        oRequest.setRequestType(1)
        sHtmlContent = oRequest.request()

        helper = json.loads(sHtmlContent)
        mainData = utils.byteify(helper)

        tmp = mainData.get('d', '') + mainData.get('c', '') + mainData.get('iv', '') + mainData.get('f', '') + mainData.get('h', '') + mainData.get('b', '')
        tmp = utils.byteify(json.loads(base64.b64decode(tmp)))

        salt = unhexlify(tmp['s'])
        ciphertext = base64.b64decode(tmp['ct'][::-1])
        b = base64.b64encode(csrf[::-1])

        tmp = utils.cryptoJS_AES_decrypt(ciphertext, b, salt)
        tmp = utils.byteify(json.loads(base64.b64decode(tmp)))

        ciphertext = base64.b64decode(tmp['ct'][::-1])
        salt = unhexlify(tmp['s'])

        b = ''
        a = csrf
        for idx in range(len(a) - 1, 0, -2):
            b += a[idx]
        if mainData.get('e', None):
            b += '1'
        else:
            b += '0'

        tmp = utils.cryptoJS_AES_decrypt(ciphertext, str(b), salt)
        return utils.byteify(json.loads(tmp))
    except Exception:
        return
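# Minimal sketch (an assumption about utils.cryptoJS_AES_decrypt, not its actual
# code; assumes pycryptodome) of CryptoJS-compatible decryption: derive key and
# IV from passphrase + salt via OpenSSL's EVP_BytesToKey (MD5), decrypt
# AES-256-CBC, then strip the PKCS#7 padding.
from hashlib import md5
from Crypto.Cipher import AES

def _evp_bytes_to_key(passphrase, salt, key_len=32, iv_len=16):
    derived, block = b'', b''
    while len(derived) < key_len + iv_len:
        block = md5(block + passphrase + salt).digest()
        derived += block
    return derived[:key_len], derived[key_len:key_len + iv_len]

def cryptojs_aes_decrypt_sketch(ciphertext, passphrase, salt):
    key, iv = _evp_bytes_to_key(passphrase, salt)
    plain = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    return plain[:-ord(plain[-1:])]  # remove PKCS#7 padding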
def sources(self, url, hostDict, hostprDict):
    xbmc.log("logge: sources aufgerufen", xbmc.LOGNOTICE)
    data = urlparse.parse_qs(url)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    sUrl = urlparse.urljoin(self.base_link, data.get('url', ''))
    xbmc.log("logge: sources URL: %s" % sUrl, xbmc.LOGNOTICE)
    sUrl = urlparse.urljoin(self.base_link, url)
    xbmc.log('logge: Now searching with url %s' % sUrl, xbmc.LOGNOTICE)

    hosters = []
    xbmc.log("logge: hosters", xbmc.LOGNOTICE)

    sHtmlContent = cRequestHandler(sUrl).request()
    xbmc.log("logge: request handler", xbmc.LOGNOTICE)

    pattern = "</span><a data-id='([\d]+)' "
    xbmc.log("logge: pattern", xbmc.LOGNOTICE)

    isMatch, aResult = cParser().parse(sHtmlContent, pattern)
    xbmc.log("logge: cparser", xbmc.LOGNOTICE)

    if isMatch:
        xbmc.log("logge: ismatching", xbmc.LOGNOTICE)
        for post in aResult:
            xbmc.log("logge: vor filecrypt", xbmc.LOGNOTICE)
            oRequest = cRequestHandler(self.base_link + 'wp-admin/admin-ajax.php')
            oRequest.addParameters('action', 'doo_player_ajax')
            oRequest.addParameters('post', post)
            oRequest.addParameters('nume', '1')
            if 'tvshows' in sUrl:
                oRequest.addParameters('type', 'tv')
            else:
                oRequest.addParameters('type', 'movie')
            oRequest.setRequestType(1)
            sHtmlContent = oRequest.request()

            isMatch, aResult = cParser().parse(sHtmlContent, "src=[^>]([^']+)")
            xbmc.log("logge: kommst du bis hier?", xbmc.LOGNOTICE)

            for sUrl in aResult:
                valid, host = source_utils.is_host_valid(sUrl, hostDict)
                source = {
                    'source': '',
                    'quality': '720p',
                    'language': 'de',
                    'url': sUrl,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': False
                }
                xbmc.log("logge: source gefunden: %s" % source, xbmc.LOGNOTICE)
                hosters.append(source)

    # Commented out from here on, as you said!
    # if hosters:
    #     hosters.append('getHosterUrl')

    return hosters
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data.get('url'))
        season = data.get('season')
        episode = data.get('episode')

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        if season and episode:
            r = dom_parser.parse_dom(r, 'select', attrs={'id': 'SeasonSelection'}, req='rel')[0]
            r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
            r = urlparse.parse_qs(r)
            r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
            r = urlparse.urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode))

            oRequest = cRequestHandler(r)
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            r = oRequest.request()

        r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
        r = dom_parser.parse_dom(r, 'li', attrs={'id': re.compile('Hoster_\d+')}, req='rel')
        r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content) for i in r if i[0] and i[1]]
        r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r]
        r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2]) for i in r if len(i[1]) > 0]

        for link, hoster, mirrors in r:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            u = urlparse.parse_qs('&id=%s' % link)
            u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])

            for x in range(0, int(mirrors)):
                tempLink = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                if season and episode:
                    tempLink += "&Season=%s&Episode=%s" % (season, episode)
                try:
                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': tempLink, 'direct': False, 'debridonly': False})
                except:
                    pass

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def __search(self, titles, year, season='0'):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        titles = [cleantitle.get(i) for i in set(titles) if i]

        oRequest = cRequestHandler(query)
        sHtmlContent = oRequest.request()

        url = urlparse.urljoin(self.base_link, self.search_link_query)
        token = re.findall(r"token':'(.*?)'}", sHtmlContent)[0]

        oRequest = cRequestHandler(url)
        # if sSearchText:
        #     oRequest.addParameters('search', sSearchText)
        #     page = '1'
        #     type = 'Alle'
        #     sortBy = 'latest'
        oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
        oRequest.addParameters('_token', token)
        oRequest.addParameters('from', 1900)
        oRequest.addParameters('page', '1')
        oRequest.addParameters('rating', 0)
        oRequest.addParameters('sortBy', 'latest')
        oRequest.addParameters('to', time.strftime("%Y", time.localtime()))
        oRequest.addParameters('type', 'Alle')
        oRequest.addParameters('search', titles[0])
        oRequest.setRequestType(1)
        searchResult = oRequest.request()

        results = re.findall(r'title=\\"(.*?)\\" href=\\"(.*?)" ', searchResult)

        usedIndex = 0
        # Find result with matching name and season
        for x in range(0, len(results)):
            title = cleantitle.get(results[x][0])
            if any(i in title for i in titles):
                return source_utils.strip_domain(results[x][1].replace('\\', ''))
            usedIndex += 1
        return
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def __search(self, titles, year):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, query))
        r = oRequest.request()

        pageTitle = dom_parser.parse_dom(r, 'title')[0].content.lower()
        if "search" not in pageTitle and 'such' not in pageTitle:
            # Not a search page: the request was redirected straight to a result page.
            if any(year in pageTitle and i in pageTitle for i in t):
                return dom_parser.parse_dom(r, 'meta', attrs={'property': 'og:url'})[0].attrs['content']

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
        r = [(dom_parser.parse_dom(i.content, 'h4', attrs={'class': 'title-list'}), dom_parser.parse_dom(i.content, 'a', attrs={'href': re.compile('.*/year/.*')})) for i in r]
        r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'), i[1][0].content if i[1] else '0') for i in r if i[0]]
        r = [(i[0][0].attrs['href'], i[0][0].content, re.sub('<.+?>|</.+?>', '', i[1])) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year][0]

        return source_utils.strip_domain(r)
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        query = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        content = oRequest.request()

        links = dom_parser.parse_dom(content, 'div', attrs={'id': 'seasons'})
        links = dom_parser.parse_dom(links, 'div', attrs={'class': 'se-c'})
        links = [(dom_parser.parse_dom(i, 'span', attrs={'class': 'se-t'})[0].content, dom_parser.parse_dom(i, 'li')) for i in links]
        links = [i[1] for i in links if season == i[0]][0]
        links = dom_parser.parse_dom(links, 'div', attrs={'class': 'episodiotitle'})
        links = dom_parser.parse_dom(links, 'a')
        links = [(i.attrs['href'], re.findall("x(\d+)", i.attrs['href'])[0]) for i in links]
        links = [i[0] for i in links if episode == i[1]]

        if len(links) > 0:
            return source_utils.strip_domain(links[0])
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        url = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        if season == 1 and episode == 1:
            season = episode = ''

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'episodios'})
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*%s' % ('-%sx%s' % (season, episode)))})[0].attrs['href']

        return source_utils.strip_domain(r)
    except:
        return ""
def __search(self, titles, year):
    try:
        query = self.search_link % (urllib.quote_plus(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        dom_parsed = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
        links = [(dom_parser.parse_dom(i, 'a')[0], dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})[0].content) for i in dom_parsed]

        r = sorted(links, key=lambda i: int(i[1]), reverse=True)  # with year > no year
        r = [x[0].attrs['href'] for x in r if int(x[1]) == int(year)]

        if len(r) > 0:
            return source_utils.strip_domain(r[0])
        return
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return ""
def __search(self, titles, year, season='0'):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        titles = [cleantitle.get(i) for i in set(titles) if i]

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        searchResult = oRequest.request()

        results = re.findall(r'<div class=\"movie-title\">\n((?s).*?)\"(.*?)\">(.*?)</a>', searchResult)

        usedIndex = 0
        # Find result with matching name and season
        for x in range(0, len(results)):
            title = cleantitle.get(results[x][2])
            if any(i in title for i in titles):
                return source_utils.strip_domain(results[x][1])
            usedIndex += 1
        return
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def __search(self, imdb):
    try:
        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, self.search_link % imdb))
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        r = dom_parser.parse_dom(r, 'table', attrs={'id': 'RsltTableStatic'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'alt': 'language'}, req='src')) for i in r]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].attrs['src']) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], re.findall('.+?(\d+)\.', i[2])) for i in r]
        r = [(i[0], i[1], i[2][0] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]))  # german > german/subbed
        r = [i[0] for i in r if i[2] in ['1', '15']]

        if len(r) > 0:
            return source_utils.strip_domain(r[0])
        return ""
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, imdb)
        except:
            return
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return

        url = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        seasonMapping = dom_parser.parse_dom(r, 'select', attrs={'name': 'season'})
        seasonMapping = dom_parser.parse_dom(seasonMapping, 'option', req='value')
        seasonIndex = [i.attrs['value'] for i in seasonMapping if season in i.content]
        seasonIndex = int(seasonIndex[0]) - 1

        seasons = dom_parser.parse_dom(r, 'div', attrs={'id': re.compile('episodediv.+?')})
        seasons = seasons[seasonIndex]

        episodes = dom_parser.parse_dom(seasons, 'option', req='value')
        url = [i.attrs['value'] for i in episodes if episode == re.findall('\d+', i.content)[0]]

        if len(url) > 0:
            return url[0]
    except:
        return
def __search(self, title):
    try:
        t = cleantitle.get(title)

        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, self.search_link))
        oRequest.setRequestType(1)
        oRequest.addParameters('suchbegriff', title)
        r = oRequest.request()

        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'ausgabe_1'}, req='href')
        r = [(i.attrs['href'], i.content) for i in r]
        r = [i[0] for i in r if cleantitle.get(i[1]) == t]

        if len(r) == 0:
            return
        return source_utils.strip_domain(r[0])
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, title)
        except:
            return
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        episode = None
        season = None
        if isinstance(url, list):
            season, episode, url = url

        url = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(url, caching=False)
        content = oRequest.request()

        quality = re.findall(r'\<span class=\"film-rip ignore-select\"><a href=\"https://cinemaxx.cc/xfsearch/rip/(.*?)/', content)[0]
        if "HD" in quality:
            quality = '1080p'
        else:
            quality = 'SD'

        link = dom_parser.parse_dom(content, 'div', attrs={'id': 'full-video'})

        if season:
            try:
                link = re.findall("vk.show\(\d+,(.*?)\)", link[0].content)[0]
                link = re.findall("\[(.*?)\]", link)[int(season) - 1]
                link = re.findall("'(.*?)'", link)
                link = link[int(episode) - 1]

                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': '', 'url': link, 'direct': False, 'debridonly': False, 'checkstreams': True})
            except:
                # We have a TV show, but no seasons to choose from.
                # cinemaxx can host specific seasons; it's stated in the URL
                # (e.g. http://cinemaxx.cc/serien/743-homeland-7-staffel.html).
                link = dom_parser.parse_dom(link, 'iframe')
                link = link[0].attrs['src']

                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if valid:
                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': '', 'url': link, 'direct': False, 'debridonly': False, 'checkstreams': True})
        else:
            link = dom_parser.parse_dom(link, 'iframe')
            link = link[0].attrs['src']

            valid, hoster = source_utils.is_host_valid(link, hostDict)
            if valid:
                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': '', 'url': link, 'direct': False, 'debridonly': False, 'checkstreams': True})

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, url))
        content = oRequest.request()

        links = dom_parser.parse_dom(content, 'a')
        links = [i for i in links if 'href' in i.attrs and url in i.attrs['href']]
        links = [(i.attrs['href'], i.attrs['title'].replace('HD', ''), '720p' if 'HD' in i.attrs['href'] else 'SD') for i in links if 'title' in i.attrs]

        for link, hoster, quality in links:
            valid, hoster = source_utils.is_host_valid(hoster, hostDict)
            if not valid:
                continue

            sources.append({
                'source': hoster,
                'quality': quality,
                'language': 'de',
                'info': 'Recaptcha',
                'url': link,
                'direct': False,
                'debridonly': False,
                'captcha': True
            })

        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape)
        return sources
def __search(self, titles):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]

        for title in titles:
            oRequest = cRequestHandler(self.base_link + "/", caching=False)
            oRequest.addHeaderEntry('Host', 'cinemaxx.cc')
            oRequest.addHeaderEntry('Referer', 'https://cinemaxx.cc/')
            oRequest.addParameters('do', 'search')
            oRequest.addParameters('subaction', 'search')
            oRequest.addParameters('story', title)
            oRequest.addParameters('full_search', '0')
            oRequest.addParameters('search_start', '0')
            oRequest.addParameters('result_from', '1')
            oRequest.addParameters('submit', 'submit')
            oRequest.setRequestType(1)
            result = oRequest.request()

            links = dom_parser.parse_dom(result, 'div', attrs={'class': 'shortstory-in'})
            links = [dom_parser.parse_dom(i, 'a')[0] for i in links]
            links = [(i.attrs['href'], i.attrs['title']) for i in links]
            links = [i[0] for i in links if any(a in cleantitle.get(i[1]) for a in t)]

            if len(links) > 0:
                return source_utils.strip_domain(links[0])
        return
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def resolve(self, url):
    try:
        url = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(url, caching=False)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        content = oRequest.request()
        content = client.request(url)

        url = dom_parser.parse_dom(content, 'iframe')[0].attrs['src']

        recap = recaptcha_app.recaptchaApp()
        key = recap.getSolutionWithDialog(url, "6LeiZSYUAAAAAI3JZXrRnrsBzAdrZ40PmD57v_fs", self.recapInfo)
        print "Recaptcha2 Key: " + key

        response = None
        if key != "" and "skipped" not in key.lower():
            content = client.request(url)
            s = dom_parser.parse_dom(content, 'input', attrs={'name': 's'})[0].attrs['value']
            link = client.request(url + '?t=%s&s=%s' % (key, s), output='geturl')
            return link
        elif not response or "skipped" in key.lower():
            return
    except Exception as e:
        source_faultlog.logFault(__name__, source_faultlog.tagResolve)
        return
def __search(self, imdb, titles):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
        r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [i.attrs['href'] for i in r if i and cleantitle.get(i.content) in t]

        if len(r) == 0:
            return None
        r = r[0]

        url = source_utils.strip_domain(r)

        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, url))
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
        r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
        r = [i[0] for i in r if i]

        return url if imdb in r else None
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def resolve(self, url):
    try:
        link = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(link, caching=False)
        content = oRequest.request()

        url = oRequest.getRealUrl()
        return url if self.base_link not in url else None
    except:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = data.get('url')
        episode = int(data.get('episode', 1))

        oRequest = cRequestHandler(urlparse.urljoin(self.base_link, url))
        r = oRequest.request()

        r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}),
             'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}

        for info, data in r.iteritems():
            data = dom_parser.parse_dom(data, 'tr')
            data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
            data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
            data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
            data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
            data = [(i[0], i[1][0]) for i in data if i[1]]

            for link, hoster in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                sources.append({
                    'source': hoster,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })

        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def __search(self, isSerieSearch, titles, isTitleClean):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]
        if isTitleClean:
            t = [cleantitle.get(self.titleclean(i)) for i in set(titles) if i]

        for title in titles:
            if isTitleClean:
                title = self.titleclean(title)

            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            oRequest = cRequestHandler(query)
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            r = oRequest.request()

            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
            r = [(i.attrs['href'], i.content) for i in r]

            if len(r) > 0:
                if isSerieSearch:
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t and not isSerieSearch or cleantitle.get(re.findall('(.*?)S\d', i[1])[0]) and isSerieSearch]
                else:
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t and not isSerieSearch]

                if len(r) > 0:
                    url = source_utils.strip_domain(r[0])
                    return url
        return
    except:
        try:
            source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
        except:
            return
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        query = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(query)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()

        quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
        quality, info = source_utils.get_release_quality(quality)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
        r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), re.findall(r' data-player-url="(.*?)">', i.content)) for i in r]
        r = [(re.sub('\shd', '', i[0][0].content.lower()), i[1][0]) for i in r if i[0] and i[1]]

        for hoster, id in r:
            if 'verystream' in hoster:
                sources = hdgo.getStreams(id, sources)
            else:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'info': '',
                    'url': id,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def resolve(self, url):
    try:
        if 'kinoger' in url:
            oRequest = cRequestHandler(url)
            oRequest.removeBreakLines(False)
            oRequest.removeNewLines(False)
            request = oRequest.request()

            pattern = 'src: "(.*?)"'
            request = re.compile(pattern, re.DOTALL).findall(request)

            return request[0] + '|Referer=' + url
        return url
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagResolve)
        return url
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        url = urlparse.urljoin(self.base_link, url)

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        r = oRequest.request()
        r = r.replace('\\"', '"')

        links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

        for i in links:
            try:
                host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                host = host.encode('utf-8')

                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid:
                    continue

                link = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                link = client.replaceHTMLCodes(link)
                link = urlparse.urljoin(self.base_link, link)
                link = link.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        url = urlparse.urljoin(self.base_link, data['url'])
        season = data.get('season')
        episode = data.get('episode')

        if season and episode:
            url = url + '/staffel-%s/episode-%s' % (season, episode)

        oRequest = cRequestHandler(url)
        oRequest.removeBreakLines(False)
        oRequest.removeNewLines(False)
        moviecontent = oRequest.request()

        r = re.findall('''({".*?"}).*?class=\"(.*?)linkbutton''', moviecontent)

        for link, quli in r:
            link = self.decrypt(link)

            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue

            if quli == "hd":
                quli = '720p'
            else:
                quli = 'SD'

            sources.append({
                'source': host,
                'quality': quli,
                'language': 'de',
                'url': link,
                'direct': False,
                'debridonly': False
            })

        if len(sources) == 0:
            raise Exception()
        return sources
    except:
        source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
        return sources