def __search(self, titles, year):
    """Search the site and return the relative URL of the best title/year match."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        # Accept the exact year, one year either side, or an unknown year ('0').
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(query)

        results = dom_parser.parse_dom(html, 'div', attrs={'class': 'details'})
        results = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                    dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                   for i in results]
        results = [(dom_parser.parse_dom(i[0][0], 'a', req='href'),
                    i[1][0].content) for i in results if i[0] and i[1]]
        results = [(i[0][0].attrs['href'],
                    client.replaceHTMLCodes(i[0][0].content),
                    i[1]) for i in results if i[0]]
        results = sorted(results, key=lambda i: int(i[2]),
                         reverse=True)  # entries carrying a year first

        match = [i[0] for i in results
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year):
    """Search via the site's nonce-protected endpoint; return the relative URL."""
    try:
        nonce = cache.get(self.__get_nonce, 24)
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])),
                                nonce))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        data = json.loads(client.request(query))
        # The response maps keys to entries; pull url / title / date per entry.
        results = [(data[k].get('url'), data[k].get('title'),
                    data[k].get('extra').get('date')) for k in data]
        results = sorted(results, key=lambda i: int(i[2]),
                         reverse=True)  # dated entries first

        match = [i[0] for i in results
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles):
    """Search and return the first result whose cleaned title matches *titles*.

    Bug fix: the original had ``else: return`` inside the loop, so it gave
    up after inspecting only the FIRST search result; now every result is
    checked before returning None.
    """
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        results = dom_parser.parse_dom(html, 'div', attrs={'class': 'nag'})
        results = dom_parser.parse_dom(results, 'div', attrs={'class': 'item-video'})
        results = dom_parser.parse_dom(results, 'h2', attrs={'class': 'entry-title'})
        results = dom_parser.parse_dom(results, 'a', req='href')

        for link in results:
            title = link[1]
            # Strip any '*...*' flags embedded in the display title.
            if re.search('\*(?:.*?)\*', title) is not None:
                title = re.sub('\*(?:.*?)\*', '', title)
            if cleantitle.get(title) in wanted:
                return source_utils.strip_domain(link[0]['href'])
        return
    except:
        return
def __search(self, search_link, imdb, titles):
    """Search, take the first title match, then verify its page links *imdb*."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        rows = dom_parser.parse_dom(html, 'div', attrs={'class': 'big-list'})
        rows = dom_parser.parse_dom(rows, 'table', attrs={'class': 'row'})
        rows = dom_parser.parse_dom(rows, 'td', attrs={'class': 'list-name'})
        rows = dom_parser.parse_dom(rows, 'a', req='href')

        href = [i.attrs['href'] for i in rows
                if i and cleantitle.get(i.content) in wanted][0]
        url = source_utils.strip_domain(href)

        # Confirm the detail page actually references the expected IMDb id.
        page = client.request(urlparse.urljoin(self.base_link, url))
        ids = dom_parser.parse_dom(page, 'a',
                                   attrs={'href': re.compile('.*/tt\d+.*')},
                                   req='href')
        ids = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in ids]
        ids = [i[0] for i in ids if i]

        return url if imdb in ids else None
    except:
        return
def __search(self, titles, year):
    """POST-search the site and return the relative URL of the best match."""
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(urlparse.urljoin(self.base_link, self.search_link),
                              post={'query': cleantitle.query(titles[0])})

        rows = dom_parser.parse_dom(html, 'li', attrs={'class': 'entTd'})
        rows = dom_parser.parse_dom(rows, 'div', attrs={'class': 've-screen'},
                                    req='title')
        rows = [(dom_parser.parse_dom(i, 'a', req='href'),
                 i.attrs['title'].split(' - ')[0]) for i in rows]
        rows = [(i[0][0].attrs['href'], i[1],
                 re.findall('(.+?) \(*(\d{4})', i[1])) for i in rows]
        # Split "Title (Year)" when present; otherwise keep title, year '0'.
        rows = [(i[0],
                 i[2][0][0] if len(i[2]) > 0 else i[1],
                 i[2][0][1] if len(i[2]) > 0 else '0') for i in rows]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Resolve the playable page URL for a given season/episode."""
    try:
        if url is None:
            return

        scraper = cfscrape.create_scraper()

        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        url['premiered'], url['season'], url['episode'] = premiered, season, episode

        # Best effort: locate the season page via the site's own search;
        # on failure fall through with whatever `url` currently holds.
        try:
            clean_title = cleantitle.geturl(
                url['tvshowtitle']) + '-season-%d' % int(season)
            search_url = urlparse.urljoin(
                self.base_link,
                self.search_link % clean_title.replace('-', '+'))
            html = scraper.get(search_url).content
            rows = client.parseDOM(html, 'div', {'id': 'movie-featured'})
            rows = [(client.parseDOM(i, 'a', ret='href'),
                     re.findall('<b><i>(.+?)</i>', i)) for i in rows]
            rows = [(i[0][0], i[1][0]) for i in rows
                    if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
            url = rows[0][0]
        except:
            pass

        # Pick the anchor whose label equals the episode number.
        page = scraper.get(url).content
        page = client.parseDOM(page, 'div', attrs={'id': 'details'})
        pairs = zip(client.parseDOM(page, 'a'),
                    client.parseDOM(page, 'a', ret='href'))
        match = [(label, href) for label, href in pairs
                 if label == str(int(episode))]
        return match[0][1]
    except:
        return
def __search(self, titles, year):
    """Search with "title year"; return the first matching figure link."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0] + ' ' + year))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        captions = dom_parser.parse_dom(html, 'figure',
                                        attrs={'class': 'pretty-figure'})
        captions = dom_parser.parse_dom(captions, 'figcaption')

        for cap in captions:
            title = cleantitle.get(client.replaceHTMLCodes(cap[0]['title']))
            if title in wanted:
                link = dom_parser.parse_dom(cap, 'a', req='href')
                return source_utils.strip_domain(link[0][0]['href'])
        return
    except:
        return
def __search(self, titles, year):
    """XHR search returning JSON; return the relative URL, 'serien/' stripped."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        resp = client.request(query, XHR=True)
        # A single result comes back as a bare object; wrap so it parses as a list.
        if resp and resp.startswith('{'):
            resp = '[%s]' % resp
        items = json.loads(resp)

        items = [(i['url'], i['name']) for i in items
                 if 'name' in i and 'url' in i]
        items = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})?\)*$', i[1]))
                 for i in items]
        items = [(i[0],
                  i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in items]
        items = sorted(items, key=lambda i: int(i[2]),
                       reverse=True)  # dated entries first

        match = [i[0] for i in items
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        url = source_utils.strip_domain(match)
        return url.replace('serien/', '')
    except:
        return
def __search(self, titles, imdb, year):
    """XHR search of WP posts; match by IMDb id first, else by title+year."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        posts = json.loads(client.request(query, XHR=True))
        posts = [(i.get('title'), i.get('custom_fields', {}))
                 for i in posts.get('posts', [])]
        posts = [(i[0], i[1]) for i in posts if i[0] and i[1]]
        # Custom fields hold single-element lists: stream URL, year, IMDb link.
        posts = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
                  i[1].get('IMDb-Link', [''])) for i in posts if i]
        posts = [(i[0], i[1][0], i[2][0],
                  re.findall('.+?(tt\d+).*?', i[3][0]))
                 for i in posts if i[0] and i[1] and i[2] and i[3]]

        match = [i[1] for i in posts
                 if imdb in i[3]
                 or (cleantitle.get(i[0]) in wanted and i[2] in years)][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year, season='0'):
    """Search product boxes, filter movie/episode by *season*; return '-stream' URL."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        boxes = dom_parser.parse_dom(html, 'ul',
                                     attrs={'class': ['products', 'row']})
        boxes = dom_parser.parse_dom(boxes, 'div',
                                     attrs={'class': ['box-product', 'clearfix']})

        # Season searches keep only episode boxes; movie searches drop them.
        if int(season) > 0:
            boxes = [i for i in boxes
                     if dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
        else:
            boxes = [i for i in boxes
                     if not dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]

        boxes = dom_parser.parse_dom(boxes, 'h3', attrs={'class': 'title-product'})
        boxes = dom_parser.parse_dom(boxes, 'a', req='href')

        rows = [(i.attrs['href'], i.content.lower()) for i in boxes if i]
        rows = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in rows]
        rows = [(i[0],
                 i[2][0][0] if len(i[2]) > 0 else i[1],
                 i[2][0][1] if len(i[2]) > 0 else '0') for i in rows]
        # Pull a "staffel N" / "s N" season marker out of the title.
        rows = [(i[0], i[1], i[2],
                 re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in rows]
        rows = [(i[0],
                 i[3][0][0] if len(i[3]) > 0 else i[1],
                 i[2],
                 i[3][0][1] if len(i[3]) > 0 else '0') for i in rows]
        rows = [(i[0], i[1].replace(' hd', ''), i[2],
                 '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in rows]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] in years
                 and int(i[3]) == int(season)][0]

        url = source_utils.strip_domain(match)
        return url.replace('-info', '-stream')
    except:
        return
def __search(self, titles, year, imdb):
    """Search movie cells; disambiguate multiple hits by IMDb id.

    Fixes: the original left ``url`` unbound (silently swallowed NameError)
    when several candidates matched but none contained the IMDb id, and it
    kept fetching detail pages after a confirmed match; we now initialise
    ``url`` and ``break`` on the first confirmed candidate.
    """
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        cells = dom_parser.parse_dom(html, 'div', attrs={'class': 'movie_cell'})
        cells = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}),
                  dom_parser.parse_dom(i, 'div', attrs={'class': 'year'}))
                 for i in cells]
        cells = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']),
                  re.findall('[(](\d{4})[)]', i[1][0].content))
                 for i in cells if i[0] and i[1]]
        cells = [(i[0][0].attrs['href'], i[0][0].content, i[1][0])
                 for i in cells if i[0] and i[1]]
        cells = [(i[0], i[1].lower(), i[2]) for i in cells if i[2] in years]
        cells = sorted(cells, key=lambda i: int(i[2]),
                       reverse=True)  # dated entries first

        candidates = [i[0] for i in cells if cleantitle.get(i[1]) in wanted]

        url = None
        if len(candidates) > 1:
            # Several candidates: confirm via the IMDb id on each detail page.
            for link in candidates:
                page = client.request(urlparse.urljoin(self.base_link, link))
                anchors = dom_parser.parse_dom(
                    page, 'a', attrs={'name': re.compile('.*/tt\d+.*')},
                    req='name')
                ids = [re.findall('.+?(tt\d+).*?', a.attrs['name'])
                       for a in anchors]
                ids = [d[0] for d in ids if len(d) > 0 and d[0] == imdb]
                if len(ids) >= 1:
                    url = link
                    break  # first confirmed match wins
        elif candidates:
            url = candidates[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        return
def __search(self, titles, year):
    """Match against the cached series index; return 'series/<id>/'."""
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        series = cache.get(self.__get_json, 12, "series")
        series = [(i.get('id'), i.get('series')) for i in series]
        series = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1]))
                  for i in series if cleantitle.get(i[1]) in wanted]
        # Split "Title (Year)" when present; otherwise keep title, year '0'.
        series = [(i[0],
                   i[2][0][0] if len(i[2]) > 0 else i[1],
                   i[2][0][1] if len(i[2]) > 0 else '0') for i in series]
        series = sorted(series, key=lambda i: int(i[2]),
                        reverse=True)  # dated entries first

        sid = [i[0] for i in series if i[2] in years][0]
        return 'series/%s/' % sid
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Find the show page URL by scanning itemprop url/name pairs."""
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
        html = client.request(query)

        wanted = [tvshowtitle] + source_utils.aliases_to_array(aliases)
        wanted = [cleantitle.get(i) for i in set(wanted) if i]

        pairs = re.compile(
            'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
            re.DOTALL).findall(html)

        for href, name in pairs:
            # The title must match an alias AND carry the expected year.
            if cleantitle.get(cleantitle.normalize(name)) in wanted and year in name:
                return href.encode('utf-8')
    except:
        return
def __search(self, titles, year, season='0'):
    """Search via the WP admin-ajax endpoint and match title/year/season."""
    try:
        aj = cache.get(self.__get_ajax_object, 24)

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(aj.get('ajax_url'),
                              post={'action': aj.get('search'),
                                    'nonce': aj.get('snonce'),
                                    'query': cleantitle.query(titles[0])})

        rows = dom_parser.parse_dom(html, 'div', attrs={'class': 'search-result'})
        rows = dom_parser.parse_dom(rows, 'div',
                                    attrs={'class': 'search-item-content'})
        rows = dom_parser.parse_dom(rows, 'a', req='href')

        rows = [(i.attrs['href'], i.content.lower()) for i in rows if i]
        rows = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in rows]
        rows = [(i[0],
                 i[2][0][0] if len(i[2]) > 0 else i[1],
                 i[2][0][1] if len(i[2]) > 0 else '0') for i in rows]
        # Pull a "staffel N" / "s N" season marker out of the title.
        rows = [(i[0], i[1], i[2],
                 re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in rows]
        rows = [(i[0],
                 i[3][0][0] if len(i[3]) > 0 else i[1],
                 i[2],
                 i[3][0][1] if len(i[3]) > 0 else '0') for i in rows]
        rows = [(i[0], i[1].replace(' hd', ''), i[2],
                 '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in rows]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] in years
                 and int(i[3]) == int(season)][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles):
    """Search and return the first article link whose title matches."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(titles[0])))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        links = dom_parser.parse_dom(html, 'article')
        links = dom_parser.parse_dom(links, 'a', attrs={'class': 'rb'}, req='href')
        links = [(i.attrs['href'], i.content) for i in links]

        match = [i[0] for i in links if cleantitle.get(i[1]) in wanted][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, title):
    """POST the raw title to the search form; return the matching link."""
    try:
        wanted = cleantitle.get(title)

        html = client.request(urlparse.urljoin(self.base_link, self.search_link),
                              post={'suchbegriff': title})
        links = dom_parser.parse_dom(html, 'a', attrs={'class': 'ausgabe_1'},
                                     req='href')
        links = [(i.attrs['href'], i.content) for i in links]

        match = [i[0] for i in links if cleantitle.get(i[1]) == wanted][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles):
    """JSON autocomplete search; return the id of the first title match."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        items = json.loads(client.request(query))
        items = [(i.get('id'), i.get('value')) for i in items]
        return [i[0] for i in items if cleantitle.get(i[1]) in wanted][0]
    except:
        return
def __search(self, titles, year, season='0'):
    """Search shortstory articles and match title/year/season."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(titles[0])))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        rows = dom_parser.parse_dom(html, 'article', attrs={'class': 'shortstory'})
        rows = dom_parser.parse_dom(rows, 'div', attrs={'class': 's_info'})
        rows = dom_parser.parse_dom(rows, 'h2')
        rows = dom_parser.parse_dom(rows, 'a', req='href')

        rows = [(i.attrs['href'], i.content.lower()) for i in rows if i]
        rows = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]),
                 re.findall('(.+?) \(*(\d{4})', i[1])) for i in rows]
        rows = [(i[0],
                 i[2][0][0] if len(i[2]) > 0 else i[1],
                 i[2][0][1] if len(i[2]) > 0 else '0') for i in rows]
        # Pull an "N staffel" / "N s" season marker out of the title.
        rows = [(i[0], i[1], i[2],
                 re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in rows]
        rows = [(i[0],
                 i[3][0][0] if len(i[3]) > 0 else i[1],
                 i[2],
                 i[3][0][1] if len(i[3]) > 0 else '0') for i in rows]
        rows = [(i[0], i[1], i[2],
                 '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in rows]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] in years
                 and int(i[3]) == int(season)][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year, content):
    """Cookie-driven search (year filter, then keyword) scoped to *content* type."""
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]

        # Establish the year-filter cookie, run the keyword search, then
        # fetch the typed result listing with the accumulated cookie.
        cookie = client.request(urlparse.urljoin(self.base_link,
                                                 self.year_link % int(year)),
                                output='cookie')
        post = urllib.urlencode({'search': cleantitle.query(titles[0])})
        cookie = client.request(urlparse.urljoin(self.base_link, self.search_link),
                                cookie=cookie, post=post, output='cookie')
        html = client.request(urlparse.urljoin(self.base_link,
                                               self.type_link % content),
                              cookie=cookie, post=post)

        rows = dom_parser.parse_dom(html, 'div', attrs={'id': 'content'})
        rows = dom_parser.parse_dom(rows, 'tr')
        rows = [dom_parser.parse_dom(i, 'td') for i in rows]
        rows = [dom_parser.parse_dom(i, 'a', req='href') for i in rows]
        rows = [(i[0].attrs['href'], i[0].content, i[1].content)
                for i in rows if i]

        entries = []
        for href, label, yr in rows:
            # Some titles carry an alternate name after '<i>(' — keep both forms.
            if re.search('(?<=<i>\().*$', label):
                entries.append((href,
                                re.search('(.*?)(?=\s<)', label).group(),
                                re.search('(?<=<i>\().*$', label).group(),
                                yr))
            else:
                entries.append((href, label, label, yr))

        match = [i[0] for i in entries
                 if (cleantitle.get(i[1]) in wanted
                     or cleantitle.get(i[2]) in wanted)
                 and i[3] == year][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles):
    """Double-URL-encoded search with a German-language POST filter."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(
                urllib.quote_plus(cleantitle.query(titles[0])))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})
        html = client.request(query, post=post)

        rows = dom_parser.parse_dom(html, 'table', attrs={'class': 'table'})
        rows = dom_parser.parse_dom(rows, 'a', attrs={'class': 'PreviewImage'})

        for link in rows:
            if cleantitle.get(link[1]) in wanted:
                return source_utils.strip_domain(link[0]['href'])
        return
    except:
        return
def __search(self, titles):
    """POST keyword search returning JSON-wrapped HTML; match ss-title links."""
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'keyword': titles[0]})

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        resp = json.loads(client.request(query, post=post))
        items = dom_parser.parse_dom(resp['content'], 'li')

        for item in items:
            link = dom_parser.parse_dom(item[1], 'a',
                                        attrs={'class': 'ss-title'})
            if cleantitle.get(link[0][1]) in wanted:
                return source_utils.strip_domain(link[0][0]['href'])
    except:
        return
def __search(self, titles, year):
    """Search panel listings; require an exact year match."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        panels = dom_parser.parse_dom(html, 'div', attrs={'id': 'main'})
        panels = dom_parser.parse_dom(panels, 'div', attrs={'class': 'panel-body'})

        rows = [(dom_parser.parse_dom(i.content, 'h4',
                                      attrs={'class': 'title-list'}),
                 dom_parser.parse_dom(i.content, 'a',
                                      attrs={'href': re.compile('.*/year/.*')}))
                for i in panels]
        rows = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'),
                 i[1][0].content if i[1] else '0') for i in rows if i[0]]
        rows = [(i[0][0].attrs['href'], i[0][0].content,
                 re.sub('<.+?>|</.+?>', '', i[1]))
                for i in rows if i[0] and i[1]]
        rows = [(i[0], i[1], i[2].strip()) for i in rows if i[2]]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] == year][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year):
    """XHR keyword search with a cleaned-query fallback; match title/year."""
    try:
        post = urllib.urlencode({'keyword': titles[0]})
        resp = client.request(urlparse.urljoin(self.base_link, self.search_link),
                              XHR=True, post=post)
        # Retry with a normalised query if the raw title returned nothing.
        if resp is None:
            post = urllib.urlencode({'keyword': cleantitle.query(titles[0])})
            resp = client.request(urlparse.urljoin(self.base_link,
                                                   self.search_link),
                                  XHR=True, post=post)

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        items = json.loads(resp)
        items = [(i['link'], re.sub('<.+?>|</.+?>', '', i['title']))
                 for i in items if 'title' in i and 'link' in i]
        # Collapse "Name Movie N: ..." entries to just the franchise name.
        items = [(i[0], i[1],
                  re.findall('(.+?)\s*Movie \d+:.+?$', i[1], re.DOTALL))
                 for i in items]
        items = [(i[0], i[2][0] if len(i[2]) > 0 else i[1]) for i in items]
        items = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1]))
                 for i in items]
        items = [(i[0],
                  i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in items]
        items = sorted(items, key=lambda i: int(i[2]),
                       reverse=True)  # dated entries first

        match = [i[0] for i in items
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, episode):
    """Search "<title> <episode>" posts; return hoster links for the match."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]) + ' ' + str(episode)))

        wanted = [cleantitle.get(i) + str(episode) for i in set(titles) if i]

        resp = client.request(query)
        resp = resp.split('</style>')[-1].strip()  # drop inline CSS before the JSON
        posts = json.loads(resp)

        posts = [(i.get('title', {}).get('rendered'),
                  i.get('content', {}).get('rendered')) for i in posts]
        posts = [(re.sub('ger (?:sub|dub)', '', i[0], flags=re.I).strip(), i[1])
                 for i in posts if i[0] and i[1]]
        posts = [(i[0], re.findall('(.+?) (\d*)$', i[0]), i[1]) for i in posts]
        # Normalise zero-padded numbering, e.g. "Title 007" -> "Title 7".
        posts = [(i[0] if not i[1]
                  else i[1][0][0] + ' ' + str(int(i[1][0][1])),
                  i[2]) for i in posts]
        posts = [dom_parser.parse_dom(i[1], 'div') for i in posts
                 if cleantitle.get(i[0]) in wanted]
        # Collect both anchor hrefs and iframe srcs as hoster links.
        posts = [[x.attrs['href']
                  for x in dom_parser.parse_dom(i, 'a', req='href')]
                 + [x.attrs['src']
                    for x in dom_parser.parse_dom(i, 'iframe', req='src')]
                 for i in posts]
        return posts[0]
    except:
        return
def __search(self, titles, year):
    """XHR POST search; match list entries by title and '(year)' suffix."""
    try:
        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(
            urlparse.urljoin(self.base_link, self.search_link),
            post=urllib.urlencode({'val': cleantitle.query(titles[0])}),
            XHR=True)

        links = dom_parser.parse_dom(html, 'li')
        links = dom_parser.parse_dom(links, 'a', req='href')
        links = [(i.attrs['href'], i.content,
                  re.findall('\((\d{4})', i.content)) for i in links]
        links = [(i[0], i[1], i[2][0] if i[2] else '0') for i in links]

        match = [i[0] for i in links
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles, year, content_type):
    """Search *content_type* entries flagged German (img alt='de')."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])),
                                content_type))

        wanted = [cleantitle.get(i) for i in set(titles) if i]
        years = ['%s' % str(year), '%s' % str(int(year) + 1),
                 '%s' % str(int(year) - 1), '0']

        html = client.request(query)
        rows = dom_parser.parse_dom(html, 'div', attrs={'id': 'search'})
        rows = dom_parser.parse_dom(rows, 'table')
        rows = dom_parser.parse_dom(rows, 'tr',
                                    attrs={'class': re.compile('entry\d+')})
        # Keep only rows carrying the German flag image.
        rows = [(dom_parser.parse_dom(i, 'a'),
                 dom_parser.parse_dom(i, 'img',
                                      attrs={'class': 'flag', 'alt': 'de'}))
                for i in rows]
        rows = [i[0] for i in rows if i[0] and i[1]]
        rows = [(i[0].attrs['href'], i[0].content) for i in rows]
        rows = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in rows]
        rows = [(i[0],
                 i[2][0][0] if len(i[2]) > 0 else i[1],
                 i[2][0][1] if len(i[2]) > 0 else '0') for i in rows]
        rows = sorted(rows, key=lambda i: int(i[2]),
                      reverse=True)  # dated entries first

        match = [i[0] for i in rows
                 if cleantitle.get(i[1]) in wanted and i[2] in years][0]
        return source_utils.strip_domain(match)
    except:
        return
def __search(self, titles):
    """Double-URL-encoded search; return the first cover-box entry if it matches."""
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(
                urllib.quote_plus(cleantitle.query(titles[0])))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        covers = dom_parser.parse_dom(html, 'ul', attrs={'class': 'coverBox'})
        covers = dom_parser.parse_dom(covers, 'li')
        covers = dom_parser.parse_dom(covers, 'span', attrs={'class': 'name'})
        covers = dom_parser.parse_dom(covers, 'a')

        # Only the first result is considered.
        if cleantitle.get(covers[0][1]) in wanted:
            return source_utils.strip_domain(covers[0][0]['href'])
        return
    except:
        return
def __search(self, titles):
    """Search and return the first result matching any of *titles*.

    Bug fix: the original loop compared ``r[0][1]`` — always the FIRST
    result's title — on every iteration instead of the current item's
    title ``i[1]``, so a later item's href could be returned even though
    its own title never matched. Each item is now checked against its
    own title.
    """
    try:
        query = urlparse.urljoin(
            self.base_link,
            self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))))

        wanted = [cleantitle.get(i) for i in set(titles) if i]

        html = client.request(query)
        results = dom_parser.parse_dom(html, 'article')
        results = dom_parser.parse_dom(results, 'div', attrs={'class': 'title'})
        results = dom_parser.parse_dom(results, 'a', req='href')

        for item in results:
            title = cleantitle.get(client.replaceHTMLCodes(item[1]))
            if title in wanted:
                return source_utils.strip_domain(item[0]['href'])
        return
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    """Locate the episode link on a show page via season/episode span markers."""
    try:
        if url is None:
            return

        page = client.request(urlparse.urljoin(self.base_link, url))

        title = cleantitle.get(title)
        # Re-order the air date from YYYY-MM-DD to DD/MM/YYYY.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])

        items = dom_parser.parse_dom(page, 'a', attrs={'itemprop': 'url'})
        marker = re.compile(
            '<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season, episode))
        href = [i.attrs['href'] for i in items
                if bool(marker.search(i.content))][0]
        return href.encode('utf-8')
    except:
        return
def sources(self, url, hostDict, hostprDict):
    """Build the list of (debrid-only) sources for a TV episode.

    Fixes: removed a stray debug ``print(q)``, deleted commented-out dead
    code, and replaced ``== None`` / ``not host in`` with the idiomatic
    ``is None`` / ``not in``. Matching, quality/size labelling, and the
    silent best-effort error handling are unchanged.
    """
    try:
        sources = []

        if url is None:
            return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        try:
            # Episodes only: build an SxxEyy tag and query the JSON search API.
            if 'tvshowtitle' not in data:
                raise Exception()
            links = []
            f = ['S%02dE%02d' % (int(data['season']), int(data['episode']))]
            t = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                       data['tvshowtitle'])
            t = t.replace("&", "")
            q = self.search_link + urllib.quote_plus('%s %s' % (t, f[0]))
            q = urlparse.urljoin(self.base_link, q)
            result = client.request(q)
            result = json.loads(result)
            result = result['results']
        except:
            links = result = []

        for i in result:
            try:
                if not cleantitle.get(t) == cleantitle.get(i['showName']):
                    raise Exception()

                # The release name must contain our exact SxxEyy tag.
                y = i['release']
                y = re.compile(
                    '[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
                y = y.upper()
                if not any(x == y for x in f):
                    raise Exception()

                quality = i['quality'].upper()

                size = float(i['size']) / 1024  # MB -> GB
                size = '%.2f GB' % size

                if any(x in quality for x in ['HEVC', 'X265', 'H265']):
                    info = '%s | HEVC' % size
                else:
                    info = size

                if '1080P' in quality:
                    quality = '1080p'
                elif '720P' in quality:
                    quality = 'HD'
                else:
                    quality = 'SD'

                url = i['links']
                links = [{'url': url[x], 'quality': quality}
                         for x in url.keys()]

                for link in links:
                    try:
                        url = link['url']
                        quality2 = link['quality']
                        if len(url) > 1:
                            raise Exception()
                        url = url[0].encode('utf-8')

                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(url.strip().lower()).netloc)[0]
                        if host not in hostprDict:
                            raise Exception()
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': quality2,
                                        'language': 'en', 'url': url,
                                        'info': info, 'direct': False,
                                        'debridonly': True})
                    except:
                        pass
            except:
                pass

        return sources
    except:
        return sources