def __search(self, imdb):
    try:
        l = ['1', '15']

        r = client.request(urlparse.urljoin(self.base_link, self.search_link % imdb))

        r = dom_parser.parse_dom(r, 'table', attrs={'id': 'RsltTableStatic'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'alt': 'language'}, req='src')) for i in r]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].attrs['src']) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], re.findall('.+?(\d+)\.', i[2])) for i in r]
        r = [(i[0], i[1], i[2][0] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]))  # german > german/subbed
        r = [i[0] for i in r if i[2] in l][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})

        r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
        r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
        r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, search_link, imdb, titles):
    try:
        query = search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
        r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [i.attrs['href'] for i in r if i and cleantitle.get(i.content) in t][0]

        url = source_utils.strip_domain(r)

        r = client.request(urlparse.urljoin(self.base_link, url))
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
        r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
        r = [i[0] for i in r if i]

        return url if imdb in r else None
    except:
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        query = urlparse.urljoin(self.base_link, url)

        r = client.request(query)
        r = dom_parser.parse_dom(r, 'td', attrs={'data-title-name': re.compile('Season %02d' % int(season))})
        r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

        r = client.request(urlparse.urljoin(self.base_link, r))
        r = dom_parser.parse_dom(r, 'td', attrs={'data-title-name': re.compile('Episode %02d' % int(episode))})
        r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year):
    try:
        n = cache.get(self.__get_nonce, 24)

        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])), n)
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)
        r = json.loads(r)
        r = [(r[i].get('url'), r[i].get('title'), r[i].get('extra').get('date')) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year, imdb):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t]

        url = None  # initialized so a miss falls through cleanly instead of raising NameError
        if len(r) > 1:
            # multiple candidates: confirm the match via the IMDb id linked on each detail page
            for i in r:
                data = client.request(urlparse.urljoin(self.base_link, i))
                data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]
                if len(data) >= 1:
                    url = i
        else:
            url = r[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        return

def __search_movie(self, imdb, year):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link % imdb)

        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
        r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href'), dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})) for i in r]
        r = [(i[0][0].attrs['href'], re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in i[1]]))) for i in r if i[0] and i[1]]
        r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[1]), reverse=True)  # with year > no year
        r = [i[0] for i in r if i[1] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, imdb, year):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query, XHR=True)
        r = json.loads(r)

        r = [(i.get('title'), i.get('custom_fields', {})) for i in r.get('posts', [])]
        r = [(i[0], i[1]) for i in r if i[0] and i[1]]
        r = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']), i[1].get('IMDb-Link', [''])) for i in r if i]
        r = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0])) for i in r if i[0] and i[1] and i[2] and i[3]]
        r = [i[1] for i in r if imdb in i[3] or (cleantitle.get(i[0]) in t and i[2] in y)][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
        r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content) for i in r if i[0] and i[1]]
        r = [(i[0][0].attrs['href'], client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r if i[0]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year, season='0'):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['products', 'row']})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': ['box-product', 'clearfix']})

        if int(season) > 0:
            r = [i for i in r if dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
        else:
            r = [i for i in r if not dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]

        r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title-product'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        url = source_utils.strip_domain(r)
        url = url.replace('-info', '-stream')

        return url
    except:
        return

def __search(self, titles, year):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0] + ' ' + year)))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'figure', attrs={'class': 'pretty-figure'})
        r = dom_parser.parse_dom(r, 'figcaption')

        for i in r:
            title = client.replaceHTMLCodes(i[0]['title'])
            title = cleantitle.get(title)

            if title in t:
                x = dom_parser.parse_dom(i, 'a', req='href')
                return source_utils.strip_domain(x[0][0]['href'])

        return
    except:
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        tvshowtitle = data['tvshowtitle']
        localtvshowtitle = data['localtvshowtitle']
        aliases = source_utils.aliases_to_array(eval(data['aliases']))

        url = self.__search([localtvshowtitle] + aliases, data['year'], season)
        if not url and tvshowtitle != localtvshowtitle:
            url = self.__search([tvshowtitle] + aliases, data['year'], season)
        if not url:
            return

        r = client.request(urlparse.urljoin(self.base_link, url))

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['list-inline', 'list-film']})
        r = dom_parser.parse_dom(r, 'li')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content) for i in r if i]
        r = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0') for i in r]
        r = [i[0] for i in r if int(i[1]) == int(episode)][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
        r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
        r = dom_parser.parse_dom(r, 'a', req='href')

        for i in r:
            title = i[1]
            if re.search('\*(?:.*?)\*', title) is not None:
                title = re.sub('\*(?:.*?)\*', '', title)
            title = cleantitle.get(title)

            if title in t:
                return source_utils.strip_domain(i[0]['href'])
            else:
                return
    except:
        return

def __search(self, title):
    try:
        t = cleantitle.get(title)

        r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'suchbegriff': title})

        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'ausgabe_1'}, req='href')
        r = [(i.attrs['href'], i.content) for i in r]
        r = [i[0] for i in r if cleantitle.get(i[1]) == t][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(urlparse.urljoin(self.base_link, self.search_link), post=urllib.urlencode({'val': cleantitle.query(titles[0])}), XHR=True)

        r = dom_parser.parse_dom(r, 'li')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content, re.findall('\((\d{4})', i.content)) for i in r]
        r = [(i[0], i[1], i[2][0] if i[2] else '0') for i in r]
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year, season='0'):
    try:
        aj = cache.get(self.__get_ajax_object, 24)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'), 'query': cleantitle.query(titles[0])})

        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        return source_utils.strip_domain(r)
    except:
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        r = client.request(urlparse.urljoin(self.base_link, url))

        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'episodes'})
        r = dom_parser.parse_dom(r, 'tr', attrs={'class': ['episode', 'season_%s' % season]})
        r = dom_parser.parse_dom(r, 'span', attrs={'class': 'normal'})
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'b')) for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].content) for i in r if i[0] and i[1]]
        r = [i[0] for i in r if i[1].upper() == 'E%s' % episode]

        if len(r) >= 1:
            return source_utils.strip_domain(r[0])
    except:
        return

def __search(self, titles):
    try:
        query = self.search_link % (urllib.quote_plus(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'article')
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
        r = [(i.attrs['href'], i.content) for i in r]
        r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]

        return source_utils.strip_domain(r)
    except:
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if not url:
            return

        url = urlparse.urljoin(self.base_link, url)
        url = client.request(url, output='geturl')

        # compare numerically; season/episode are typically passed in as strings
        if int(season) == 1 and int(episode) == 1:
            season = episode = ''

        r = client.request(url)
        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'episodios'})
        r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*%s' % ('-%sx%s' % (season, episode)))})[0].attrs['href']

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year, season='0'):
    try:
        query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'article', attrs={'class': 'shortstory'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
        r = dom_parser.parse_dom(r, 'h2')
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href'], i.content.lower()) for i in r if i]
        r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]), re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
        r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
        r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year, content):
    try:
        t = [cleantitle.get(i) for i in set(titles) if i]

        c = client.request(urlparse.urljoin(self.base_link, self.year_link % int(year)), output='cookie')

        p = urllib.urlencode({'search': cleantitle.query(titles[0])})
        c = client.request(urlparse.urljoin(self.base_link, self.search_link), cookie=c, post=p, output='cookie')
        r = client.request(urlparse.urljoin(self.base_link, self.type_link % content), cookie=c, post=p)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'content'})
        r = dom_parser.parse_dom(r, 'tr')
        r = [dom_parser.parse_dom(i, 'td') for i in r]
        r = [dom_parser.parse_dom(i, 'a', req='href') for i in r]
        r = [(i[0].attrs['href'], i[0].content, i[1].content) for i in r if i]

        x = []
        for i in r:
            if re.search('(?<=<i>\().*$', i[1]):
                x.append((i[0], re.search('(.*?)(?=\s<)', i[1]).group(), re.search('(?<=<i>\().*$', i[1]).group(), i[2]))
            else:
                x.append((i[0], i[1], i[1], i[2]))

        r = [i[0] for i in x if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] == year][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'MovieList'})
        r = dom_parser.parse_dom(r, 'li', attrs={'class': 'TPostMv'})
        r = dom_parser.parse_dom(r, 'a')

        for i in r:
            title = dom_parser.parse_dom(i, 'h2', attrs={'class': 'Title'})
            title = cleantitle.get(title[0][1])

            if title in t:
                return source_utils.strip_domain(i[0]['href'])
    except:
        return

def __search(self, titles):
    try:
        query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})

        r = client.request(query, post=post)

        r = dom_parser.parse_dom(r, 'table', attrs={'class': 'table'})
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'PreviewImage'})

        for x in r:
            title = cleantitle.get(x[1])
            if title in t:
                return source_utils.strip_domain(x[0]['href'])

        return
    except:
        return

def __search(self, titles):
    try:
        query = urlparse.urljoin(self.base_link, self.search_link)
        post = urllib.urlencode({'keyword': titles[0]})

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query, post=post)
        r = json.loads(r)
        r = r['content']
        r = dom_parser.parse_dom(r, 'li')

        for i in r:
            title = dom_parser.parse_dom(i[1], 'a', attrs={'class': 'ss-title'})
            if cleantitle.get(title[0][1]) in t:
                return source_utils.strip_domain(title[0][0]['href'])
    except:
        return

def __search(self, titles, year, content_type):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])), content_type)
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'search'})
        r = dom_parser.parse_dom(r, 'table')
        r = dom_parser.parse_dom(r, 'tr', attrs={'class': re.compile('entry\d+')})
        r = [(dom_parser.parse_dom(i, 'a'), dom_parser.parse_dom(i, 'img', attrs={'class': 'flag', 'alt': 'de'})) for i in r]
        r = [i[0] for i in r if i[0] and i[1]]
        r = [(i[0].attrs['href'], i[0].content) for i in r]
        r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, imdb):
    try:
        r = client.request(urlparse.urljoin(self.base_link, self.search_link % imdb))

        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [i.attrs['href'] for i in r if i]

        url = None  # initialized so a miss falls through cleanly instead of raising NameError
        if len(r) > 1:
            # multiple hits: keep the page that actually references the IMDb id
            for i in r:
                data = client.request(urlparse.urljoin(self.base_link, i))
                data = re.compile('(imdbid\s*[=|:]\s*"%s"\s*,)' % imdb, re.DOTALL).findall(data)
                if len(data) >= 1:
                    url = i
        else:
            url = r[0]

        if url:
            return source_utils.strip_domain(url)
    except:
        return

def __search(self, titles, year):
    try:
        r = urllib.urlencode({'keyword': titles[0]})
        r = client.request(urlparse.urljoin(self.base_link, self.search_link), XHR=True, post=r)

        if r is None:
            r = urllib.urlencode({'keyword': cleantitle.query(titles[0])})
            r = client.request(urlparse.urljoin(self.base_link, self.search_link), XHR=True, post=r)

        t = [cleantitle.get(i) for i in set(titles) if i]
        y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

        r = json.loads(r)
        r = [(i['link'], re.sub('<.+?>|</.+?>', '', i['title'])) for i in r if 'title' in i and 'link' in i]
        r = [(i[0], i[1], re.findall('(.+?)\s*Movie \d+:.+?$', i[1], re.DOTALL)) for i in r]
        r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1]) for i in r]
        r = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1])) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

        return source_utils.strip_domain(r)
    except:
        return

def __get_correct_link(_url, content, checkval):
    # note: this helper references self.base_link without taking self, so it is assumed
    # to be defined as a nested closure inside one of the scraper's methods
    try:
        if not _url:
            return

        _url = urlparse.urljoin(self.base_link, _url)

        r = client.request(_url)
        r = re.findall('<h4>%s[^>]*</h4>(.*?)<div' % content, r, re.DOTALL | re.IGNORECASE)[0]
        r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
        r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'span')) for i in r]
        r = [(i[0][0].attrs['href'], i[1][0].content) for i in r if i[0] and i[1]]
        r = [(i[0], i[1] if i[1] else '0') for i in r]
        r = [i[0] for i in r if int(i[1]) == int(checkval)][0]
        r = re.sub('/(1080p|720p|x264|3d)', '', r, flags=re.I)

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles, year):
    try:
        query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
        r = [(dom_parser.parse_dom(i.content, 'h4', attrs={'class': 'title-list'}), dom_parser.parse_dom(i.content, 'a', attrs={'href': re.compile('.*/year/.*')})) for i in r]
        r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'), i[1][0].content if i[1] else '0') for i in r if i[0]]
        r = [(i[0][0].attrs['href'], i[0][0].content, re.sub('<.+?>|</.+?>', '', i[1])) for i in r if i[0] and i[1]]
        r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
        r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
        r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year][0]

        return source_utils.strip_domain(r)
    except:
        return

def __search(self, titles):
    try:
        query = self.search_link % (urllib.quote_plus(urllib.quote_plus(cleantitle.query(titles[0]))))
        query = urlparse.urljoin(self.base_link, query)

        t = [cleantitle.get(i) for i in set(titles) if i]

        r = client.request(query)

        r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'coverBox'})
        r = dom_parser.parse_dom(r, 'li')
        r = dom_parser.parse_dom(r, 'span', attrs={'class': 'name'})
        r = dom_parser.parse_dom(r, 'a')

        title = r[0][1]
        title = cleantitle.get(title)

        if title in t:
            return source_utils.strip_domain(r[0][0]['href'])
        else:
            return
    except:
        return