def movie(self, imdb, title, localtitle, aliases, year):
    # Find the site URL for a movie: scrape the search page and pick the
    # result whose cleaned title and year both match. Returns the href
    # string, or None on any failure (broad except below).
    try:
        items = []
        clean_title = cleantitle.geturl(title) + '-' + year
        search_url = urlparse.urljoin(
            self.base_link,
            self.search_link % clean_title.replace('-', '+'))
        # cache.get memoises client.request (second arg is the cache
        # duration -- units defined by the project's cache helper; not
        # visible here).
        r = cache.get(client.request, 1, search_url)
        r = client.parseDOM(r, 'div', {'class': 'col-sm-12'})
        r = client.parseDOM(r, 'div', {'class': 'col-sm-2.+?'})
        # Each result card: <h3><a href=...>name</a></h3>.
        r1 = client.parseDOM(r, 'h3')
        r1 = [(client.parseDOM(i, 'a', ret='href')[0],
               client.parseDOM(i, 'a')[0]) for i in r1]
        # Year sits after an <i> icon: "</i> 2016</span>".
        y = [re.findall('</i>\s*(\d{4})</span>', i) for i in r]
        # Pair links with years by index -- assumes r1 and y stay
        # aligned (same cards, same order); TODO confirm vs live markup.
        items += [(r1[i], y[i]) for i in range(len(y))]
        r = [(i[0][0], i[1][0], i[0][1]) for i in items
             if (cleantitle.get(i[0][1]) == cleantitle.get(title)
                 and i[1][0] == year)]
        url = r[0][0]
        return url
    except Exception:
        return
def search(self, localtitle, year, search_type):
    # Scan the site's search results for an entry whose cleaned title,
    # year span and media-type span all match; return its link or None.
    try:
        wanted = cleantitle.get(localtitle)
        search_url = urlparse.urljoin(
            self.base_link,
            self.search_link % urllib.quote_plus(cleantitle.query(localtitle)))
        html = client.request(search_url)
        for item in client.parseDOM(html, 'div', attrs={'class': 'result-item'}):
            type_spans = client.parseDOM(item, 'span', attrs={'class': search_type})
            # NB: indexing [0] raises on items without a year span; the
            # bare except then aborts the whole search (as before).
            year_ok = client.parseDOM(item, 'span', attrs={'class': 'year'})[0] == year
            title_div = client.parseDOM(item, 'div', attrs={'class': 'title'})[0]
            link = client.parseDOM(title_div, 'a', ret='href')[0]
            found = cleantitle.get(client.parseDOM(title_div, 'a')[0])
            if type_spans and year_ok and found == wanted:
                return link
    except:
        return
def search(self, title, localtitle, year):
    # Search the site for a movie and return [absolute_url, result_text]
    # for the first result containing any word of the cleaned local
    # title together with the year; None on no match or error.
    # Fixes: removed two dead assignments (simply_name/simply_name2 were
    # overwritten or never read), removed a redundant trailing
    # `continue`, modernised the Py2-only `except E, e` syntax.
    try:
        # NOTE(review): legacy Py2 hack forcing utf-8 as the default
        # codec; kept as-is because downstream string handling may rely
        # on it -- confirm before removing.
        import sys
        reload(sys)
        sys.setdefaultencoding('utf8')
        # Words of the cleaned local title; a result matches when ANY
        # word occurs in it alongside the year.
        title_words = cleantitle.query(localtitle).split(' ')
        query = self.search_link % urllib.quote_plus(
            cleantitle.query(localtitle))
        url = urlparse.urljoin(self.base_link, query)
        result = client.request(url)
        result = client.parseDOM(result, 'div',
                                 attrs={'class': 'row search-results'})
        results = client.parseDOM(
            result, 'div',
            attrs={'class': 'item-detail-bigblock title title-bigblock'})
        for result in results:
            movieneourl = client.parseDOM(result, 'a', ret='href')[0]
            result = client.parseDOM(result, 'a')[0]
            for word in title_words:
                if word in result and year in result:
                    return [
                        urlparse.urljoin(self.base_link, movieneourl),
                        result
                    ]
    except Exception as e:
        print(str(e))
        return
def matchAlias(self, title, aliases):
    # Return True when the cleaned title equals the cleaned title of any
    # alias; False otherwise.
    # Fix: the original fell off the end of the loop and returned None
    # when nothing matched -- now every no-match path returns False
    # explicitly (backward compatible: both are falsy). Also hoists the
    # loop-invariant cleantitle.get(title) out of the loop.
    try:
        cleaned = cleantitle.get(title)
        for alias in aliases:
            if cleaned == cleantitle.get(alias['title']):
                return True
        return False
    except:
        return False
def do_search(self, title, localtitle, year, is_movie_search):
    # Query the site's search endpoint and return the href of the first
    # result matching title/localtitle, type (Film/Serial) and year;
    # None on no match or any error.
    try:
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % urllib.quote(title)
        result = client.request(url)
        result = result.decode('utf-8')
        result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})
        # Flatten all <li> entries from every result list.
        li_list = []
        for el in result:
            li_list.extend(client.parseDOM(el, 'li'))
        # Per item: (href, local title, original title or None, info
        # line). The "+ [None])[0]" trick defaults a missing title_org.
        result = [(client.parseDOM(i, 'a', ret='href')[0],
                   client.parseDOM(i, 'div', attrs={'class': 'title'})[0],
                   (client.parseDOM(i, 'div', attrs={'class': 'title_org'}) + [None])[0],
                   client.parseDOM(i, 'div', attrs={'class': 'info'})[0],
                   ) for i in li_list]
        search_type = 'Film' if is_movie_search else 'Serial'
        cleaned_titles = [cleantitle.get(title), cleantitle.get(localtitle)]
        # filter by name (original vs local title, any pairing)
        result = [x for x in result
                  if self.check_titles(cleaned_titles, [x[2], x[1]])]
        # filter by type: info line starts with "Film"/"Serial"
        result = [x for x in result if x[3].startswith(search_type)]
        # filter by year: info line ends with the year
        result = [x for x in result if x[3].endswith(str(year))]
        if len(result) > 0:
            return result[0][0]
        else:
            return
    except :
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve the site-relative URL of a single episode page.
    # Matching order: (1) episode name + air date, (2) air date alone
    # when that yields exactly one hit, (3) "season-S-episode-E" in the
    # href. Returns a utf-8 path string, or None on any failure.
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = proxy.request(url, 'main_body')
        result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
        title = cleantitle.get(title)
        # Per item: ([hrefs], [episode names], [yyyy-mm-dd dates]).
        result = [(client.parseDOM(i, 'a', ret='href'),
                   client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}),
                   re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
        # Normalise: missing name/date become None (items are regrouped,
        # so original order is not preserved); drop items without hrefs.
        result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
        result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
        result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
        url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
        if len(url) == 0: url = [i for i in result if premiered == i[2]]
        if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
        url = client.replaceHTMLCodes(url[0][0])
        # Unwrap the proxy link, then keep only the path portion.
        url = proxy.parse(url)
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Find a movie on a site that groups films by Indian language: look
    # the film's primary language up on IMDb, search inside that
    # language section, and match on cleaned title + year. Returns the
    # matched result's numeric id as a str, or None on any failure.
    try:
        langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}
        # Scrape the IMDb title page for a primary_language query param.
        lang = 'http://www.imdb.com/title/%s/' % imdb
        lang = client.request(lang)
        lang = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', lang)
        lang = [i for i in lang if 'primary_language' in i]
        lang = [urlparse.parse_qs(urlparse.urlparse(i).query) for i in lang]
        lang = [i['primary_language'] for i in lang if 'primary_language' in i]
        lang = langMap[lang[0][0]]
        q = self.search_link % (lang, urllib.quote_plus(title))
        q = urlparse.urljoin(self.base_link, q)
        t = cleantitle.get(title)
        r = client.request(q)
        r = client.parseDOM(r, 'li')
        # Per result: (hrefs, names, info blocks); keep complete ones.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h3'), client.parseDOM(i, 'div', attrs = {'class': 'info'})) for i in r]
        r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
        # Numeric id from the href; 4-digit year from the info block.
        r = [(re.findall('(\d+)', i[0]), i[1], re.findall('(\d{4})', i[2])) for i in r]
        r = [(i[0][0], i[1], i[2][0]) for i in r if i[0] and i[2]]
        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
        url = str(r)
        return url
    except:
        return
def check_titles(self, cleaned_titles, found_titles):
    # True when either pre-cleaned title equals either found title
    # (cleaned on the fly), in any pairing. The comparison order of the
    # original is preserved so cleantitle.get is only invoked for a
    # found title once earlier pairs have failed to match.
    pair_order = ((0, 0), (1, 1), (0, 1), (1, 0))
    return any(
        cleaned_titles[a] == cleantitle.get(found_titles[b])
        for a, b in pair_order)
def search(self, localtitle, year, search_type):
    # POST the site's search form and return the second anchor's href
    # from the first row whose title and bracketed "(year)" match.
    try:
        endpoint = urlparse.urljoin(self.base_link, self.search_link)
        page = client.request(endpoint, redirect=False,
                              post={'q': cleantitle.query(localtitle), 'sb': ''})
        wanted = cleantitle.get(localtitle)
        for item in client.parseDOM(page, 'div', attrs={'class': 'small-item'}):
            label = client.parseDOM(item, 'a')[1]
            link = client.parseDOM(item, 'a', ret='href')[1]
            if search_type not in link:
                continue
            # Year is embedded in the label as "Title (2016)".
            bracket_year = label[label.find("(") + 1:label.find(")")]
            if cleantitle.get(label) == wanted and bracket_year == year:
                return link
    except:
        return
def __search(self, titles, year):
    # Find the site path(s) for titles[0]/year. A matching link that
    # contains 'season' is returned immediately (first hit); otherwise
    # matching paths are collected and the list is returned.
    # Fixes: removed a stray debug `print`; replaced the index-based
    # `for i in range(len(r))` loop with direct iteration over the
    # zipped (anchor, year-span) pairs.
    try:
        query = self.search_link % (urllib.quote_plus(
            cleantitle.getsearch(titles[0])))
        query = urlparse.urljoin(self.base_link, query)
        t = cleantitle.get(titles[0])
        scraper = cfscrape.create_scraper()
        data = scraper.get(query).content
        data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
        r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
        r = zip(
            dom_parser.parse_dom(r, 'a'),
            dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))
        url = []
        for anchor, year_span in r:
            # anchor is (attrs, content); strip quality/edition noise
            # from the link text before comparing titles.
            title = cleantitle.get(anchor[1])
            title = re.sub('(\d+p|4k|3d|hd|season\d+)', '', title)
            y = year_span[1]
            link = anchor[0]['href']
            if 'season' in title:
                continue
            if t == title and y == year:
                if 'season' in link:
                    url.append(source_utils.strip_domain(link))
                    return url[0]
                else:
                    url.append(source_utils.strip_domain(link))
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a show's path: canonicalise the name via TVMaze (thetvdb
    # id lookup), scrape the site's search listing, and match cleaned
    # title + year. Returns a utf-8 path string, or None on any failure.
    try:
        tv_maze = tvmaze.tvMaze()
        tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
        tvshowtitle = tvshowtitle['name']
        t = cleantitle.get(tvshowtitle)
        q = urlparse.urljoin(self.base_link, self.search_link)
        q = q % urllib.quote_plus(tvshowtitle)
        r = client.request(q)
        r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
        r = client.parseDOM(r, 'li')
        # Per item: (hrefs, titles, all 4-digit numbers in the markup);
        # the LAST 4-digit number is taken as the year.
        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i)) for i in r]
        r = [(i[0][0], i[1][0], i[2][-1]) for i in r if i[0] and i[1] and i[2]]
        r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
        r = r[0][0]
        # Keep only the path portion of the matched link.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    # Query the JSON search API and return the href of the first anchor
    # whose ss-title matches the normalised title; None on any failure.
    try:
        normalized = cleantitle.normalize(title)
        response = client.request(self.api_link % cleantitle.geturl(normalized))
        content = json.loads(response)['content']
        hrefs = client.parseDOM(content, 'a', ret='href')
        names = client.parseDOM(content, 'a', attrs={'class': 'ss-title'})
        wanted = cleantitle.get(normalized)
        for href, name in zip(hrefs, names):
            if wanted == cleantitle.get(name):
                return href
    except:
        return
def do_search(self, search_string, title, localtitle, year, search_type):
    # Search ekino-tv.pl and return the link of the first row whose
    # (de-noised) title matches title/localtitle and whose year matches;
    # implicitly returns None when nothing matches.
    # Fix: dropped a dead `url = urlparse.urljoin(...)` assignment that
    # was computed but never used (the request URL is built inline).
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3435.0 Safari/537.36',
        'Host': 'ekino-tv.pl'
    }
    r = client.request("https://ekino-tv.pl/se/search?q=%s" %
                       str.lower(search_string).replace(" ", "+"),
                       headers=headers)
    r = client.parseDOM(r, 'div', attrs={'class': 'movies-list-item'})
    r = [x.encode('utf-8') for x in r]
    local_simple = cleantitle.get(localtitle)
    title_simple = cleantitle.get(title)
    for row in r:
        row = client.parseDOM(row, 'div', attrs={'class': 'opis-list'})[0]
        title_found = client.parseDOM(row, 'div', attrs={'class': 'title'})[0]
        link = client.parseDOM(title_found, 'a', ret='href')[0]
        if search_type not in link:
            continue
        # Repair a mojibake'd Polish character, then strip language /
        # quality markers and separators from the local title text.
        local_found = client.parseDOM(
            str(title_found).replace("Å ", "ń"), 'a')[0]
        local_found = local_found.replace(' ', '')
        local_found = local_found.replace('ENG', '')
        local_found = local_found.replace('CAM', '')
        local_found = local_found.replace('HD', '')
        local_found = local_found.replace('-', '')
        local_found = local_found.replace(' ', '')
        # Original title is the "blue" anchor; fall back to local name.
        title_found = client.parseDOM(title_found, 'a', attrs={'class': 'blue'})
        title_found = title_found[0] if title_found and title_found[0] else local_found
        local_found = local_found.replace(' ', '')
        title_found = title_found.replace(' ', '')
        year_found = client.parseDOM(row, 'p', attrs={'class': 'cates'})
        if year_found:
            year_found = year_found[0][:4]
        title_match = (cleantitle.get(local_found) == local_simple
                       or cleantitle.get(title_found) == title_simple)
        year_match = (not year_found) or year == year_found
        if title_match and year_match:
            return link
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Look the show up via the site's JSON search endpoint and return
    # the first matching entry's path (domain stripped); None on error.
    try:
        slug = cleantitle.geturl(tvshowtitle).replace('-', '+')
        response = client.request(
            urlparse.urljoin(self.base_link, self.tv_search_link % slug))
        listing = json.loads(response)
        wanted = cleantitle.get(tvshowtitle)
        matches = [listing[key]['url'] for key in listing
                   if cleantitle.get(listing[key]['title']) == wanted]
        # IndexError on no match is swallowed by the except -> None.
        return source_utils.strip_domain(matches[0])
    except Exception:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Scrape the search page for a series whose cleaned name matches
    # the title (or any alias) and whose label mentions the year;
    # return its utf-8-encoded url, or None.
    try:
        self.basetester()
        slug = cleantitle.geturl(tvshowtitle).replace('-', '+')
        html = client.request(
            urlparse.urljoin(self.base_link, self.search_link % slug))
        candidates = set([tvshowtitle] + source_utils.aliases_to_array(aliases))
        cleaned = [cleantitle.get(name) for name in candidates if name]
        pattern = re.compile(
            'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
            re.DOTALL)
        for link, label in pattern.findall(html):
            if cleantitle.get(cleantitle.normalize(label)) in cleaned and year in label:
                return link.encode('utf-8')
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # POST the local title to the site's search form; return the href
    # of the first video_info block whose <h1> title and "(year)" match.
    try:
        endpoint = urlparse.urljoin(self.base_link, self.search_link)
        page = client.request(endpoint, redirect=False,
                              post={'szukaj' :cleantitle.query(localtitle)})
        wanted = cleantitle.get(localtitle)
        for block in client.parseDOM(page, 'div', attrs={'class':'video_info'}):
            heading = client.parseDOM(block, 'h1')[0]
            # Year is embedded in the heading as "Title (2016)".
            bracket_year = heading[heading.find("(") + 1:heading.find(")")]
            if cleantitle.get(heading) == wanted and bracket_year == year:
                return client.parseDOM(block, 'a', ret='href')[0]
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Find a movie's URL by scraping the search results and matching on
    # cleaned title plus the year shown in the result's year span.
    # Returns the href of the first match, or None on any failure.
    try:
        clean_title = cleantitle.geturl(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
        r = client.request(search_url)
        r = client.parseDOM(r, 'div', {'class': 'result-item'})
        # Per result: (first <a> element, image alt-text with any tags
        # stripped as the title, list of year spans).
        r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
              re.sub('<.*?>', '' , re.findall('alt=\"(.*?)\"', i)[0]),
              dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r
             if (cleantitle.get(i[1]) == cleantitle.get(title)
                 and i[2][0].content == year)]
        url = r[0][0]
        return url
    except Exception:
        return
def sources(self, url, hostDict, locDict):
    # Collect playable CDN sources from imdark.com: locate the show
    # page via site search (which needs a per-session token), call its
    # AJAX player endpoint, and parse the src/type/data-res triples.
    # Always returns the (possibly empty) sources list.
    sources = []
    req = requests.Session()
    headers = {'User-Agent': client.randomagent(),
               'Origin': 'http://imdark.com',
               'Referer': 'http://imdark.com',
               'X-Requested-With': 'XMLHttpRequest'}
    try:
        if url == None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        query = urllib.quote_plus(title).lower()
        # The search form embeds a per-session "darkestsearch" token.
        result = req.get(self.base_link, headers=headers).text
        darksearch = re.findall(r'darkestsearch" value="(.*?)"', result)[0]
        result = req.get(self.base_link + self.search_link % (query, darksearch), headers=headers).text
        r = client.parseDOM(result, 'div', attrs={'id':'showList'})
        r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
        # First entry matching the cleaned title and containing the year.
        r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
        url = r[0]
        print("INFO - " + url)
        result = req.get(url, headers=headers).text
        # Tokens required by the 'getitsufiplaying' AJAX action.
        nonce = re.findall(r"nonce = '(.*?)'", result)[0]
        tipi = re.findall(r'tipi = (.*?);', result)[0]
        postData = {'action':'getitsufiplaying', 'tipi':tipi, 'jhinga':nonce}
        result = req.post(self.base_link + self.ajax_link, data=postData, headers=headers).text
        r = re.findall(r'"src":"(.*?)","type":"(.*?)","data-res":"(\d*?)"', result)
        # Headers the player needs, appended to each stream URL.
        linkHeaders = 'Referer=http://imdark.com/&User-Agent=' + urllib.quote(client.randomagent()) + '&Cookie=' + urllib.quote('mykey123=mykeyvalue')
        for i in r:
            print(str(i))
            try:
                q = source_utils.label_to_quality(i[2])
                sources.append({'source': 'CDN', 'quality': q,
                                'info': i[1].replace('\\', ''),
                                'language': 'en',
                                'url': i[0].replace('\\','') + '|' + linkHeaders,
                                'direct': True, 'debridonly': False})
            except:
                traceback.print_exc()
                pass
        for i in sources:
            print("INFO SOURCES " + str(i))
        return sources
    except:
        traceback.print_exc()
        return sources
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    # Resolve a show's path on a proxied site: fetch the search key,
    # run the query (pulling in page 2 when advertised), prefer an
    # exact "watch<title> (year)" match, and otherwise probe the top
    # candidates' pages for the IMDb id. Returns a utf-8 path or None.
    try:
        key = urlparse.urljoin(self.base_link, self.key_link)
        key = proxy.request(key, 'main_body')
        key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]
        query = self.tvsearch_link % (urllib.quote_plus(cleantitle.query(tvshowtitle)), key)
        query = urlparse.urljoin(self.base_link, query)
        result = str(proxy.request(query, 'main_body'))
        # Append the second results page when one is advertised.
        if 'page=2' in result or 'page%3D2' in result: result += str(proxy.request(query + '&page=2', 'main_body'))
        result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
        # Site labels are "watch<Title> (year)"; allow year +/- 1.
        tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
        years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
        result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
        result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
        result = [i for i in result if any(x in i[1] for x in years)]
        r = [(proxy.parse(i[0]), i[1]) for i in result]
        # Exact title + exact year matches first.
        match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]
        # De-duplicated candidate list, original order preserved.
        match2 = [i[0] for i in r]
        match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
        if match2 == []: return
        for i in match2[:5]:
            try:
                if len(match) > 0: url = match[0] ; break
                # No exact match: open the candidate page and confirm
                # via the IMDb id embedded in its markup.
                r = proxy.request(urlparse.urljoin(self.base_link, i), 'main_body')
                r = re.findall('(tt\d+)', r)
                if imdb in r: url = i ; break
            except:
                pass
        # Keep only the path portion of the winning link.
        url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def name_matches(self, names_found, titles, year):
    # Return True when any found name, after cleaning, equals one of
    # the pre-cleaned titles; False otherwise.
    # NOTE(review): the `year` parameter is currently unused -- the
    # "title ends with year" handling hinted at by the original
    # comments was never implemented; callers filter by year separately.
    for name in names_found:
        name = name.strip().encode('utf-8')
        clean_found_title = cleantitle.get(name)
        # Sites sometimes append the year to the title; that case is
        # NOT handled here (see NOTE above).
        if clean_found_title in titles:
            return True
    return False
def getAlternativTitle(title):
    # Ask AniList for an alternative name: prefer the romaji title, or
    # its first synonym when the romaji equals the given title. Returns
    # None (via the swallowed except) when nothing different is found.
    try:
        cleaned = cleantitle.get(title)
        raw = _getAniList('/anime/search/%s' % title)
        entries = [
            (item.get('title_romaji'), item.get('synonyms', []))
            for item in utils.json_loads_as_str(raw)
            if cleantitle.get(item.get('title_english', '')) == cleaned
        ]
        alternatives = []
        for romaji, synonyms in entries:
            if romaji == title and len(synonyms) > 0:
                alternatives.append(synonyms[0])
            else:
                alternatives.append(romaji)
        candidates = [name for name in alternatives if name and name != title]
        return candidates[0]
    except:
        pass
def search(self, title, localtitle, year):
    # Log in to segos.es, search for the local title, and return the
    # absolute URL of the first result matching either cleaned title
    # and the year; None on no match or error.
    # Fixes: modernised the Py2-only `except E, e` syntax, removed two
    # dead `url` assignments (the pre-login urljoin and the login-URL
    # rebind were never read), removed a redundant trailing `continue`.
    try:
        simply_name = cleantitle.get(localtitle)
        simply_name2 = cleantitle.get(title)
        query = self.search_link % urllib.quote_plus(
            cleantitle.query(localtitle))
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
            'Referer': 'https://segos.es/?page=login'
        }
        data = {
            "login": self.user_name,
            'password': self.user_pass,
            'loguj': ''
        }
        s = requests.Session()
        # Authenticate first; the search page needs the session cookie.
        s.post('https://segos.es/?page=login', data=data, headers=headers)
        url = urlparse.urljoin(self.base_link, query)
        result = s.get(url).text
        results = client.parseDOM(
            result, 'div',
            attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
        for result in results:
            segosurl = client.parseDOM(result, 'a', ret='href')[0]
            result = client.parseDOM(result, 'a')
            # The link text may hold several names separated by '/'.
            segostitles = cleantitle.get(result[1]).split('/')
            # Year is the "(2016)" suffix of the raw link text.
            rok = str(result[1][-5:-1])
            for segostitle in segostitles:
                if (simply_name == segostitle or simply_name2 == segostitle) and year == rok:
                    return urlparse.urljoin(self.base_link, segosurl)
    except Exception as e:
        print(str(e))
        return
def search_ep(self, titles, season, episode):
    # Log in to segos.es and search each candidate title in turn; on a
    # cleaned-name match return the result link with season/episode
    # query params appended. Returns None on no match or any error.
    try:
        for title in titles:
            simply_name = cleantitle.get(title)
            query = self.search_link % str(title).replace(" ", "+")
            url = urlparse.urljoin(self.base_link, query)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                'Referer': 'https://segos.es/?page=login'
            }
            data = {
                "login": self.user_name,
                'password': self.user_pass,
                'loguj': ''
            }
            url = 'https://segos.es/?page=login'
            s = requests.Session()
            # Authenticate; the search page requires a session cookie.
            s.post('https://segos.es/?page=login', data=data, headers=headers)
            url = urlparse.urljoin(self.base_link, query)
            k = s.get(url)
            result = k.text
            results = client.parseDOM(
                result, 'div',
                attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
            for result in results:
                segosurl = client.parseDOM(result, 'a', ret='href')[0]
                # Append season (&s=) / episode (&o=) selectors.
                segosurl = segosurl + "&s=%s&o=%s" % (season, episode)
                result = client.parseDOM(result, 'a')
                # Link text may hold several names separated by '/'.
                segostitles = cleantitle.get(result[1]).split('/')
                for segostitle in segostitles:
                    if simply_name == segostitle:
                        link = urlparse.urljoin(self.base_link, segosurl)
                        return link
                continue
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    # Look a movie up via the site's XHR search (requires account
    # credentials): filter results to movies, match the cleaned title,
    # then confirm the year by scraping "Released:" off each candidate
    # page (at most two candidates fetched). Returns the path or None.
    try:
        if (self.user == '' or self.password == ''): raise Exception()
        t = cleantitle.get(title)
        u = urlparse.urljoin(self.base_link, self.search_link)
        p = {
            'q': title.rsplit(':', 1)[0],
            'limit': '10',
            'timestamp': int(time.time() * 1000),
            'verifiedCheck': ''
        }
        p = urllib.urlencode(p)
        r = client.request(u, post=p, XHR=True)
        r = json.loads(r)
        # Keep movie-type results only (meta starts with "movie").
        r = [
            i for i in r
            if i['meta'].strip().split()[0].lower() == 'movie'
        ]
        r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
        r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
        r = [(i[0], client.request(i[1])) for i in r]
        r = [(i[0], i[1]) for i in r if not i[1] == None]
        # Strip markup and whitespace, then read the release year.
        r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
        r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
        r = [(i[0], i[1][0]) for i in r if i[1]]
        r = [i for i in r if year in i[1]]
        r = r[0][0]
        # Keep only the path portion of the permalink.
        url = re.findall('(?://.+?|)(/.+)', r)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    # Resolve the href of the anchor whose markup carries the wanted
    # season number and itemprop="episodeNumber" value. Returns a utf-8
    # str, or None when url is None / nothing matches / any error.
    # Fix: the season/episode pattern is identical for every candidate
    # anchor, so it is now compiled once instead of per item.
    try:
        if url == None: return
        url = urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        title = cleantitle.get(title)
        # NOTE(review): `premiered` is reformatted but never used below;
        # kept because a malformed date deliberately aborts via the
        # IndexError -> except -> None path.
        premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
        premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])
        items = dom_parser.parse_dom(result, 'a', attrs={'itemprop':'url'})
        ep_pattern = re.compile(
            '<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season,episode))
        url = [i.attrs['href'] for i in items
               if bool(ep_pattern.search(i.content))][0]
        url = url.encode('utf-8')
        return url
    except:
        return
def do_search(self, title, local_title, year, video_type):
    # Search cda-hd (passing the site's session cookie) and return the
    # absolute URL of the first item whose type, any name, and year
    # (when present) all match; None otherwise.
    # Fixes: removed two dead `cookie = ''` assignments (the first was
    # immediately overwritten, the second never read again); hoisted the
    # loop-invariant cleaned `titles` list out of the loop.
    try:
        cookie = client.request(self.base_link, output='cookie', error=True)
        url = urlparse.urljoin(self.base_link, self.search_link)
        url = url % urllib.quote_plus(cleantitle.query(title))
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3435.0 Safari/537.36',
            'Referer': 'https://cda-hd.co/'
        }
        result = client.request(url, headers=headers, cookie=cookie, redirect=False)
        result = client.parseDOM(result, 'div', attrs={'class': 'item'})
        titles = [cleantitle.get(i) for i in [title, local_title]]
        for row in result:
            row_type = client.parseDOM(row, 'div', attrs={'class': 'typepost'})[0]
            if row_type != video_type:
                continue
            names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
            names = names.split('/')
            year_found = client.parseDOM(row, 'span', attrs={'class': 'year'})
            if self.name_matches(names, titles, year) and (len(year_found) == 0 or year_found[0] == year):
                url = client.parseDOM(row, 'a', ret='href')[0]
                return urlparse.urljoin(self.base_link, url)
    except:
        return
def do_search(self, title, local_title, year, video_type):
    # Search the site and return the absolute URL of the first item
    # whose type, any of its names, and (optional) year all match;
    # None when nothing matches or on any error.
    try:
        search_url = urlparse.urljoin(self.base_link, self.search_link)
        search_url = search_url % urllib.quote_plus(cleantitle.query(title))
        page = client.request(search_url)
        for item in client.parseDOM(page, 'div', attrs={'class': 'item'}):
            kind = client.parseDOM(item, 'div', attrs={'class': 'typepost'})[0]
            if kind != video_type:
                continue
            # Names are slash-separated inside the "tt" span.
            names = client.parseDOM(item, 'span', attrs={'class': 'tt'})[0].split('/')
            years = client.parseDOM(item, 'span', attrs={'class': 'year'})
            wanted = [cleantitle.get(t) for t in [title, local_title]]
            year_ok = (not years) or years[0] == year
            if self.name_matches(names, wanted, year) and year_ok:
                href = client.parseDOM(item, 'a', ret='href')[0]
                return urlparse.urljoin(self.base_link, href)
    except:
        return
def search(self, title, localtitle, year, search_type):
    # Search the site for a 3D release: every word of the normalised
    # title (or local title) must occur in a found name, and the page's
    # year (when readable) must match. Returns the row URL or None.
    try:
        title = cleantitle.normalize(cleantitle.getsearch(title + " 3d"))
        # NOTE(review): `names` is computed but never used below --
        # matching relies on the word lists further down.
        names = [cleantitle.get(i) for i in [title, localtitle]]
        r = client.request(
            urlparse.urljoin(self.base_link, self.search_link),
            post={'search': cleantitle.query(title + " 3d")})
        r = self.get_rows(r, search_type)
        localtitle = cleantitle.normalize(
            cleantitle.getsearch(localtitle + " 3d"))
        for row in r:
            url = client.parseDOM(row, 'a', ret='href')[0]
            names_found = client.parseDOM(row, 'h3')[0]
            # Skip trailer rows unless a trailer was explicitly asked for.
            if names_found.startswith(
                    'Zwiastun') and not localtitle.startswith('Zwiastun'):
                continue
            names_found = names_found.split('/')
            names_found = [
                cleantitle.normalize(cleantitle.getsearch(i))
                for i in names_found
            ]
            for name in names_found:
                # NOTE(review): these replace() calls appear intended to
                # swap a non-breaking space for a plain space; both
                # arguments render identically here -- confirm against
                # the file's original encoding.
                name = name.replace(" ", " ")
                title = title.replace(" ", " ")
                localtitle = localtitle.replace(" ", " ")
                words = title.split(" ")
                words2 = localtitle.split(" ")
                found_year = self.try_read_year(url)
                if (self.contains_all_wors(name, words)
                        or self.contains_all_wors(name, words2)) and (
                            not found_year or found_year == year):
                    return url
                else:
                    continue
            continue
    except:
        return
def getMeta(self, meta):
    # Resolve (poster, thumb, meta) for the current item. Attempts, in
    # order: (1) artwork already present in `meta`; (2) the local Kodi
    # movie library via JSON-RPC (year matched exactly after a +/-1
    # pre-filter); (3) the local episode library (show first, then the
    # season/episode); (4) a minimal {'title': name} fallback.
    try:
        # (1) Use supplied artwork when available.
        poster = meta['poster'] if 'poster' in meta else '0'
        thumb = meta['thumb'] if 'thumb' in meta else poster
        if poster == '0': poster = control.addonPoster()
        return (poster, thumb, meta)
    except:
        pass
    try:
        # (2) Movie library lookup.
        if not self.content == 'movie': raise Exception()
        meta = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["title", "originaltitle", "year", "genre", "studio", "country", "runtime", "rating", "votes", "mpaa", "director", "writer", "plot", "plotoutline", "tagline", "thumbnail", "file"]}, "id": 1}' % (self.year, str(int(self.year)+1), str(int(self.year)-1)))
        meta = unicode(meta, 'utf-8', errors='ignore')
        meta = json.loads(meta)['result']['movies']
        t = cleantitle.get(self.title)
        # Exact year plus cleaned title (or original title) match.
        meta = [i for i in meta if self.year == str(i['year']) and (t == cleantitle.get(i['title']) or t == cleantitle.get(i['originaltitle']))][0]
        # Flatten list values to " / "-joined strings; stringify others.
        for k, v in meta.iteritems():
            if type(v) == list:
                try: meta[k] = str(' / '.join([i.encode('utf-8') for i in v]))
                except: meta[k] = ''
            else:
                try: meta[k] = str(v.encode('utf-8'))
                except: meta[k] = str(v)
        # Remember the library DBID when not running from a plugin dir.
        if not 'plugin' in control.infoLabel('Container.PluginName'):
            self.DBID = meta['movieid']
        poster = thumb = meta['thumbnail']
        return (poster, thumb, meta)
    except:
        pass
    try:
        # (3) Episode library lookup: find the show, then the episode.
        if not self.content == 'episode': raise Exception()
        meta = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["title", "year", "thumbnail", "file"]}, "id": 1}' % (self.year, str(int(self.year)+1), str(int(self.year)-1)))
        meta = unicode(meta, 'utf-8', errors='ignore')
        meta = json.loads(meta)['result']['tvshows']
        t = cleantitle.get(self.title)
        meta = [i for i in meta if self.year == str(i['year']) and t == cleantitle.get(i['title'])][0]
        tvshowid = meta['tvshowid'] ; poster = meta['thumbnail']
        meta = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params":{ "tvshowid": %d, "filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["title", "season", "episode", "showtitle", "firstaired", "runtime", "rating", "director", "writer", "plot", "thumbnail", "file"]}, "id": 1}' % (tvshowid, self.season, self.episode))
        meta = unicode(meta, 'utf-8', errors='ignore')
        meta = json.loads(meta)['result']['episodes'][0]
        # Same value flattening as the movie branch.
        for k, v in meta.iteritems():
            if type(v) == list:
                try: meta[k] = str(' / '.join([i.encode('utf-8') for i in v]))
                except: meta[k] = ''
            else:
                try: meta[k] = str(v.encode('utf-8'))
                except: meta[k] = str(v)
        if not 'plugin' in control.infoLabel('Container.PluginName'):
            self.DBID = meta['episodeid']
        thumb = meta['thumbnail']
        return (poster, thumb, meta)
    except:
        pass
    # (4) Fallback: no artwork, minimal metadata.
    poster, thumb, meta = '', '', {'title': self.name}
    return (poster, thumb, meta)
def searchname(r):
    # Keep (link, name) pairs that have a non-empty name list, then
    # return the first link whose cleaned name equals the cleaned
    # search title; returns [] when nothing matches.
    # NOTE(review): `title` is not a parameter and is not defined in
    # this function -- unless a module/global `title` exists at call
    # time, this raises NameError. Likely a copy/paste bug; confirm.
    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
    r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
    # Collapse to the first matching href, or [] when no match.
    r = [] if r == [] else [i[0] for i in r][0]
    return r