Code Example #1
File: alltube.py Project: 17Q/modules4all
	# Assumes the scraper file's module-level imports: client, cleantitle,
	# cache, and urljoin (from urlparse on Python 2, urllib.parse on Python 3).
	def search(self, title, localtitle, year, search_type):
		try:
			titles = [
				cleantitle.normalize(cleantitle.getsearch(title)),
				cleantitle.normalize(cleantitle.getsearch(localtitle)),
			]
			# Grab the site's session cookie once and cache it for later requests.
			cookies = client.request(self.base_link, output='cookie')
			cache.cache_insert('alltube_cookie', cookies)
			for searched in titles:  # renamed so the `title` parameter is not shadowed
				r = client.request(urljoin(self.base_link, self.search_link),
				                   post={'search': cleantitle.query(searched)},
				                   headers={'Cookie': cookies})
				r = self.get_rows(r, search_type)

				for row in r:
					url = client.parseDOM(row, 'a', ret='href')[0]
					names_found = client.parseDOM(row, 'h3')[0]
					# Skip trailer ("Zwiastun") rows unless a trailer was asked for.
					if names_found.startswith('Zwiastun') and not searched.startswith('Zwiastun'):
						continue
					# The original called .encode('utf-8') before split(), which
					# breaks on Python 3 (bytes cannot be split with a str separator).
					names_found = [cleantitle.normalize(cleantitle.getsearch(i))
					               for i in names_found.split('/')]
					words = searched.replace("  ", " ").split(" ")
					for name in names_found:
						name = name.replace("  ", " ")
						found_year = self.try_read_year(url)
						if self.contains_all_words(name, words) and (not found_year or found_year == year):
							return url
		except Exception:
			return
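
Both examples call a `contains_all_words` helper (and Example #2 also a `contains_word` helper) that is not included in these snippets. A minimal sketch of plausible implementations, assuming a simple case-insensitive substring match; the project's actual helpers may differ:

	# Hypothetical reconstructions; not copied from 17Q/modules4all.
	def contains_all_words(self, name, words):
		# True only when every query word occurs in the candidate title.
		name = name.lower()
		return all(w.lower() in name for w in words if w)

	def contains_word(self, name, word):
		# Case-insensitive single-word check, used for quality markers like '1080p'.
		return word.lower() in name.lower()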
Code Example #2
    # Assumes the scraper file's module-level imports: urlparse, client,
    # cleantitle, and cache.
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            # `url` is the list built by the search step:
            # [title, localtitle, year, is_anime]. Index it inside the try
            # block so a short list cannot raise past the handler.
            is_anime = url[3]
            titles = [url[0], url[1]]
            try:
                year = url[2]
            except IndexError:
                year = ''
            for url_single in titles:
                url_single = cleantitle.normalize(cleantitle.getsearch(url_single))
                words = url_single.split(' ')
                search_url = urlparse.urljoin(self.base_link, self.search_link) % (url_single + " " + year)

                # The site requires a verification cookie: fetch the base cookie,
                # hit the verify endpoint, and append the computed hash.
                cookies = client.request(self.base_link, output='cookie')
                verifyGet = client.request(self.verify, cookie=cookies)
                cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(verifyGet)
                cache.cache_insert('szukajka_cookie', cookies)

                result = client.request(search_url, cookie=cookies)
                result = client.parseDOM(result, 'div', attrs={'class': 'element'})

                for el in result:
                    found_title = str(client.parseDOM(el, 'div', attrs={'class': 'title'})[0]) \
                        .lower().replace("_", " ").replace(".", " ").replace("-", " ")
                    if is_anime:
                        # For anime, the last query word is the episode number and
                        # must appear as a standalone number in the found title.
                        numbers = [int(s) for s in found_title.split() if s.isdigit()]
                        if int(words[-1]) not in numbers:
                            continue
                    # Original bug: `("zwiastun" or "trailer") in ...` evaluated to
                    # "zwiastun" only, so "trailer" was never checked.
                    if "zwiastun" in found_title or "trailer" in found_title:
                        continue
                    if len(words) >= 4 or is_anime:
                        if not self.contains_all_words(found_title, words):
                            continue
                    elif not self.contains_all_words(found_title, words) or year not in found_title:
                        continue
                    # Map release markers in the title to a quality label.
                    q = 'SD'
                    if self.contains_word(found_title, '1080p') or self.contains_word(found_title, 'FHD'):
                        q = '1080p'
                    elif self.contains_word(found_title, '720p'):
                        q = 'HD'

                    link = client.parseDOM(el, 'a', attrs={'class': 'link'}, ret='href')[0]
                    transl_type = client.parseDOM(el, 'span', attrs={'class': 'version'})[0].split(' ')[-1]
                    host = client.parseDOM(el, 'span', attrs={'class': 'host'})[0].split(' ')[-1]
                    lang, info = self.get_lang_by_type(transl_type)
                    sources.append({
                        'source': host,
                        'quality': q,
                        'language': lang,
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception as e:
            print(str(e))
            return sources
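
For context, `sources()` indexes into the `url` value produced by the provider's search step, so the expected shape is [title, localtitle, year, is_anime]. A hedged usage sketch follows; the class name `source` matches the usual convention in these Kodi scraper modules but is an assumption here, as are the sample inputs:

    # Hypothetical usage; class name and sample data are assumed, not from the project.
    scraper = source()
    url = ['The Matrix', 'Matrix', '1999', False]  # [title, localtitle, year, is_anime]
    for s in scraper.sources(url, hostDict=[], hostprDict=[]):
        print(s['source'], s['quality'], s['language'], s['url'])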