Example #1
0
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		"""Resolve a TV show to a site URL; return a urlencoded payload or None."""
		try:
			# [BUBBLES]
			tvshowtitle = utils.encodeUnicode(tvshowtitle)
			localtvshowtitle = utils.encodeUnicode(localtvshowtitle)
			aliases = utils.encodeUnicode(aliases)
			# [/BUBBLES]
			alias_list = source_utils.aliases_to_array(aliases)
			# Try the localized title first; fall back to the original title
			# only when it differs and the first search found nothing.
			url = self.__search([localtvshowtitle] + alias_list, year)
			if not url and tvshowtitle != localtvshowtitle:
				url = self.__search([tvshowtitle] + alias_list, year)
			if not url:
				return None
			return urllib.urlencode({'url': url})
		except:
			return
Example #2
0
	def movie(self, imdb, title, localtitle, aliases, year):
		"""Resolve a movie to a site URL; return a urlencoded payload or None."""
		try:
			# [BUBBLES]
			title = utils.encodeUnicode(title)
			localtitle = utils.encodeUnicode(localtitle)
			aliases = utils.encodeUnicode(aliases)
			# [/BUBBLES]
			alias_list = source_utils.aliases_to_array(aliases)
			# Localized title first; retry with the original title only when
			# it differs and the first search came up empty.
			url = self.__search([localtitle] + alias_list, year)
			if not url and title != localtitle:
				url = self.__search([title] + alias_list, year)
			if not url:
				return None
			return urllib.urlencode({'url': url})
		except:
			return
Example #3
0
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		"""Pack the show metadata into a urlencoded query string (None on error)."""
		try:
			# [BUBBLES]
			tvshowtitle = utils.encodeUnicode(tvshowtitle)
			localtvshowtitle = utils.encodeUnicode(localtvshowtitle)
			aliases = utils.encodeUnicode(aliases)
			# [/BUBBLES]
			data = {
				'tvshowtitle': tvshowtitle,
				'localtvshowtitle': localtvshowtitle,
				'aliases': aliases,
				'year': year,
			}
			return urllib.urlencode(data)
		except:
			return
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		# NOTE(review): this definition is byte-identical to the tvshow() defined
		# immediately above it; in the same class the later def shadows the
		# earlier one. Likely a paste/scrape artifact from two source files —
		# confirm before removing either copy.
		try:
			# [BUBBLES]
			# Normalise all incoming titles/aliases to a consistent encoding.
			tvshowtitle = utils.encodeUnicode(tvshowtitle)
			localtvshowtitle = utils.encodeUnicode(localtvshowtitle)
			aliases = utils.encodeUnicode(aliases)
			# [/BUBBLES]
			# Pack the raw metadata into a urlencoded query string; later
			# callbacks re-parse it rather than searching the site here.
			url = {'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'aliases': aliases, 'year': year}
			url = urllib.urlencode(url)
			return url
		except:
			# Best-effort scraper convention: any failure yields None.
			return
Example #5
0
    def __search(self, titles, year):
        """Search the site for any of *titles* released around *year*.

        Returns the domain-stripped path of the best match, or None when
        nothing matches (or on any request/parse failure).
        """
        try:
            # Normalised title candidates and the acceptable year window
            # (exact year, +/- 1, or '0' for entries without a year).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               post={'query': titles[0]},
                               XHR=True)

            # Each result anchor becomes (href, visible title text).
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
            # Split "Title (YYYY)" into title and year; '0' when no year found.
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if i[2] else i[1],
                  i[2][0][1] if i[2] else '0') for i in r]
            # Strip the Russian "(seasons X to Y)" suffix from the title.
            r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2])
                 for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            # [BUBBLES]
            # r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
            # FIX: the filter previously passed the literal list [1] instead of
            # the candidate title i[1], so no result could ever match.
            r = [
                i[0] for i in r
                if cleantitle.get(utils.encodeUnicode(i[1])) in t and i[2] in y
            ][0]
            # [/BUBBLES]

            return source_utils.strip_domain(r)
        except:
            return
Example #6
0
	def __search(self, titles, year, season='0'):
		"""Search the (cp1251-encoded) site for *titles* around *year*/*season*.

		Returns the domain-stripped href of the best match, or None on any
		failure (network, parse, or no acceptable result).
		"""
		try:
			# Normalised title candidates and the acceptable year window
			# (exact year, +/- 1, or '0' for entries without a year).
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			# The search endpoint expects cp1251-encoded form data.
			post = {'story': utils.uni2cp(titles[0]), 'titleonly': 3, 'do': 'search', 'subaction': 'search', 'search_start': 1, 'full_search': 0, 'result_from': 1}
			html = client.request(self.base_link, post=post)

			# Response body is cp1251; re-encode to utf-8 before parsing.
			html = html.decode('cp1251').encode('utf-8')

			# One result per "news-id-NNN" block: keep (numeric id, poster title).
			r = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('news-id-\d+')})
			r = [(i.attrs['id'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
			r = [(re.sub('[^\d]+', '', i[0]), dom_parser.parse_dom(i[1], 'img', req='title')) for i in r]
			r = [(i[0], i[1][0].attrs['title'], '') for i in r if i[1]]
			# Pull an "N сезон" (season N) marker out of the title if present.
			r = [(i[0], i[1], i[2], re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
			r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
			# Pull a "Title (YYYY)" year out of the title; '0' when absent.
			r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1]), i[3]) for i in r]
			r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0', i[3]) for i in r]
			# When a season was requested but none parsed, assume season 1.
			r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
			r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
			# [BUBBLES]
			# r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
			r = [i[0] for i in r if cleantitle.get(utils.encodeUnicode(i[1])) in t and i[2] in y and int(i[3]) == int(season)][0]
			# [/BUBBLES]
			# Resolve the winning news id back to its full article href.
			r = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('.*/%s-' % r)}, req='href')[0].attrs['href']

			return source_utils.strip_domain(r)
		except:
			# Best-effort scraper convention: any failure yields None.
			return
	def __search(self, titles, year, season='0'):
		"""Search a DLE-style (cp1251) site for *titles* around *year*/*season*.

		Returns the domain-stripped href of the best match, or None on any
		failure (network, parse, or no acceptable result).
		"""
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)

			# Normalised title candidates and the acceptable year window
			# (exact year, +/- 1, or '0' for entries without a year).
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			# The search endpoint expects cp1251-encoded form data.
			post = {'story': utils.uni2cp(titles[0]), 'titleonly': 3, 'do': 'search', 'subaction': 'search', 'search_start': 1, 'full_search': 0, 'result_from': 1}
			r = client.request(url, post=post)

			# Response body is cp1251; re-encode to utf-8 before parsing.
			r = r.decode('cp1251').encode('utf-8')

			# One result per 'eBlock' table: pair the title div with any
			# "..._goda/" (year-of-release) link found in the block.
			r = dom_parser.parse_dom(r, 'table', attrs={'class': 'eBlock'})
			r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'eTitle'}), dom_parser.parse_dom(i[1], 'a', attrs={'href': re.compile('.*\d+_goda/')})) for i in r]
			r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), [x.content for x in i[1] if re.match('\d{4}', x.content)][0] if i[1] else '0') for i in r if i[0]]
			r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]]
			# Pull a "Title (YYYY)" year out of the title text when present.
			r = [(i[0], i[1], i[2], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r if i]
			r = [(i[0], i[3][0][0] if i[3] else i[1], i[2]) for i in r]
			# Pull an "N сезон" (season N) marker out of the title; '0' if none.
			r = [(i[0], i[1], i[2], re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
			r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
			r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
			# [BUBBLES]
			# r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
			r = [i[0] for i in r if cleantitle.get(utils.encodeUnicode(i[1])) in t and i[2] in y and int(i[3]) == int(season)][0]
			# [/BUBBLES]

			return source_utils.strip_domain(r)
		except:
			# Best-effort scraper convention: any failure yields None.
			return
Example #8
0
	def __search(self, titles, year):
		"""Query the site search endpoint and return the best matching
		result path (domain-stripped), or None on any failure."""
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)

			# Normalised title candidates and the acceptable year window
			# (exact year, +/- 1, or '0' for entries without a year).
			wanted = [cleantitle.get(title) for title in set(titles) if title]
			years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			post = {'story': titles[0], 'years_ot': str(int(year) - 1), 'years_do': str(int(year) + 1)}
			html = client.request(url, post=post, XHR=True)

			entries = dom_parser.parse_dom(html, 'article')
			entries = dom_parser.parse_dom(entries, 'div', attrs={'class': 'full'})

			# Collect (href, display name, original name, copyright year).
			results = []
			for entry in entries:
				link = dom_parser.parse_dom(entry, 'a', attrs={'itemprop': 'url'}, req='href')
				name = dom_parser.parse_dom(entry, 'h3', attrs={'class': 'name'}, req='content')
				orig = dom_parser.parse_dom(entry, 'div', attrs={'class': 'origin-name'}, req='content')
				ydiv = dom_parser.parse_dom(entry, 'div', attrs={'class': 'year'})
				if not (link and name and orig): continue
				ytag = dom_parser.parse_dom(ydiv, 'a', attrs={'itemprop': 'copyrightYear'})
				if not ytag: continue
				results.append((link[0].attrs['href'], name[0].attrs['content'], orig[0].attrs['content'], ytag[0].content))

			# [BUBBLES]
			# r = [i[0] for i in r if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] in y][0]
			match = [href for href, name, orig, ry in results
					 if (cleantitle.get(utils.encodeUnicode(name)) in wanted or cleantitle.get(utils.encodeUnicode(orig)) in wanted) and ry in years][0]
			# [/BUBBLES]

			return source_utils.strip_domain(match)
		except:
			return
Example #9
0
    def __search(self, titles, year, season='0'):
        """Search a DLE-style (cp1251) site for *titles* around *year*/*season*.

        Returns the domain-stripped href of the best match, or None on any
        failure (network, parse, or no acceptable result).
        """
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            # Normalised title candidates and the acceptable year window
            # (exact year, +/- 1, or '0' for entries without a year).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            # The search endpoint expects cp1251-encoded form data.
            post = {
                'story': utils.uni2cp(titles[0]),
                'titleonly': 3,
                'do': 'search',
                'subaction': 'search',
                'search_start': 1,
                'full_search': 0,
                'result_from': 1
            }
            r = client.request(url, post=post)

            # Response body is cp1251; re-encode to utf-8 before parsing.
            r = r.decode('cp1251').encode('utf-8')

            # One result per 'eBlock' table: pair the title div with any
            # "..._goda/" (year-of-release) link found in the block.
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'eBlock'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'eTitle'}),
                  dom_parser.parse_dom(
                      i[1], 'a', attrs={'href': re.compile('.*\d+_goda/')}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'),
                  [x.content for x in i[1]
                   if re.match('\d{4}', x.content)][0] if i[1] else '0')
                 for i in r if i[0]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r
                 if i[0]]
            # Pull a "Title (YYYY)" year out of the title text when present.
            r = [(i[0], i[1], i[2], re.findall('(.+?) \(*(\d{4})', i[1]))
                 for i in r if i]
            r = [(i[0], i[3][0][0] if i[3] else i[1], i[2]) for i in r]
            # Pull an "N сезон" (season N) marker out of the title; '0' if none.
            r = [(i[0], i[1], i[2],
                  re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            # [BUBBLES]
            # r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
            r = [
                i[0] for i in r
                if cleantitle.get(utils.encodeUnicode(i[1])) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]
            # [/BUBBLES]

            return source_utils.strip_domain(r)
        except:
            # Best-effort scraper convention: any failure yields None.
            return
Example #10
0
	def __search(self, titles, year):
		"""Search the site for any of *titles* released around *year*.

		Returns the domain-stripped path of the best match, or None when
		nothing matches (or on any request/parse failure).
		"""
		try:
			# Normalised title candidates and the acceptable year window
			# (exact year, +/- 1, or '0' for entries without a year).
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': titles[0]}, XHR=True)

			# Each result anchor becomes (href, visible title text).
			r = dom_parser.parse_dom(r, 'a', req='href')
			r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
			r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
			# Split "Title (YYYY)" into title and year; '0' when no year found.
			r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
			r = [(i[0], i[2][0][0] if i[2] else i[1], i[2][0][1] if i[2] else '0') for i in r]
			# Strip the Russian "(seasons X to Y)" suffix from the title.
			r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2]) for i in r]
			r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
			# [BUBBLES]
			# r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
			# FIX: the filter previously passed the literal list [1] instead of
			# the candidate title i[1], so no result could ever match.
			r = [i[0] for i in r if cleantitle.get(utils.encodeUnicode(i[1])) in t and i[2] in y][0]
			# [/BUBBLES]

			return source_utils.strip_domain(r)
		except:
			return