Example #1
    def searchMovie(self, title, year):
        title = cleantitle.normalize(title)
        url = self.search_link % cleantitle.geturl(title)
        r = self.scraper.get(url, params={'link_web': self.base_link}).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'),
                client.parseDOM(r, 'a', ret='title'))
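        # pull the 4-digit release year out of each title, e.g. "Movie (2019)"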
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if cleantitle.get(i[1]) == cleantitle.get(title) and (
                    year == i[2])
            ][0]
        except:
            url = None
            log_utils.log('series9 - Exception: \n' +
                          str(traceback.format_exc()))

        if url is None:
            url = [
                i[0] for i in results
                if cleantitle.get(i[1]) == cleantitle.get(title)
            ][0]

        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
Example #2
 def __search(self, titles, year):
     try:
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
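         # POST the first title to the search endpoint as an XHR request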
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.search_link),
                            post={'query': titles[0]},
                            XHR=True)
         r = dom_parser.parse_dom(r, 'a', req='href')
         r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
         r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
         r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if i[2] else i[1],
               i[2][0][1] if i[2] else '0') for i in r]
         r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2])
              for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #3
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'v_pict'})

            for i in r:
                title = re.findall('alt="(.+?)"', i[1], re.DOTALL)[0]
                y = re.findall('(\d{4})', title, re.DOTALL)[0]
                title = re.sub('<\w+>|</\w+>', '', title)
                title = cleantitle.get(title)
                title = re.findall('(\w+)', title)[0]

                if title in t and year == y:
                    url = re.findall('href="(.+?)"', i[1], re.DOTALL)[0]
                    return source_utils.strip_domain(url)
            return
        except:
            return
Example #4
    def __search(self, titles, year, season='0'):
        try:
            aj = cache.get(self.__get_ajax_object, 24)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

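            # POST the cleaned query to the site's AJAX search action along with its nonce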
            r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'),
                                                         'query': cleantitle.query(titles[0])})

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #5
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())
            result = client.request(query, referer=self.base_link)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                      for i in result if i]
            result = [(
                i.attrs['href']
            ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
                re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    i.attrs['title'],
                    flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
Example #6
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

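            # fetch the results page through the provider's scraper session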
            r = self.scraper.get(query).content

            r = dom_parser.parse_dom(r, 'article')
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #7
    def do_search(self, search_string, title, localtitle, year, search_type):
        url = urlparse.urljoin(self.base_link, self.search_link)
        # client.parseDOM expects markup text, so read the response body
        r = self.scraper.post(url, data={'search_field': search_string}).content
        r = client.parseDOM(r, 'div', attrs={'class': 'movies-list-item'})

        local_simple = cleantitle.get(localtitle)
        title_simple = cleantitle.get(title)
        for row in r:
            row = client.parseDOM(row, 'div', attrs={'class': 'opis-list'})[0]
            title_found = client.parseDOM(row, 'div', attrs={'class': 'title'})[0]
            link = client.parseDOM(title_found, 'a', ret='href')[0]
            if search_type not in link:
                continue

            local_found = client.parseDOM(title_found, 'a')[0]
            title_found = client.parseDOM(title_found, 'a', attrs={'class': 'blue'})
            if not title_found or not title_found[0]:
                title_found = local_found
            else:
                title_found = title_found[0]

            local_found = local_found.replace('&nbsp;', '')
            title_found = title_found.replace('&nbsp;', '')
            year_found = client.parseDOM(row, 'p', attrs={'class': 'cates'})
            if year_found:
                year_found = year_found[0][:4]
            title_match = cleantitle.get(local_found) == local_simple or cleantitle.get(title_found) == title_simple
            year_match = (not year_found) or year == year_found

            if title_match and year_match:
                return link
Example #8
	def __search(self, titles, year):
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
			post = {'story': titles[0], 'years_ot': str(int(year) - 1), 'years_do': str(int(year) + 1)}
			r = client.request(url, post=post, XHR=True)
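			# a very short response is treated as no results; fall back to the old search page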
			if len(r) < 1000:
				url = urlparse.urljoin(self.base_link, self.search_old % urllib.quote_plus(titles[0]))
				r = client.request(url)
			r = r.decode('cp1251').encode('utf-8')
			r = dom_parser.parse_dom(r, 'article')
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'full'})
			r = [(dom_parser.parse_dom(i, 'a', attrs={'itemprop': 'url'}, req='href'),
			      dom_parser.parse_dom(i, 'h3', attrs={'class': 'name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'origin-name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
			r = [(i[0][0].attrs['href'], i[1][0].attrs['content'], i[2][0].attrs['content'],
			      dom_parser.parse_dom(i[3], 'a', attrs={'itemprop': 'copyrightYear'})) for i in r if
			     i[0] and i[1] and i[2]]
			r = [(i[0], i[1], i[2], i[3][0].content) for i in r if i[3]]
			r = [i[0] for i in r if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] in y][0]
			return source_utils.strip_domain(r)
		except:
			return
Example #9
    def search(self, localtitle, year, search_type):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'result-item'})
            for x in result:
                correct_type = client.parseDOM(x,
                                               'span',
                                               attrs={'class': search_type})
                correct_year = client.parseDOM(x,
                                               'span',
                                               attrs={'class':
                                                      'year'})[0] == year
                name = client.parseDOM(x, 'div', attrs={'class': 'title'})[0]
                url = client.parseDOM(name, 'a', ret='href')[0]
                name = cleantitle.get(client.parseDOM(name, 'a')[0])
                if correct_type and correct_year and name == simply_name:
                    return url

        except:
            return
Example #10
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         query = self.search_link % urllib.quote_plus(
             cleantitle.query(tvshowtitle))
         for i in range(3):
             result = self.scraper.get(query).content
             if result is not None:
                 break
         t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
         t = [cleantitle.get(i) for i in set(t) if i]
         items = dom_parser.parse_dom(result,
                                      'div',
                                      attrs={'class': 'result'})
         url = None
         for i in items:
             result = re.findall(r'href="([^"]+)">(.*)<', i.content)
             if re.sub('<[^<]+?>', '', cleantitle.get(cleantitle.normalize(result[0][1]))) in t and year in \
               result[0][1]:
                 url = result[0][0]
             if url is not None:
                 break
         url = url.encode('utf-8')
         return url
     except:
         return
Example #11
    def __search(self, titles, year):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query, XHR=True)

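            # a single hit comes back as a bare JSON object; wrap it so it parses as a list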
            if r and r.startswith('{'): r = '[%s]' % r

            r = json.loads(r)
            r = [(i['url'], i['name']) for i in r
                 if 'name' in i and 'url' in i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})?\)*$', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            url = source_utils.strip_domain(r)
            url = url.replace('serien/', '')
            return url
        except:
            return
Example #12
	def do_search(self, title, localtitle, year, is_movie_search):
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)
			url = url % urllib.quote(title)
			result = client.request(url)
			result = result.decode('utf-8')

			result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})
			li_list = []
			for el in result:
				li_list.extend(client.parseDOM(el, 'li'))

			result = [(client.parseDOM(i, 'a', ret='href')[0],
			           client.parseDOM(i, 'div', attrs={'class': 'title'})[0],
			           (client.parseDOM(i, 'div', attrs={'class': 'title_org'}) + [None])[0],
			           client.parseDOM(i, 'div', attrs={'class': 'info'})[0],
			           ) for i in li_list]

			search_type = 'Film' if is_movie_search else 'Serial'
			cleaned_titles = [cleantitle.get(title), cleantitle.get(localtitle)]
			# filter by name
			result = [x for x in result if self.check_titles(cleaned_titles, [x[2], x[1]])]
			# filter by type
			result = [x for x in result if x[3].startswith(search_type)]
			# filter by year
			result = [x for x in result if x[3].endswith(str(year))]

			if result:
				return result[0][0]
			return

		except:
			return
Example #13
    def search(self, localtitle, year, search_type):
        try:

            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={
                                   'q': cleantitle.query(localtitle),
                                   'sb': ''
                               })
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})

            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if search_type not in url:
                    continue

                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    return url
        except:
            return
Example #14
    def __search(self, titles, year):
        try:

            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

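            # each result card pairs "<span>title</span>" with a "(YYYY)" date span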
            for i in r:
                data = re.findall(
                    '<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i,
                    re.DOTALL)
                for found_title, found_year in data:
                    found_title = cleantitle.get(found_title)
                    # compare against the requested year instead of shadowing the parameter
                    if found_title in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return
Example #15
 def __search(self, titles, imdb, year):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(query, XHR=True)
         r = json.loads(r)
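         # each post exposes Streaming, Jahr and IMDb-Link as custom fields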
         r = [(i.get('title'), i.get('custom_fields', {}))
              for i in r.get('posts', [])]
         r = [(i[0], i[1]) for i in r if i[0] and i[1]]
         r = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
               i[1].get('IMDb-Link', [''])) for i in r if i]
         r = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0]))
              for i in r if i[0] and i[1] and i[2] and i[3]]
         r = [
             i[1] for i in r
             if imdb in i[3] or (cleantitle.get(i[0]) in t and i[2] in y)
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #16
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		try:
			if url is None:
				return
			url = urlparse.parse_qs(url)
			url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
			clean_title = cleantitle.geturl(url['tvshowtitle']) + '+s%02d' % int(season)
			search_url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title.replace('-', '+'),
			                                                                   url['year'])))
			search_results = self.scraper.get(search_url, headers={'referer': self.base_link}).content

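			# stop early if the site rendered its "not found" panel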
			not_found = dom_parser.parse_dom(search_results, 'div', {'class': 'not-found'})
			if len(not_found) > 0:
				return

			links = client.parseDOM(search_results, "a", ret="href", attrs={"class": "ml-mask jt"})
			results = []
			for link in links:
				if '%ss%02d' % (cleantitle.get(url['tvshowtitle']), int(season)) in cleantitle.get(link):
					link_results = self.scraper.get(link, headers={'referer': search_url}).content
					r2 = dom_parser.parse_dom(link_results, 'div', {'id': 'ip_episode'})
					r3 = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r2 if i]
					for i in r3[0]:
						if i.content == 'Episode %s' % episode:
							results.append(i.attrs['href'])
			return results
		except:
			return
Example #17
 def __search(self, titles, year):
     try:
         query = self.search_link % (cleantitle.getsearch(titles[0].replace(
             ' ', '%20')))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(
             r,
             'li',
             attrs={'class': 'item everyone-item over_online haveTooltip'})
         for i in r:
             title = client.parseDOM(i, 'a', ret='title')[0]
             url = client.parseDOM(i, 'a', ret='href')[0]
             data = client.request(url)
             y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
             original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',
                                     data, re.DOTALL)[0]
             original_t, title = cleantitle.get(original_t), cleantitle.get(
                 title)
             if (t in title or t in original_t) and y == year:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
Example #18
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'article', attrs={'class': 'shortstory'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
            r = dom_parser.parse_dom(r, 'h2')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]), re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #19
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
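            # first try to resolve the show's season page via the site search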
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = self.scraper.get(search_url).content
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = self.scraper.get(url).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #20
    def get_filmdb_data(self, season, episode, title, localtitle, year):
        try:
            import requests

            titles = {localtitle, title}
            for item in titles:
                try:
                    if item == 'Vikings':
                        item = 'Wikingowie'
                    headers = {
                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                        'Referer': 'http://filmdb.pl/'}
                    url = urlparse.urljoin(self.filmdb, self.filmdb_search)
                    data = {"search_film": item}
                    r = requests.post(url, data=data, headers=headers)
                    result = r.text
                    hrefs = client.parseDOM(result, 'a', ret='href')
                    for href in hrefs:
                        result = self.scraper.get(self.filmdb + str(href)).content
                        fdbtitle = client.parseDOM(result, 'input', ret='value')[0]
                        fdbsubtitle = client.parseDOM(result, 'input', ret='value')[1]
                        fdbyear = client.parseDOM(result, 'input', ret='value')[2]
                        fdbfilmid = client.parseDOM(result, 'input', ret='value')[3]
                        fdbsp = client.parseDOM(result, 'input', ret='value')[4]
                        local_clean = cleantitle.get(localtitle)
                        title_clean = cleantitle.get(title)
                        found_clean = cleantitle.get(fdbsubtitle)
                        if found_clean == '':
                            found_clean = cleantitle.get(fdbtitle)
                        if title_clean == found_clean or local_clean == found_clean:
                            return {'sp': fdbsp, 'filmid': fdbfilmid, 'sezon': season, 'odcinek': episode}
                except:
                    pass
        except Exception as e:
            print(str(e))
Example #21
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = i[1]
                if re.search('\*(?:.*?)\*', title) is not None:
                    title = re.sub('\*(?:.*?)\*', '', title)
                title = cleantitle.get(title)
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
            # keep scanning the remaining results instead of returning on the first mismatch
            return
        except:
            return
Example #22
 def __search(self, titles, year, content):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
Example #23
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            r = self.scraper.get(url, headers={'referer': self.base_link}).content

            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

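            # pair each episode's link with its display name and air date (YYYY-MM-DD)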
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url:
                url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url:
                raise Exception()

            return url[0][0]
        except:
            return
Example #24
    def __search(self, titles, year):
        try:
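            # reuse a cached search nonce instead of scraping it on every call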
            n = cache.get(self.__get_nonce, 24)

            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), n)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)
            r = json.loads(r)
            r = [(r[i].get('url'), r[i].get('title'),
                  r[i].get('extra').get('date')) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #25
 def __search(self, titles, year):
     try:
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.search_link),
                            post={'query': cleantitle.query(titles[0])})
         r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 've-screen'},
                                  req='title')
         r = [(dom_parser.parse_dom(i, 'a', req='href'),
               i.attrs['title'].split(' - ')[0]) for i in r]
         r = [(i[0][0].attrs['href'], i[1],
               re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #26
 def __search(self, titles, year):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
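         # pair each entry's title header with any year link on the same row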
         r = [
             (dom_parser.parse_dom(i.content,
                                   'h4',
                                   attrs={'class': 'title-list'}),
              dom_parser.parse_dom(i.content,
                                   'a',
                                   attrs={'href':
                                          re.compile('.*/year/.*')}))
             for i in r
         ]
         r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'),
               i[1][0].content if i[1] else '0') for i in r if i[0]]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               re.sub('<.+?>|</.+?>', '', i[1])) for i in r
              if i[0] and i[1]]
         r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #27
	def matchAlias(self, title, aliases):
		try:
			for alias in aliases:
				if cleantitle.get(title) == cleantitle.get(alias['title']):
					return True
		except:
			return False
Example #28
 def __search(self, search_link, imdb, titles):
     try:
         query = search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = self.scraper.get(query).content
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
         r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
         r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
         r = dom_parser.parse_dom(r, 'a', req='href')
         r = [
             i.attrs['href'] for i in r
             if i and cleantitle.get(i.content) in t
         ][0]
         url = source_utils.strip_domain(r)
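         # open the matched page and confirm it links to the expected IMDb id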
         r = self.scraper.get(urlparse.urljoin(self.base_link, url)).content
         r = dom_parser.parse_dom(r,
                                  'a',
                                  attrs={'href': re.compile('.*/tt\d+.*')},
                                  req='href')
         r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
         r = [i[0] for i in r if i]
         return url if imdb in r else None
     except:
         return
Example #29
    def __search(self, title, season):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a'))
                 for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
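            # split "<title> saison <n>" into the base title and season number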
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and int(i[2]) == int(season)
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #30
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(
                self.base_link, (self.search_link % (clean_title, year)))
            search_results = self.scraper.get(search_url,
                                              headers={
                                                  'referer': self.base_link
                                              }).content

            not_found = dom_parser.parse_dom(search_results, 'div',
                                             {'class': 'not-found'})
            if len(not_found) > 0:
                return

            links = client.parseDOM(search_results,
                                    "a",
                                    ret="href",
                                    attrs={"class": "ml-mask jt"})
            results = []
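            # keep links whose slug contains both the cleaned title and the year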
            for link in links:
                if '%s%s' % (cleantitle.get(title),
                             year) in cleantitle.get(link):
                    results.append(link)
            return results
        except:
            return