コード例 #1
0
 def __search(self, titles, year, content):
     """Search for *titles* (movies or series tab) and return its URL.

     content -- 'movies' selects the '#movies' pane, otherwise '#series'.
     Returns the domain-stripped href of the first match, else None
     (any error is swallowed per this add-on's convention).
     """
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         # Normalized primary title; compared by substring below.
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 # No caption match: open the result page and compare the
                 # "Pelicula" heading's title and year instead.
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
コード例 #2
0
 def __search(self, search_link, imdb, titles):
     """Search for any of *titles* and verify the hit via its IMDb id.

     Returns the domain-stripped result URL when the candidate's detail
     page links to *imdb*, otherwise None (also on any request/parse
     error, swallowed per this add-on's convention).
     """
     try:
         query = search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         # Normalized forms of every alias for exact-match comparison.
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = self.scraper.get(query).content
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
         r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
         r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
         r = dom_parser.parse_dom(r, 'a', req='href')
         r = [
             i.attrs['href'] for i in r
             if i and cleantitle.get(i.content) in t
         ][0]
         url = source_utils.strip_domain(r)
         # Fetch the detail page and collect all IMDb ids it links to.
         r = self.scraper.get(urlparse.urljoin(self.base_link, url)).content
         r = dom_parser.parse_dom(r,
                                  'a',
                                  attrs={'href': re.compile('.*/tt\d+.*')},
                                  req='href')
         r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
         r = [i[0] for i in r if i]
         return url if imdb in r else None
     except:
         return
コード例 #3
0
 def __search(self, titles, year):
     """Search for any of *titles* released exactly in *year*.

     Returns the domain-stripped URL of the best match, else None
     (any error is swallowed per this add-on's convention).
     """
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         # Normalized forms of every alias for exact-match comparison.
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
         # Pair each result's title header with its /year/ link (if any).
         r = [
             (dom_parser.parse_dom(i.content,
                                   'h4',
                                   attrs={'class': 'title-list'}),
              dom_parser.parse_dom(i.content,
                                   'a',
                                   attrs={'href':
                                          re.compile('.*/year/.*')}))
             for i in r
         ]
         r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'),
               i[1][0].content if i[1] else '0') for i in r if i[0]]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               re.sub('<.+?>|</.+?>', '', i[1])) for i in r
              if i[0] and i[1]]
         r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
コード例 #4
0
    def __search(self, titles):
        """Search the site for any of *titles* and return its relative URL.

        titles -- list of candidate titles; the first is used as the query.
        Returns the domain-stripped href of the first matching result, or
        None when nothing matches or the request/parse fails.
        """
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized forms of every alias for exact-match comparison.
            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            # Drill down to the result anchors.
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = i[1]
                # Strip "*...*" annotations the site adds to some titles.
                if re.search('\*(?:.*?)\*', title) is not None:
                    title = re.sub('\*(?:.*?)\*', '', title)
                title = cleantitle.get(title)
                # BUGFIX: the original returned None from an else-branch on
                # the FIRST non-matching result; now every result is checked
                # before giving up.
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
            return
        except Exception:
            # Narrowed from bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return
コード例 #5
0
    def __search(self, titles, year):
        """Search via the site's nonce-protected AJAX endpoint.

        Accepts results dated *year* +/- 1 (or undated); returns the
        domain-stripped URL of the best match, else None (errors are
        swallowed per this add-on's convention).
        """
        try:
            # Search nonce, cached for 24 hours.
            n = cache.get(self.__get_nonce, 24)

            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), n)
            query = urlparse.urljoin(self.base_link, query)

            # Normalized forms of every alias for exact-match comparison.
            t = [cleantitle.get(i) for i in set(titles) if i]
            # Acceptable years: requested year +/- 1, plus '0' (= unknown).
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)
            r = json.loads(r)
            r = [(r[i].get('url'), r[i].get('title'),
                  r[i].get('extra').get('date')) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #6
0
	def __search(self, titles, year):
		"""POST-search a cp1251-encoded site for *titles* around *year*.

		Falls back to the legacy GET search when the XHR response looks
		empty; returns the domain-stripped URL of the best match or None.
		"""
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)
			t = [cleantitle.get(i) for i in set(titles) if i]
			# Acceptable years: requested year +/- 1, plus '0' (= unknown).
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
			post = {'story': titles[0], 'years_ot': str(int(year) - 1), 'years_do': str(int(year) + 1)}
			r = client.request(url, post=post, XHR=True)
			# A very short response means no results - use the old search.
			if len(r) < 1000:
				url = urlparse.urljoin(self.base_link, self.search_old % urllib.quote_plus(titles[0]))
				r = client.request(url)
			# Site serves cp1251; re-encode to utf-8 before parsing.
			r = r.decode('cp1251').encode('utf-8')
			r = dom_parser.parse_dom(r, 'article')
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'full'})
			r = [(dom_parser.parse_dom(i, 'a', attrs={'itemprop': 'url'}, req='href'),
			      dom_parser.parse_dom(i, 'h3', attrs={'class': 'name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'origin-name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
			r = [(i[0][0].attrs['href'], i[1][0].attrs['content'], i[2][0].attrs['content'],
			      dom_parser.parse_dom(i[3], 'a', attrs={'itemprop': 'copyrightYear'})) for i in r if
			     i[0] and i[1] and i[2]]
			r = [(i[0], i[1], i[2], i[3][0].content) for i in r if i[3]]
			# Match either the local or the original title; year within range.
			r = [i[0] for i in r if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] in y][0]
			return source_utils.strip_domain(r)
		except:
			return
コード例 #7
0
    def __search(self, titles, year, season='0'):
        """Search for a movie (season='0') or a specific season of a show.

        Extracts "<title> (<year>)" and "<title> N staffel" patterns from
        result links; returns the domain-stripped URL of the best match or
        None (errors swallowed per this add-on's convention).
        """
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'article', attrs={'class': 'shortstory'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
            r = dom_parser.parse_dom(r, 'h2')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            # (href, bare title, year) - year defaults to '0' when absent.
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1]), re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            # Pull an optional "staffel"/"s" season number out of the title.
            r = [(i[0], i[1], i[2], re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            # Unnumbered entries count as season 1 when a season is wanted.
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #8
0
            def __get_correct_link(_url, content, checkval):
                """Under the '<h4>content' section of *_url*, pick the link
                whose <span> value equals *checkval* and strip quality tags.

                Returns the domain-stripped href or None.
                """
                try:
                    if not _url:
                        return

                    _url = urlparse.urljoin(self.base_link, _url)
                    r = client.request(_url)

                    # Isolate the section starting with the wanted header.
                    r = re.findall('<h4>%s[^>]*</h4>(.*?)<div' % content, r,
                                   re.DOTALL | re.IGNORECASE)[0]
                    r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
                    r = [(dom_parser.parse_dom(i, 'a', req='href'),
                          dom_parser.parse_dom(i, 'span')) for i in r]
                    r = [(i[0][0].attrs['href'], i[1][0].content) for i in r
                         if i[0] and i[1]]
                    r = [(i[0], i[1] if i[1] else '0') for i in r]
                    r = [i[0] for i in r if int(i[1]) == int(checkval)][0]
                    # Drop quality/format path fragments from the link.
                    r = re.sub('/(2160p|1080p|720p|x264|3d)',
                               '',
                               r,
                               flags=re.I)

                    return source_utils.strip_domain(r)
                except:
                    return
コード例 #9
0
    def __search(self, titles, year):
        """XHR search returning JSON; match title and *year* +/- 1.

        Returns the domain-stripped URL (with any 'serien/' prefix
        removed) or None.
        """
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query, XHR=True)

            # A single JSON object is wrapped so json.loads yields a list.
            if r and r.startswith('{'): r = '[%s]' % r

            r = json.loads(r)
            r = [(i['url'], i['name']) for i in r
                 if 'name' in i and 'url' in i]
            # Split "<title> (<year>)" names; year defaults to '0'.
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})?\)*$', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            url = source_utils.strip_domain(r)
            url = url.replace('serien/', '')
            return url
        except:
            return
コード例 #10
0
    def __search(self, titles, year):
        """Search for titles[0] released in *year*; return the relative URL.

        titles -- list of candidate titles (only the first is used)
        year   -- release year as a string
        Returns the domain-stripped href of the first matching card, or
        None when nothing matches or the request/parse fails.
        """
        try:

            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            # Normalized reference title; matched by substring below.
            t = cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall(
                    '<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i,
                    re.DOTALL)
                for found_title, found_year in data:
                    found_title = cleantitle.get(found_title)
                    # BUGFIX: the loop variable used to shadow the *year*
                    # parameter and was then compared with a copy of itself
                    # (always true), so the year filter never filtered.
                    # Compare the scraped year against the requested one.
                    if found_title in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except Exception:
            # Narrowed from bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return
コード例 #11
0
 def __search(self, titles, year):
     """POST-search for *titles*; match title and *year* +/- 1.

     Returns the domain-stripped URL of the best match, else None
     (errors swallowed per this add-on's convention).
     """
     try:
         # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.search_link),
                            post={'query': cleantitle.query(titles[0])})
         r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 've-screen'},
                                  req='title')
         # Pair each link with the title part of its tooltip text.
         r = [(dom_parser.parse_dom(i, 'a', req='href'),
               i.attrs['title'].split(' - ')[0]) for i in r]
         r = [(i[0][0].attrs['href'], i[1],
               re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
コード例 #12
0
 def __search(self, titles, year):
     """XHR POST-search (Russian site); match title and *year* +/- 1.

     Returns the domain-stripped URL of the best match, else None
     (errors swallowed per this add-on's convention).
     """
     try:
         # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.search_link),
                            post={'query': titles[0]},
                            XHR=True)
         r = dom_parser.parse_dom(r, 'a', req='href')
         r = [(i.attrs['href'], i.content.split('<br')[0]) for i in r]
         r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1])) for i in r]
         r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if i[2] else i[1],
               i[2][0][1] if i[2] else '0') for i in r]
         # Drop the Russian "(seasons X to Y)" suffix before comparing.
         r = [(i[0], re.sub(u'\(с \d+ по \d+ сезон\)', '', i[1]), i[2])
              for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
コード例 #13
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Resolve the relative URL of a specific episode.

     Navigates: show page -> 'Season %02d' cell -> season page ->
     'Episode %02d' cell.  Returns the domain-stripped episode URL or
     None (errors swallowed per this add-on's convention).
     """
     try:
         if not url:
             return
         query = urlparse.urljoin(self.base_link, url)
         r = self.scraper.get(query).content
         # Table cell for the requested season.
         r = dom_parser.parse_dom(r,
                                  'td',
                                  attrs={
                                      'data-title-name':
                                      re.compile('Season %02d' %
                                                 int(season))
                                  })
         r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']
         r = self.scraper.get(urlparse.urljoin(self.base_link, r)).content
         # Table cell for the requested episode on the season page.
         r = dom_parser.parse_dom(r,
                                  'td',
                                  attrs={
                                      'data-title-name':
                                      re.compile('Episode %02d' %
                                                 int(episode))
                                  })
         r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']
         return source_utils.strip_domain(r)
     except:
         return
コード例 #14
0
    def __search(self, titles, year):
        """Scan search-result detail cards for a title/year match.

        The primary title is matched by substring against each card's
        cleaned title; the year must match exactly.  Returns the
        domain-stripped URL of the first hit, else None.
        """
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            # Normalized primary title; compared by substring below.
            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'details'})

            for i in r:
                title = client.parseDOM(i, 'div', attrs={'class': 'title'})[0]
                y = client.parseDOM(i, 'span', attrs={'class': 'year'})[0]
                # The visible title sits inside the anchor's text.
                title = re.findall('">(.+?)</a', title, re.DOTALL)[0]
                title = cleantitle.get_simple(title)

                if t in title and y == year:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
コード例 #15
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the show URL for *season* and wrap it with *episode*.

        url -- urlencoded query string produced by tvshow() containing
               tvshowtitle, localtvshowtitle, aliases and year.
        Returns a urlencoded {'url': ..., 'episode': ...} string, or None
        when the show cannot be found or parsing fails.
        """
        try:
            if not url:
                return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            tvshowtitle = data['tvshowtitle']
            localtvshowtitle = data['localtvshowtitle']
            # NOTE: eval of our own serialized payload (addon convention);
            # the data originates from this scraper's tvshow() method.
            aliases = source_utils.aliases_to_array(eval(data['aliases']))
            year = data['year']

            # BUGFIX: aliases was previously run through
            # source_utils.aliases_to_array() a second time even though it
            # is already a plain list here; use it directly, matching the
            # sibling episode() implementations in this project.
            url = self.__search([localtvshowtitle] + aliases, year, season)
            if not url and tvshowtitle != localtvshowtitle:
                url = self.__search([tvshowtitle] + aliases, year, season)

            if url:
                return urllib.urlencode({
                    'url': source_utils.strip_domain(url),
                    'episode': episode
                })
        except Exception:
            # Narrowed from bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            return
コード例 #16
0
    def __search(self, titles, year):
        """Scan search-result thumbnails for a title/year match.

        Title and year are both read from each thumbnail's alt text; the
        cleaned first word of the found title must be a substring of the
        normalized primary title.  Returns the domain-stripped URL of the
        first hit, else None.
        """
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            # Normalized primary title; compared by substring below.
            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'v_pict'})

            for i in r:
                # Title (and embedded year) come from the alt attribute.
                title = re.findall('alt="(.+?)"', i[1], re.DOTALL)[0]
                y = re.findall('(\d{4})', title, re.DOTALL)[0]
                title = re.sub('<\w+>|</\w+>', '', title)
                title = cleantitle.get(title)
                # Reduce to the first word of the cleaned title.
                title = re.findall('(\w+)', cleantitle.get(title))[0]

                if title in t and year == y:
                    url = re.findall('href="(.+?)"', i[1], re.DOTALL)[0]
                    return source_utils.strip_domain(url)
            return
        except:
            return
コード例 #17
0
 def __search(self, titles, year):
     """Search a Spanish-language site; verify each result's year and
     original title on its detail page.

     Returns the domain-stripped URL of the first hit, else None
     (errors swallowed per this add-on's convention).
     """
     try:
         query = self.search_link % (cleantitle.getsearch(titles[0].replace(
             ' ', '%20')))
         query = urlparse.urljoin(self.base_link, query)
         # Normalized primary title; compared by substring below.
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(
             r,
             'li',
             attrs={'class': 'item everyone-item over_online haveTooltip'})
         for i in r:
             title = client.parseDOM(i, 'a', ret='title')[0]
             url = client.parseDOM(i, 'a', ret='href')[0]
             # Fetch the detail page to read year and original title.
             data = client.request(url)
             y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
             original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',
                                     data, re.DOTALL)[0]
             original_t, title = cleantitle.get(original_t), cleantitle.get(
                 title)
             if (t in title or t in original_t) and y == year:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
コード例 #18
0
    def __search(self, titles, year):
        """Search article cards; match title exactly and *year* +/- 1.

        Returns the domain-stripped URL of the best match, else None
        (errors swallowed per this add-on's convention).
        """
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = self.scraper.get(query).content

            r = dom_parser.parse_dom(r, 'article')
            # Pair each article's title div with its year span.
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #19
0
    def __search_movie(self, imdb, year):
        """Search by IMDb id; prefer results dated *year* +/- 1.

        Returns the domain-stripped URL of the best match, else None
        (errors swallowed per this add-on's convention).
        """
        try:
            query = urlparse.urljoin(self.base_link, self.search_link % imdb)

            # Acceptable years: requested year +/- 1, plus '0' (= unknown).
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'ml-item-content'})
            # Pair each poster link with its item-parameter list.
            r = [(dom_parser.parse_dom(i,
                                       'a',
                                       attrs={'class': 'ml-image'},
                                       req='href'),
                  dom_parser.parse_dom(i, 'ul', attrs={'class':
                                                       'item-params'}))
                 for i in r]
            # Year appears next to the calendar icon, when present.
            r = [(i[0][0].attrs['href'],
                  re.findall('calendar.+?>.+?(\d{4})',
                             ''.join([x.content for x in i[1]]))) for i in r
                 if i[0] and i[1]]
            r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[1]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if i[1] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #20
0
 def __search(self, titles, imdb, year):
     """XHR JSON search; match by IMDb id, or by title plus year +/- 1.

     Returns the domain-stripped streaming URL of the best match, else
     None (errors swallowed per this add-on's convention).
     """
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(query, XHR=True)
         r = json.loads(r)
         # Posts carry custom fields: Streaming URL, year, IMDb link.
         r = [(i.get('title'), i.get('custom_fields', {}))
              for i in r.get('posts', [])]
         r = [(i[0], i[1]) for i in r if i[0] and i[1]]
         r = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
               i[1].get('IMDb-Link', [''])) for i in r if i]
         r = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0]))
              for i in r if i[0] and i[1] and i[2] and i[3]]
         r = [
             i[1] for i in r
             if imdb in i[3] or (cleantitle.get(i[0]) in t and i[2] in y)
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
コード例 #21
0
    def __search(self, titles, year, season='0'):
        """AJAX search; match title, year +/- 1, and season number.

        season='0' selects movies / season-less entries.  Returns the
        domain-stripped URL of the best match, else None.
        """
        try:
            # Ajax endpoint descriptor (url, action, nonce), cached 24h.
            aj = cache.get(self.__get_ajax_object, 24)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'),
                                                         'query': cleantitle.query(titles[0])})

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            # (href, title, year) - year defaults to '0' when absent.
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            # Pull an optional "staffel"/"s" season number out of the title.
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            # Unnumbered entries count as season 1 when a season is wanted.
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #22
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the relative URL of a specific episode.

        url -- urlencoded query string produced by tvshow().  Searches for
        the show (local title first), then picks the episode link whose
        text equals the episode number.  Returns None on failure.
        """
        try:
            if not url:
                return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            tvshowtitle = data['tvshowtitle']
            localtvshowtitle = data['localtvshowtitle']
            # NOTE: eval of our own serialized payload (addon convention).
            aliases = source_utils.aliases_to_array(eval(data['aliases']))

            url = self.__search([localtvshowtitle] + aliases, data['year'],
                                season)
            if not url and tvshowtitle != localtvshowtitle:
                url = self.__search([tvshowtitle] + aliases, data['year'],
                                    season)
            if not url: return

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(
                r, 'ul', attrs={'class': ['list-inline', 'list-film']})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r if i]
            # Keep only links whose text is a bare number (episode no.).
            r = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0')
                 for i in r]
            r = [i[0] for i in r if int(i[1]) == int(episode)][0]

            return source_utils.strip_domain(r)
        except:
            return
コード例 #23
0
ファイル: sezonlukdizi.py プロジェクト: csu-xiao-an/LilacTV
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build the '<show>N-sezon-M-bolum.html' path for an episode.

        Returns the domain-stripped episode URL, or [] on any error and
        None when *url* is empty.
        """
        try:
            if not url:
                return
            base = url.replace('.html', '')
            ep_url = '%s%01d-sezon-%01d-bolum.html' % (
                base, int(season), int(episode))
            return source_utils.strip_domain(ep_url)
        except BaseException:
            return []
コード例 #24
0
    def __search(self, titles, year, season='0'):
        """Search product listing; match title, year +/- 1, and season.

        season='0' keeps only movie boxes (no 'episode' badge); a positive
        season keeps only series boxes.  Returns the match's '-stream'
        URL, else None.
        """
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r,
                                     'ul',
                                     attrs={'class': ['products', 'row']})
            r = dom_parser.parse_dom(
                r, 'div', attrs={'class': ['box-product', 'clearfix']})
            # The 'episode' badge separates series boxes from movie boxes.
            if int(season) > 0:
                r = [
                    i for i in r if dom_parser.parse_dom(
                        i, 'div', attrs={'class': 'episode'})
                ]
            else:
                r = [
                    i for i in r if not dom_parser.parse_dom(
                        i, 'div', attrs={'class': 'episode'})
                ]
            r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title-product'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            # (href, title, year) - year defaults to '0' when absent.
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            # Pull an optional "staffel"/"s" season number out of the title.
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            # Unnumbered entries count as season 1 when a season is wanted.
            r = [(i[0], i[1].replace(' hd', ''), i[2],
                  '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [
                i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]

            url = source_utils.strip_domain(r)
            # Detail pages end in '-info'; the stream page is '-stream'.
            url = url.replace('-info', '-stream')
            return url
        except:
            return
コード例 #25
0
	def __search(self, titles):
		"""Query the site's XHR search and return the first alias match.

		Returns the domain-stripped URL, or None on no match / any error
		(swallowed per this add-on's convention).
		"""
		try:
			# Normalized forms of every alias for exact-match comparison.
			wanted = [cleantitle.get(name) for name in set(titles) if name]
			search_url = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
			search_url = urlparse.urljoin(self.base_link, search_url)
			response = client.request(search_url, XHR=True)
			results = json.loads(response)
			pairs = [(entry.get('url'), entry.get('name')) for entry in results]
			match = [link for link, name in pairs if cleantitle.get(name) in wanted][0]
			return source_utils.strip_domain(match)
		except:
			return
コード例 #26
0
ファイル: sezonlukdizi.py プロジェクト: csu-xiao-an/LilacTV
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Look the show up in the cached sezonlukdizi index by any alias.

        Returns the domain-stripped show URL, or None when no cache entry
        matches (or on any error).
        """
        try:
            # Normalized forms of the title plus every alias.
            candidates = [tvshowtitle] + source_utils.aliases_to_array(aliases)
            wanted = [cleantitle.get(c) for c in set(candidates) if c]

            for entry in self.sezonlukdizi_tvcache():
                if cleantitle.get(entry[1]) in wanted:
                    return source_utils.strip_domain(entry[0])
            return
        except BaseException:
            return
コード例 #27
0
    def __search(self, titles, year, imdb):
        """Search movie cells; match title and year, disambiguate by IMDb.

        When several title/year matches exist, each candidate page is
        fetched and the one linking to *imdb* wins.  Returns the
        domain-stripped URL or None.
        """
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            # Normalized aliases and acceptable years (+/- 1, '0' = unknown).
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}),
                  dom_parser.parse_dom(i, 'div', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']),
                  re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r
                 if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

            if len(r) > 1:
                # Multiple candidates: verify each page's IMDb links.
                # NOTE(review): if no candidate verifies, 'url' stays
                # unbound and the resulting NameError is swallowed by the
                # broad except below - the function then returns None.
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(
                        data,
                        'a',
                        attrs={'name': re.compile('.*/tt\d+.*')},
                        req='name')
                    data = [
                        re.findall('.+?(tt\d+).*?', d.attrs['name'])
                        for d in data
                    ]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
コード例 #28
0
	def __get_episode_link(self, url, episode='1'):
		"""Return the domain-stripped link of *episode* from the show page at *url*.

		Returns None when *url* is empty or the episode cannot be found.
		"""
		try:
			if not url:
				return
			page = client.request(urlparse.urljoin(self.base_link, url))
			listing = dom_parser.parse_dom(page, 'ul', attrs={'class': 'all-episode'})
			items = dom_parser.parse_dom(listing, 'li')
			# anchor hrefs look like "...-episode-<n>.<ext>..."
			pattern = re.compile('.*-episode-%s\.\w+.*?' % episode)
			anchors = dom_parser.parse_dom(items, 'a', attrs={'href': pattern}, req='href')
			link = anchors[0].attrs['href']
			return source_utils.strip_domain(link)
		except:
			return
コード例 #29
0
 def __search(self, titles):
     """Find the first article link whose title matches any of *titles*.

     Returns the domain-stripped href of the first matching anchor, or
     None when nothing matches or any step fails.
     """
     try:
         wanted = [cleantitle.get(name) for name in set(titles) if name]
         url = urlparse.urljoin(
             self.base_link,
             self.search_link % (urllib.quote_plus(titles[0])))
         html = self.scraper.get(url).content
         articles = dom_parser.parse_dom(html, 'article')
         anchors = dom_parser.parse_dom(
             articles, 'a', attrs={'class': 'rb'}, req='href')
         for anchor in anchors:
             if cleantitle.get(anchor.content) in wanted:
                 return source_utils.strip_domain(anchor.attrs['href'])
         return
     except:
         return
コード例 #30
0
 def __search(self, titles, year, season='0'):
     """POST a search to a cp1251-encoded site and resolve the matching URL.

     Matches results on cleaned title, year (exact, +/- 1, or '0' for
     unknown) and a season number parsed from the result title.  Returns
     the domain-stripped URL of the best match, or None on any failure.
     """
     try:
         t = [cleantitle.get(i) for i in set(titles) if i]
         # accept exact year, adjacent years, or '0' when no year was parsed
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         # DLE-style search form payload; story must be cp1251-encoded
         post = {
             'story': utils.uni2cp(titles[0]),
             'titleonly': 3,
             'do': 'search',
             'subaction': 'search',
             'search_start': 1,
             'full_search': 0,
             'result_from': 1
         }
         html = client.request(self.base_link, post=post)
         # site serves Windows-1251; normalise to UTF-8 before parsing
         html = html.decode('cp1251').encode('utf-8')
         r = dom_parser.parse_dom(html,
                                  'div',
                                  attrs={'id': re.compile('news-id-\d+')})
         r = [(i.attrs['id'], dom_parser.parse_dom(i, 'a', req='href'))
              for i in r]
         # keep only the numeric news id; pair it with the poster <img> title
         r = [(re.sub('[^\d]+', '',
                      i[0]), dom_parser.parse_dom(i[1], 'img', req='title'))
              for i in r]
         r = [(i[0], i[1][0].attrs['title'], '') for i in r if i[1]]
         # pull "<title> N сезон" (Russian for "season") out of the title
         r = [(i[0], i[1], i[2],
               re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
         # use the season-stripped title and season number when present
         r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
               i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
         # extract "(YYYY)" release year from the remaining title, if any
         r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1]), i[3])
              for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0', i[3]) for i in r]
         # a season-less entry counts as season 1 when a season was requested
         r = [(i[0], i[1], i[2],
               '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         # first entry matching title, year window, and requested season
         r = [
             i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
             and int(i[3]) == int(season)
         ][0]
         # map the winning news id back to its anchor href in the page
         r = dom_parser.parse_dom(html,
                                  'a',
                                  attrs={'href': re.compile('.*/%s-' % r)},
                                  req='href')[0].attrs['href']
         return source_utils.strip_domain(r)
     except:
         return