Example #1
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

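            # normalised variants of every candidate title, for comparison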
            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r,
                                     'figure',
                                     attrs={'class': 'pretty-figure'})
            r = dom_parser.parse_dom(r, 'figcaption')

            for i in r:
                title = client.replaceHTMLCodes(i[0]['title'])
                title = cleantitle.get(title)

                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #2
    def search(self, localtitle, year, search_type):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result, 'div', attrs={'id': search_type})
            links = client.parseDOM(result, 'figcaption')
            names = client.parseDOM(result, 'figcaption', ret='title')
            urls = []
            for i in range(len(names)):
                name = cleantitle.get(names[i])
                url = client.parseDOM(links[i], 'a', ret='href')[0]
                if (name == simply_name):
                    urls.append(url)
            if len(urls) == 1:
                return urls[0]
            else:
                return self.findMatchByYear(year, urls)

        except:
            return
Example #3
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

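            # result titles may be wrapped in *...* markers; strip them before matching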
            for i in r:
                title = i[1]
                if re.search('\*(?:.*?)\*', title) is not None:
                    title = re.sub('\*(?:.*?)\*', '', title)
                title = cleantitle.get(title)
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
        except:
            return
Example #4
    def do_search(self, title, localtitle, year, is_movie_search):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote(title)
            result = client.request(url)
            result = result.decode('utf-8')

            result = client.parseDOM(result, 'ul', attrs={'id': 'resultList2'})
            li_list = []
            for el in result:
                li_list.extend(client.parseDOM(el, 'li'))

            result = [(client.parseDOM(i, 'a', ret='href')[0],
                       client.parseDOM(i, 'div', attrs={'class': 'title'})[0],
                       (client.parseDOM(i, 'div', attrs={'class': 'title_org'}) + [None])[0],
                       client.parseDOM(i, 'div', attrs={'class': 'info'})[0],
                       ) for i in li_list]

            search_type = 'Film' if is_movie_search else 'Serial'
            cleaned_titles = [cleantitle.get(title), cleantitle.get(localtitle)]                         
            # filter by name
            result = [x for x in result if self.check_titles(cleaned_titles, [x[2], x[1]])]
            # filter by type
            result = [x for x in result if x[3].startswith(search_type)]
            # filter by year
            result = [x for x in result if x[3].endswith(str(year))]

            if len(result) > 0:
                return result[0][0]
            else:
                return

        except:
            return
Example #5
    def search(self, title, localtitle, year):
        try:
            import sys
            reload(sys)
            sys.setdefaultencoding('utf8')
            simply_name = cleantitle.query(localtitle).split(' ')
            simply_name2 = cleantitle.query(title).split(' ')
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            url = urlparse.urljoin(self.base_link, query)
            result = client.request(url)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'row search-results'})
            results = client.parseDOM(
                result,
                'div',
                attrs={'class': 'item-detail-bigblock title title-bigblock'})
            for result in results:
                movieneourl = client.parseDOM(result, 'a', ret='href')[0]
                result = client.parseDOM(result, 'a')[0]
                for word in simply_name:
                    if word in result and year in result:
                        return [
                            urlparse.urljoin(self.base_link, movieneourl),
                            result
                        ]
        except Exception as e:
            print str(e)
            return
Example #6
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
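            # accept the exact year, one year either side, or '0' (no year found)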
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'],
                  client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #7
    def __search(self, titles, year, imdb):
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

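            # multiple candidates: check each detail page for the matching IMDb id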
            url = None
            if len(r) > 1:
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                    data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
Example #8
    def search(self, localtitle, year):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result, 'article')

            for row in result:
                a_href = client.parseDOM(row, 'h3')[0]
                url = client.parseDOM(a_href, 'a', ret='href')[0]
                name = client.parseDOM(a_href, 'a')[0]
                name = cleantitle.get(name)

                year_found = client.parseDOM(row,
                                             'span',
                                             attrs={'class': 'dtyear'})
                if year_found:
                    year_found = year_found[0]

                if (name == simply_name and
                    (not year_found or not year or year_found == year)):
                    return url
        except:
            return
Example #9
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['products', 'row']})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': ['box-product', 'clearfix']})
            if int(season) > 0:
                r = [i for i in r if dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
            else:
                r = [i for i in r if not dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
            r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title-product'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
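            # extract "(year)" and "staffel N" (German for season N) from the link text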
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            url = source_utils.strip_domain(r)
            url = url.replace('-info', '-stream')
            return url
        except:
            return
Example #10
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url[
                'episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(
                    url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(
                    self.base_link,
                    self.search_link % clean_title.replace('-', '+'))
                search_results = self.scraper.get(search_url).content
                parsed = client.parseDOM(search_results, 'div',
                                         {'id': 'movie-featured'})
                parsed = [(client.parseDOM(i, 'a', ret='href'),
                           re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
                parsed = [
                    (i[0][0], i[1][0]) for i in parsed
                    if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)
                ]
                url = parsed[0][0]
            except:
                pass
            data = self.scraper.get(url).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'),
                       client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
            return url[0][1]
        except:
            return
Example #11
    def search(self, localtitle, year, search_type):
        try:

            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={
                                   'q': cleantitle.query(localtitle),
                                   'sb': ''
                               })
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})

            local_simple = cleantitle.get(localtitle)
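            # each row's second anchor holds "Title (Year)"; its href encodes the type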
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if search_type not in url:
                    continue

                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    return url
        except:
            return
Example #12
    def search(self, localtitle, year, search_type):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'result-item'})
            for x in result:
                correct_type = client.parseDOM(x,
                                               'span',
                                               attrs={'class': search_type})
                correct_year = client.parseDOM(x,
                                               'span',
                                               attrs={'class':
                                                      'year'})[0] == year
                name = client.parseDOM(x, 'div', attrs={'class': 'title'})[0]
                url = client.parseDOM(name, 'a', ret='href')[0]
                name = cleantitle.get(client.parseDOM(name, 'a')[0])
                if (correct_type and correct_year and name == simply_name):
                    return url

        except:
            return
Example #13
    def matchAlias(self, title, aliases):
        try:
            for alias in aliases:
                if cleantitle.get(title) == cleantitle.get(alias['title']):
                    return True
            return False
        except:
            return False
Example #14
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))

            # request up to 4 times to work around the site's random 404s;
            # real responses and bogus 404s both return fast, so the short
            # timeout matters little
            for i in range(4):
                result = client.request(query, timeout=3)
                if result is not None: break

            t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
            t = [cleantitle.get(i) for i in set(t) if i]
            result = re.compile(
                'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
                re.DOTALL).findall(result)
            for i in result:
                if cleantitle.get(cleantitle.normalize(
                        i[1])) in t and year in i[1]:
                    url = i[0]

            url = url.encode('utf-8')

            #log_utils.log('\n\n~~~ outgoing tvshow() url')
            #log_utils.log(url)

            # returned 'url' format like: /serie/x_files
            return url
        except:
            return
Example #15
    def __search(self, titles, year):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               post={'query': cleantitle.query(titles[0])})

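            # each hit is an <li class="entTd"> whose tooltip reads "Title - ..."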
            r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 've-screen'},
                                     req='title')
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  i.attrs['title'].split(' - ')[0]) for i in r]
            r = [(i[0][0].attrs['href'], i[1],
                  re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #16
    def __search(self, search_link, imdb, titles):
        try:
            query = search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
            r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [
                i.attrs['href'] for i in r
                if i and cleantitle.get(i.content) in t
            ][0]

            url = source_utils.strip_domain(r)

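            # verify the candidate by scanning its detail page for the expected IMDb id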
            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'href': re.compile('.*/tt\d+.*')},
                                     req='href')
            r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
            r = [i[0] for i in r if i]

            return url if imdb in r else None
        except:
            return
Example #17
    def __search(self, titles, imdb, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query, XHR=True)
            r = json.loads(r)

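            # posts carry German custom fields: 'Streaming' (link), 'Jahr' (year), 'IMDb-Link'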
            r = [(i.get('title'), i.get('custom_fields', {}))
                 for i in r.get('posts', [])]
            r = [(i[0], i[1]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
                  i[1].get('IMDb-Link', [''])) for i in r if i]
            r = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0]))
                 for i in r if i[0] and i[1] and i[2] and i[3]]
            r = [
                i[1] for i in r
                if imdb in i[3] or (cleantitle.get(i[0]) in t and i[2] in y)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #18
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

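            # prefer an exact title + air-date match, then title alone, then date alone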
            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception() 

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example #19
    def __search(self, titles, year):
        try:
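            # search nonce fetched via __get_nonce and cached for 24 hours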
            n = cache.get(self.__get_nonce, 24)

            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), n)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)
            r = json.loads(r)
            r = [(r[i].get('url'), r[i].get('title'),
                  r[i].get('extra').get('date')) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #20
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example #21
    def __search(self, title):
        try:
            t = cleantitle.get(title)

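            # the search form POSTs the German field 'suchbegriff' ("search term")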
            r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'suchbegriff': title})
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'ausgabe_1'}, req='href')
            r = [(i.attrs['href'], i.content) for i in r]
            r = [i[0] for i in r if cleantitle.get(i[1]) == t][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #22
    def get_filmdb_data(self, season, episode, title, localtitle, year):
        try:
            import requests

            titles = {localtitle, title}
            for item in titles:
                try:
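                    # 'Wikingowie' is the Polish title of 'Vikings'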
                    if item == 'Vikings':
                        item = 'Wikingowie'
                    headers = {
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                        'Referer': 'http://filmdb.pl/'
                    }
                    url = urlparse.urljoin(self.filmdb, self.filmdb_search)
                    data = {"search_film": item}
                    r = requests.post(url, data=data, headers=headers)
                    result = r.text
                    hrefs = client.parseDOM(result, 'a', ret='href')
                    for href in hrefs:
                        result = client.request(self.filmdb + str(href))
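                        # the page's hidden inputs map, in order, to: title, subtitle, year, film id, 'sp'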
                        fdbtitle = client.parseDOM(result,
                                                   'input',
                                                   ret='value')[0]
                        fdbsubtitle = client.parseDOM(result,
                                                      'input',
                                                      ret='value')[1]
                        fdbyear = client.parseDOM(result, 'input',
                                                  ret='value')[2]
                        fdbfilmid = client.parseDOM(result,
                                                    'input',
                                                    ret='value')[3]
                        fdbsp = client.parseDOM(result, 'input',
                                                ret='value')[4]
                        local_clean = cleantitle.get(localtitle)
                        title_clean = cleantitle.get(title)
                        found_clean = cleantitle.get(fdbsubtitle)
                        if found_clean == '':
                            found_clean = cleantitle.get(fdbtitle)
                        if title_clean == found_clean or local_clean == found_clean:
                            return {
                                'sp': fdbsp,
                                'filmid': fdbfilmid,
                                'sezon': season,
                                'odcinek': episode
                            }
                except:
                    pass
        except Exception as e:
            print str(e)
Example #23
    def __search(self, titles, year):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(urlparse.urljoin(self.base_link, self.search_link), post=urllib.urlencode({'val': cleantitle.query(titles[0])}), XHR=True)
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content, re.findall('\((\d{4})', i.content)) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0') for i in r]
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #24
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                premiered)[0]
            premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])
            items = dom_parser.parse_dom(result,
                                         'a',
                                         attrs={'itemprop': 'url'})

            url = [
                i.attrs['href'] for i in items if bool(
                    re.compile(
                        '<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' %
                        (season, episode)).search(i.content))
            ][0]

            url = url.encode('utf-8')
            return url
        except:
            return
Example #25
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % (title.replace(':', ' ').replace(' ', '+'))

            search_results = client.request(url)
            match = re.compile('<h1><a href="(.+?)" rel="bookmark">(.+?)</a></h1>',re.DOTALL).findall(search_results)
            for item_url, item_title in match:
                if cleantitle.get(title) in cleantitle.get(item_title):
                    if year in str(item_title):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolMovieZone - Exception: \n' + str(failure))
            return
Example #26
    def do_search(self, title, local_title, year, video_type):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(cleantitle.query(title))
            result = client.request(url)
            result = client.parseDOM(result, 'div', attrs={'class': 'item'})
            for row in result:
                row_type = client.parseDOM(row,
                                           'div',
                                           attrs={'class': 'typepost'})[0]
                if row_type != video_type:
                    continue
                names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
                names = names.split('/')
                year_found = client.parseDOM(row,
                                             'span',
                                             attrs={'class': 'year'})

                titles = [cleantitle.get(i) for i in [title, local_title]]

                if self.name_matches(names, titles,
                                     year) and (len(year_found) == 0
                                                or year_found[0] == year):
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    return urlparse.urljoin(self.base_link, url)
        except:
            return
Example #27
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'rb'}, req='href')
            r = [(i.attrs['href'], i.content) for i in r]
            r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #28
    def __search(self, titles):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = json.loads(r)
            r = [(i.get('id'), i.get('value')) for i in r]
            r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]

            return r
        except:
            return
Example #29
    def __search(self, titles, year, season='0'):
        try:
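            # ajax endpoint descriptor (url, action name, nonce), cached for 24 hours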
            aj = cache.get(self.__get_ajax_object, 24)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(aj.get('ajax_url'),
                               post={
                                   'action': aj.get('search'),
                                   'nonce': aj.get('snonce'),
                                   'query': cleantitle.query(titles[0])
                               })

            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'search-result'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'search-item-content'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
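            # split out "(year)" and the German "staffel N" season marker below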
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2],
                  '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [
                i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #30
    def name_matches(self, names_found, titles, year):
        for name in names_found:
            name = name.strip().encode('utf-8')
            # sometimes sites append the year to the title, so compare the cleaned forms
            clean_found_title = cleantitle.get(name)
            if clean_found_title in titles:
                return True

        return False