Example #1
    def search(self, title, localtitle, year):
        try:
            # Python 2 hack: force UTF-8 as the default string encoding
            import sys
            reload(sys)
            sys.setdefaultencoding('utf8')

            # split the cleaned local title into words to match against results
            simply_name = cleantitle.query(localtitle).split(' ')

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            url = urlparse.urljoin(self.base_link, query)
            result = client.request(url)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'row search-results'})
            results = client.parseDOM(
                result,
                'div',
                attrs={'class': 'item-detail-bigblock title title-bigblock'})
            for result in results:
                movieneourl = client.parseDOM(result, 'a', ret='href')[0]
                result = client.parseDOM(result, 'a')[0]
                for word in simply_name:
                    if word in result and year in result:
                        return [
                            urlparse.urljoin(self.base_link, movieneourl),
                            result
                        ]
        except Exception as e:
            print(str(e))
            return
Example #2
    def search(self, title, localtitle, year, search_type):
        try:
            url = self.do_search(cleantitle.query(title), title, localtitle,
                                 year, search_type)
            if not url:
                url = self.do_search(cleantitle.query(localtitle), title,
                                     localtitle, year, search_type)
            return url
        except:
            return
Example #3
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

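            # narrow the page to the search-result figures, then read each caption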
            r = dom_parser.parse_dom(r,
                                     'figure',
                                     attrs={'class': 'pretty-figure'})
            r = dom_parser.parse_dom(r, 'figcaption')

            for i in r:
                title = client.replaceHTMLCodes(i[0]['title'])
                title = cleantitle.get(title)

                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #4
    def search(self, localtitle, year, search_type):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result, 'div', attrs={'id': search_type})
            links = client.parseDOM(result, 'figcaption')
            names = client.parseDOM(result, 'figcaption', ret='title')
            urls = []
            for i in range(len(names)):
                name = cleantitle.get(names[i])
                url = client.parseDOM(links[i], 'a', ret='href')[0]
                if name == simply_name:
                    urls.append(url)
            if len(urls) == 1:
                return urls[0]
            else:
                return self.findMatchByYear(year, urls)

        except:
            return
Example #5
    def search(self, localtitle, year):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result, 'article')

            for row in result:
                a_href = client.parseDOM(row, 'h3')[0]
                url = client.parseDOM(a_href, 'a', ret='href')[0]
                name = client.parseDOM(a_href, 'a')[0]
                name = cleantitle.get(name)

                year_found = client.parseDOM(row,
                                             'span',
                                             attrs={'class': 'dtyear'})
                if year_found:
                    year_found = year_found[0]

                if (name == simply_name and
                    (not year_found or not year or year_found == year)):
                    return url
        except:
            return
Example #6
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

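            # pull (href, title, year) out of each result row, dropping rows that
            # lack either field; the newest hit whose cleaned title is in t and
            # whose year is in y wins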
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'],
                  client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #7
    def __search(self, titles, year):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               post={'query': cleantitle.query(titles[0])})

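            # each hit's 'title' attribute looks like 'Name (Year) - ...'; split
            # out name and year, defaulting the year to '0' when it is missing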
            r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 've-screen'},
                                     req='title')
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  i.attrs['title'].split(' - ')[0]) for i in r]
            r = [(i[0][0].attrs['href'], i[1],
                  re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #8
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = i[1]
                if re.search('\*(?:.*?)\*', title) is not None:
                    title = re.sub('\*(?:.*?)\*', '', title)
                title = cleantitle.get(title)
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
        except:
            return
Example #9
    def __search(self, titles, imdb, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query, XHR=True)
            r = json.loads(r)

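            # the JSON posts carry the stream URL, year and IMDb link as custom
            # fields; match either by IMDb id or by cleaned title plus year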
            r = [(i.get('title'), i.get('custom_fields', {}))
                 for i in r.get('posts', [])]
            r = [(i[0], i[1]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].get('Streaming', ['']), i[1].get('Jahr', ['0']),
                  i[1].get('IMDb-Link', [''])) for i in r if i]
            r = [(i[0], i[1][0], i[2][0], re.findall('.+?(tt\d+).*?', i[3][0]))
                 for i in r if i[0] and i[1] and i[2] and i[3]]
            r = [
                i[1] for i in r
                if imdb in i[3] or (cleantitle.get(i[0]) in t and i[2] in y)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #10
    def __search(self, titles, year, imdb):
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

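            # scrape (href, title, year) from each movie cell, keep plausible
            # years, sort newest first, then keep only the title matches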
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

            url = None
            if len(r) > 1:
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                    data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
Example #11
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))

            # request the page up to 4 times to work around the site's random bogus
            # 404s; responses (legit & bogus 404s) come back fast, so a short
            # timeout is fine
            for i in range(4):
                result = client.request(query, timeout=3)
                if result is not None:
                    break

            t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
            t = [cleantitle.get(i) for i in set(t) if i]
            result = re.compile(
                'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
                re.DOTALL).findall(result)
            for i in result:
                if cleantitle.get(cleantitle.normalize(
                        i[1])) in t and year in i[1]:
                    url = i[0]

            url = url.encode('utf-8')

            #log_utils.log('\n\n~~~ outgoing tvshow() url')
            #log_utils.log(url)

            # returned 'url' format like: /serie/x_files
            return url
        except:
            return
Example #12
    def search(self, localtitle, year, search_type):
        try:

            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={
                                   'q': cleantitle.query(localtitle),
                                   'sb': ''
                               })
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})

            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if search_type not in url:
                    continue

                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    return url
        except:
            return
Example #13
    def search(self, localtitle, year, search_type):
        try:
            simply_name = cleantitle.get(localtitle)

            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'result-item'})
            for x in result:
                correct_type = client.parseDOM(x,
                                               'span',
                                               attrs={'class': search_type})
                correct_year = client.parseDOM(x,
                                               'span',
                                               attrs={'class':
                                                      'year'})[0] == year
                name = client.parseDOM(x, 'div', attrs={'class': 'title'})[0]
                url = client.parseDOM(name, 'a', ret='href')[0]
                name = cleantitle.get(client.parseDOM(name, 'a')[0])
                if correct_type and correct_year and name == simply_name:
                    return url

        except:
            return
Example #14
    def do_search(self, title, local_title, year, video_type):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(cleantitle.query(title))
            result = client.request(url)
            result = client.parseDOM(result, 'div', attrs={'class': 'item'})
            for row in result:
                row_type = client.parseDOM(row,
                                           'div',
                                           attrs={'class': 'typepost'})[0]
                if row_type != video_type:
                    continue
                names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
                names = names.split('/')
                year_found = client.parseDOM(row,
                                             'span',
                                             attrs={'class': 'year'})

                titles = [cleantitle.get(i) for i in [title, local_title]]

                if self.name_matches(names, titles,
                                     year) and (len(year_found) == 0
                                                or year_found[0] == year):
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    return urlparse.urljoin(self.base_link, url)
        except:
            return
Example #15
    def __search(self, search_link, imdb, titles):
        try:
            query = search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
            r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [
                i.attrs['href'] for i in r
                if i and cleantitle.get(i.content) in t
            ][0]

            url = source_utils.strip_domain(r)

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'href': re.compile('.*/tt\d+.*')},
                                     req='href')
            r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
            r = [i[0] for i in r if i]

            return url if imdb in r else None
        except:
            return
Example #16
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

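            # keep only season entries (or only non-season entries when searching
            # for a movie), then parse 'name (year)' and an optional
            # 'staffel/s <n>' season tag out of each product title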
            r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['products', 'row']})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': ['box-product', 'clearfix']})
            if int(season) > 0:
                r = [i for i in r if dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
            else:
                r = [i for i in r if not dom_parser.parse_dom(i, 'div', attrs={'class': 'episode'})]
            r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title-product'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            url = source_utils.strip_domain(r)
            url = url.replace('-info', '-stream')
            return url
        except:
            return
Example #17
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try:
                r = json.loads(r)
            except:
                r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs={'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return
Example #18
    def __search(self, titles, year):
        try:
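            # the search endpoint needs a nonce; __get_nonce is cached (the 24 is
            # presumably hours) so it is not re-scraped on every call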
            n = cache.get(self.__get_nonce, 24)

            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), n)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)
            r = json.loads(r)
            r = [(r[i].get('url'), r[i].get('title'),
                  r[i].get('extra').get('date')) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #19
    def __search(self, titles, year, season='0'):
        try:
            aj = cache.get(self.__get_ajax_object, 24)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(aj.get('ajax_url'),
                               post={
                                   'action': aj.get('search'),
                                   'nonce': aj.get('snonce'),
                                   'query': cleantitle.query(titles[0])
                               })

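            # split 'name (year)' out of each hit, pull an optional 'staffel/s <n>'
            # season tag, and treat untagged hits as season 1 when a season was
            # asked for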
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'search-result'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'search-item-content'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2],
                  '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [
                i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #20
    def __search(self, titles):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = json.loads(r)
            r = [(i.get('id'), i.get('value')) for i in r]
            r = [i[0] for i in r if cleantitle.get(i[1]) in t][0]

            return r
        except:
            return
Example #21
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

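            # walk shortstory articles down to their title links, strip leftover
            # tags, then parse out 'name (year)' and an optional '<n> staffel'
            # season marker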
            r = dom_parser.parse_dom(r,
                                     'article',
                                     attrs={'class': 'shortstory'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
            r = dom_parser.parse_dom(r, 'h2')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], re.sub('<.+?>|</.+?>', '',
                               i[1]), re.findall('(.+?) \(*(\d{4})', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
                  i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2],
                  '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [
                i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
                and int(i[3]) == int(season)
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #22
    def __search(self, titles, year, content):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]

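            # three-step session: seed a cookie scoped to the requested year, POST
            # the search with that cookie, then fetch the content-type listing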
            c = client.request(urlparse.urljoin(self.base_link,
                                                self.year_link % int(year)),
                               output='cookie')

            p = urllib.urlencode({'search': cleantitle.query(titles[0])})
            c = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               cookie=c,
                               post=p,
                               output='cookie')
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.type_link % content),
                               cookie=c,
                               post=p)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'content'})
            r = dom_parser.parse_dom(r, 'tr')
            r = [dom_parser.parse_dom(i, 'td') for i in r]
            r = [dom_parser.parse_dom(i, 'a', req='href') for i in r]

            r = [(i[0].attrs['href'], i[0].content, i[1].content) for i in r
                 if i]
            x = []
            for i in r:
                if re.search('(?<=<i>\().*$', i[1]):
                    x.append((i[0], re.search('(.*?)(?=\s<)', i[1]).group(),
                              re.search('(?<=<i>\().*$', i[1]).group(), i[2]))
                else:
                    x.append((i[0], i[1], i[1], i[2]))
            r = [
                i[0] for i in x
                if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t)
                and i[3] == year
            ][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #23
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            result = client.request(query)
            #tvshowtitle = cleantitle.get(tvshowtitle)
            t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
            t = [cleantitle.get(i) for i in set(t) if i]
            result = re.compile(
                'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)',
                re.DOTALL).findall(result)
            for i in result:
                if cleantitle.get(cleantitle.normalize(
                        i[1])) in t and year in i[1]:
                    url = i[0]

            url = url.encode('utf-8')
            return url
        except:
            return
Example #24
    def movie(self, imdb, title, localtitle, aliases, year):
        try:

            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={'szukaj': cleantitle.query(localtitle)})
            r = client.parseDOM(r, 'div', attrs={'class': 'video_info'})

            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'h1')[0]
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    return url
        except:
            return
Example #25
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'MovieList'})
            r = dom_parser.parse_dom(r, 'li', attrs={'class': 'TPostMv'})
            r = dom_parser.parse_dom(r, 'a')

            for i in r:
                title = dom_parser.parse_dom(i, 'h2', attrs={'class': 'Title'})
                title = cleantitle.get(title[0][1])
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
        except:
            return
Example #26
    def __search(self, titles):
        try:
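            # note: the query term is quote_plus-encoded twice; the target site
            # apparently expects a double-encoded search string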
            query = self.search_link % (urllib.quote_plus(
                urllib.quote_plus(cleantitle.query(titles[0]))))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            post = urllib.urlencode({'movlang_de': '1', 'movlang': ''})

            r = client.request(query, post=post)

            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'table'})
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'PreviewImage'})

            for x in r:
                title = cleantitle.get(x[1])
                if title in t:
                    return source_utils.strip_domain(x[0]['href'])
            return
        except:
            return
Example #27
    def __search(self, titles, year, content_type):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), content_type)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'search'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r,
                                     'tr',
                                     attrs={'class': re.compile('entry\d+')})
            r = [(dom_parser.parse_dom(i, 'a'),
                  dom_parser.parse_dom(i,
                                       'img',
                                       attrs={
                                           'class': 'flag',
                                           'alt': 'de'
                                       })) for i in r]
            r = [i[0] for i in r if i[0] and i[1]]
            r = [(i[0].attrs['href'], i[0].content) for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #28
    def __search(self, titles, year):
        try:
            r = urllib.urlencode({'keyword': titles[0]})
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               XHR=True,
                               post=r)
            if r is None:
                r = urllib.urlencode({'keyword': cleantitle.query(titles[0])})
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.search_link),
                                   XHR=True,
                                   post=r)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

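            # results arrive as JSON; strip HTML from each title, drop a trailing
            # 'Movie <n>: ...' suffix, then split out 'name (year)' as usual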
            r = json.loads(r)
            r = [(i['link'], re.sub('<.+?>|</.+?>', '', i['title'])) for i in r
                 if 'title' in i and 'link' in i]
            r = [(i[0], i[1],
                  re.findall('(.+?)\s*Movie \d+:.+?$', i[1], re.DOTALL))
                 for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1]) for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #29
    def search(self, title, localtitle, year):
        try:
            simply_name = cleantitle.get(localtitle)
            simply_name2 = cleantitle.get(title)
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            url = urlparse.urljoin(self.base_link, query)
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                'Referer': 'https://segos.es/?page=login'
            }
            data = {
                "login": self.user_name,
                'password': self.user_pass,
                'loguj': ''
            }
            s = requests.Session()
            # log in first, then fetch the search results with the same session
            s.post('https://segos.es/?page=login', data=data, headers=headers)
            k = s.get(url)
            result = k.text

            results = client.parseDOM(
                result,
                'div',
                attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
            for result in results:
                segosurl = client.parseDOM(result, 'a', ret='href')[0]
                result = client.parseDOM(result, 'a')
                segostitles = cleantitle.get(result[1]).split('/')
                for segostitle in segostitles:
                    if simply_name == segostitle or simply_name2 == segostitle:
                        return urlparse.urljoin(self.base_link, segosurl)
        except Exception as e:
            print(str(e))
            return
Example #30
    def __search(self, titles, episode):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]) + ' ' + str(episode))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) + str(episode) for i in set(titles) if i]

            r = client.request(query)
            r = r.split('</style>')[-1].strip()
            r = json.loads(r)

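            # titles look like 'Name <episode>' with an optional 'ger sub/dub'
            # marker; normalise them, keep the entries matching a wanted
            # title+episode, and collect every <a href> and <iframe src> from the
            # matching post bodies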
            r = [(i.get('title',
                        {}).get('rendered'), i.get('content',
                                                   {}).get('rendered'))
                 for i in r]
            r = [(re.sub('ger (?:sub|dub)', '', i[0],
                         flags=re.I).strip(), i[1]) for i in r
                 if i[0] and i[1]]
            r = [(i[0], re.findall('(.+?) (\d*)$', i[0]), i[1]) for i in r]
            r = [
                (i[0] if not i[1] else i[1][0][0] + ' ' + str(int(i[1][0][1])),
                 i[2]) for i in r
            ]
            r = [
                dom_parser.parse_dom(i[1], 'div') for i in r
                if cleantitle.get(i[0]) in t
            ]
            r = [[
                x.attrs['href']
                for x in dom_parser.parse_dom(i, 'a', req='href')
            ] + [
                x.attrs['src']
                for x in dom_parser.parse_dom(i, 'iframe', req='src')
            ] for i in r]
            return r[0]
        except:
            return