Example #1
 def __search(self, titles):
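     # Search the site for the first title, then return the relative URL of the
     # first result whose cleaned title matches any of the given titles.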
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
         r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
         r = dom_parser.parse_dom(r, 'a', req='href')
         for i in r:
             title = i[1]
             if re.search('\*(?:.*?)\*', title) is not None:
                 title = re.sub('\*(?:.*?)\*', '', title)
             title = cleantitle.get(title)
             if title in t:
                 return source_utils.strip_domain(i[0]['href'])
         # no match found: fall through and return None
     except:
         return
Example #2
    def search(self, title, localtitle, year):
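        # Log in to segos.es with the stored credentials, search for the localized
        # title, and return the absolute URL of the result matching either title.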
        try:
            simply_name = cleantitle.get(localtitle)
            simply_name2 = cleantitle.get(title)
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(localtitle))
            url = urlparse.urljoin(self.base_link, query)
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                'Referer': 'https://segos.es/?page=login'
            }
            data = {
                "login": self.user_name,
                'password': self.user_pass,
                'loguj': ''
            }
            s = requests.Session()
            s.post('https://segos.es/?page=login', data=data, headers=headers)
            k = s.get(url)
            result = k.text

            results = client.parseDOM(
                result,
                'div',
                attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
            for result in results:
                segosurl = client.parseDOM(result, 'a', ret='href')[0]
                result = client.parseDOM(result, 'a')
                segostitles = cleantitle.get(result[1]).split('/')
                for segostitle in segostitles:
                    if simply_name == segostitle or simply_name2 == segostitle:
                        return urlparse.urljoin(self.base_link, segosurl)
        except Exception as e:
            print(str(e))
            return
Example #3
 def __search(self, titles, year, season='0'):
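     # Search by the first title, pull title/year/season out of each result
     # heading ('Staffel' is German for season), and return the best match for
     # the requested year and season; results with a year sort first.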
     try:
         query = self.search_link % urllib.quote_plus(
             cleantitle.query(titles[0]))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(query)
         r = dom_parser.parse_dom(r,
                                  'article',
                                  attrs={'class': 'shortstory'})
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 's_info'})
         r = dom_parser.parse_dom(r, 'h2')
         r = dom_parser.parse_dom(r, 'a', req='href')
         r = [(i.attrs['href'], i.content.lower()) for i in r if i]
         r = [(i[0], re.sub('<.+?>|</.+?>', '',
                            i[1]), re.findall('(.+?) \(*(\d{4})', i[1]))
              for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
         r = [(i[0], i[1], i[2],
               re.findall('(.+?)(\d+)\s+(?:staf+el|s)', i[1])) for i in r]
         r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2],
               i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
         r = [(i[0], i[1], i[2],
               '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y
             and int(i[3]) == int(season)
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #4
    def __search(self, titles, year, content_type):
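        # Search within the given content type, keep only rows carrying a German
        # flag icon (alt='de'), and return the first title/year match.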
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])), content_type)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'search'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r,
                                     'tr',
                                     attrs={'class': re.compile('entry\d+')})
            r = [(dom_parser.parse_dom(i, 'a'),
                  dom_parser.parse_dom(i,
                                       'img',
                                       attrs={
                                           'class': 'flag',
                                           'alt': 'de'
                                       })) for i in r]
            r = [i[0] for i in r if i[0] and i[1]]
            r = [(i[0].attrs['href'], i[0].content) for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #5
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
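        # Resolve a TV show URL: try the site's JSON autocomplete endpoint first,
        # fall back to scraping the search page through a proxy, then match on
        # cleaned title and year and return the URL slug.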
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            r = client.request(self.search_link, post=p, XHR=True)
            try:
                r = json.loads(r)
            except:
                r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r
                     if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs={'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a',
                                      ret='title'), client.parseDOM(i, 'a'))
                     for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r
                     if i[0] and i[1] and i[2]]

            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            return
Example #6
    def __search(self, titles, episode):
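        # Search for '<title> <episode>', drop 'ger sub'/'ger dub' markers from
        # result titles, and return the hoster links (anchors and iframes)
        # embedded in the matching post.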
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]) + ' ' + str(episode))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) + str(episode) for i in set(titles) if i]

            r = client.request(query)
            r = r.split('</style>')[-1].strip()
            r = json.loads(r)

            r = [(i.get('title',
                        {}).get('rendered'), i.get('content',
                                                   {}).get('rendered'))
                 for i in r]
            r = [(re.sub('ger (?:sub|dub)', '', i[0],
                         flags=re.I).strip(), i[1]) for i in r
                 if i[0] and i[1]]
            r = [(i[0], re.findall('(.+?) (\d*)$', i[0]), i[1]) for i in r]
            r = [
                (i[0] if not i[1] else i[1][0][0] + ' ' + str(int(i[1][0][1])),
                 i[2]) for i in r
            ]
            r = [
                dom_parser.parse_dom(i[1], 'div') for i in r
                if cleantitle.get(i[0]) in t
            ]
            r = [[
                x.attrs['href']
                for x in dom_parser.parse_dom(i, 'a', req='href')
            ] + [
                x.attrs['src']
                for x in dom_parser.parse_dom(i, 'iframe', req='src')
            ] for i in r]
            return r[0]
        except:
            return
Example #7
    def __search(self, titles, year):
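        # Search by title, pair each result with the year taken from its '/year/'
        # link, and return the relative URL of the exact title/year match.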
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
            r = [(dom_parser.parse_dom(i.content, 'h4', attrs={'class': 'title-list'}),
                  dom_parser.parse_dom(i.content, 'a', attrs={'href': re.compile('.*/year/.*')})) for i in r]
            r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'), i[1][0].content if i[1] else '0') for i in r
                 if i[0]]
            r = [(i[0][0].attrs['href'], i[0][0].content, re.sub('<.+?>|</.+?>', '', i[1])) for i in r if i[0] and i[1]]
            r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #8
    def __search(self, titles, year):
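        # Search for '<title> <year>' and scan the result figcaptions for a
        # cleaned title match.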
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'figure', attrs={'class': 'pretty-figure'})
            r = dom_parser.parse_dom(r, 'figcaption')

            for i in r:
                title = client.replaceHTMLCodes(i[0]['title'])
                title = cleantitle.get(title)

                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #9
    def __search(self, titles, year):
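        # POST the title to the site's XHR search endpoint (retrying with a
        # cleaned-up query), then match the JSON results on cleaned title and year.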
        try:
            r = urllib.urlencode({'keyword': titles[0]})
            r = client.request(urlparse.urljoin(self.base_link, self.search_link), XHR=True, post=r)
            if r is None:
                r = urllib.urlencode({'keyword': cleantitle.query(titles[0])})
                r = client.request(urlparse.urljoin(self.base_link, self.search_link), XHR=True, post=r)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = json.loads(r)
            r = [(i['link'], re.sub('<.+?>|</.+?>', '', i['title'])) for i in r if 'title' in i and 'link' in i]
            r = [(i[0], i[1], re.findall('(.+?)\s*Movie \d+:.+?$', i[1], re.DOTALL)) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1]) for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \((\d{4})\)?', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #10
    def search(self, title, localtitle, year, search_type):
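        # POST the title to the search form, skip trailer rows ('Zwiastun' is
        # Polish for trailer), and return the first result whose names and,
        # if readable, year match.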
        try:
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.search_link),
                               post={'search': cleantitle.query(title)})
            r = self.get_rows(r, search_type)

            names = [cleantitle.get(i) for i in [title, localtitle]]
            for row in r:
                url = client.parseDOM(row, 'a', ret='href')[0]
                names_found = client.parseDOM(row, 'h3')[0]
                if names_found.startswith(
                        'Zwiastun') and not localtitle.startswith('Zwiastun'):
                    continue
                names_found = names_found.split('/')
                names_found = [cleantitle.get(i) for i in names_found]
                if self.name_matches(names, names_found):
                    found_year = self.try_read_year(url)
                    if not found_year or found_year == year:
                        return url

        except:
            return
Example #11
	def __search(self, titles, year, episode='0'):
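		# Search for '<title> episode <n>' when an episode is requested, parse
		# title/year/episode from each result heading, and return the matching
		# relative URL.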
		try:
			title = titles[0]
			if int(episode) > 0: title += ' episode %s' % episode
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
			r = client.request(
				urlparse.urljoin(self.base_link, self.search_link) % urllib.quote_plus(cleantitle.query(title)))
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'entries'})
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'post'})
			r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title'})
			r = dom_parser.parse_dom(r, 'a', req='href')
			r = [(i.attrs['href'], i.content.lower()) for i in r if i]
			r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
			r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
			r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:episode)\s+(\d+)', i[1])) for i in r]
			r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
			r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(episode) > 0 and i[3] == '0' else i[3]) for i in r]
			r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
			r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(episode)][0]
			return source_utils.strip_domain(r)
		except:
			return
Example #12
 def __search(self, title, localtitle, year, content_type):
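     # POST the localized title to the site's JSON search endpoint, keep results
     # of the requested content type, and match on original or localized title
     # plus production year.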
     try:
         t = cleantitle.get(title)
         tq = cleantitle.get(localtitle)
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         query = urlparse.urljoin(self.base_link, self.search_link)
         post = urllib.urlencode({'k': tq})  # encode the query value directly; the original '%s' template padded instead of substituting
         r = client.request(query, post=post)
         r = json.loads(r)
         r = [
             i.get('result') for i in r
             if i.get('type', '').encode('utf-8') == content_type
         ]
         r = [(i.get('url'), i.get('originalTitle'), i.get('title'),
               i.get('anneeProduction', 0), i.get('dateStart', 0))
              for i in r]
         r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1] if i[1] else ''),
               re.sub('<.+?>|</.+?>', '', i[2] if i[2] else ''),
               i[3] if i[3] else re.findall('(\d{4})', i[4])[0]) for i in r
              if i[3] or i[4]]
         r = sorted(r, key=lambda i: int(i[3]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r
             if i[3] in y and (t.lower() == cleantitle.get(i[1].lower()) or
                               tq.lower() == cleantitle.query(i[2].lower()))
         ][0]
         url = re.findall('(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Example #13
    def __search(self, titles):
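        # Search by the first title (the query is URL-encoded twice, apparently
        # as this site expects) and return the first cover-box result if its
        # title matches.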
        try:
            query = self.search_link % (urllib.quote_plus(
                urllib.quote_plus(cleantitle.query(titles[0]))))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'coverBox'})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'span', attrs={'class': 'name'})
            r = dom_parser.parse_dom(r, 'a')

            title = r[0][1]
            title = cleantitle.get(title)

            if title in t:
                return source_utils.strip_domain(r[0][0]['href'])
            else:
                return
        except:
            return
Example #14
    def __search(self, titles):
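        # Search by the first title and return the relative URL of the first
        # article whose cleaned title matches.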
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = client.replaceHTMLCodes(i[1])
                title = cleantitle.get(title)

                if title in t:
                    return source_utils.strip_domain(i[0]['href'])

            return
        except:
            return
Example #15
    def sources(self, url, hostDict, hostprDict):
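        # Build sources from fullmoviz.org: search by the localized title, open
        # the matching result page, and collect its embedded iframes as SD/FR
        # hoster links.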
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            print url

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            print data

            title = data['title']
            year = data['year'] if 'year' in data else ''
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                localtitle = data[
                    'localtvshowtitle'] if 'localtvshowtitle' in data else False

            t = cleantitle.get(title)
            tq = cleantitle.query(localtitle)
            tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
            tq = re.sub(' ', '%20', tq)
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            query = 'http://www.fullmoviz.org'

            r = client.request('http://www.fullmoviz.org/?s=%s' % tq)
            print 'http://www.fullmoviz.org/?s=%s' % tq
            r = client.parseDOM(r, 'div', attrs={'class': 'post-thumbnail'})
            # pair each result link with its title, stripped of '(year)'/'streaming' markers
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'a', ret='title')[0]) for i in r]
            r = [(i[0], re.sub('(\([0-9]{4}\)|streaming|\s+)', '', i[1])) for i in r]
            r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            r = client.request('http://www.fullmoviz.org' + url)
            print 'http://www.fullmoviz.org' + url

            r = client.parseDOM(r,
                                'div',
                                attrs={'class': 'tab-me-content-wrapper'})
            r = client.parseDOM(r, 'iframe', ret='src')

            for i in r:

                url = i

                host = re.findall(
                    '([\w]+[.][\w]+)$',
                    urlparse.urlparse(url.strip().lower()).netloc)[0]
                if not host in hostDict: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'FR',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #16
	def sources(self, url, hostDict, hostprDict):
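		# Build sources from cinemay.com: search by the localized title, open the
		# matching result page, and resolve each action-bar link to its embedded
		# iframe.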
		try:
			print '-------------------------------    -------------------------------'
			sources = []

			print url

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			print data

			title = data['title']
			year = data['year'] if 'year' in data else ''
			season = data['season'] if 'season' in data else False
			episode = data['episode'] if 'episode' in data else False
			localtitle = data['localtitle'] if 'localtitle' in data else False

			if season and episode:
				localtitle = data['localtvshowtitle'] if 'localtvshowtitle' in data else False

			t = cleantitle.get(title)
			tq = cleantitle.query(localtitle)
			tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
			tq = re.sub(' ', '%20', tq)
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

			query = 'http://www.cinemay.com'

			r = client.request('http://www.cinemay.com/?s=%s' % tq)
			print 'http://www.cinemay.com/?s=%s' % tq
			r = client.parseDOM(r, 'div', attrs={'class': 'unfilm'})
			r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
			r = [(i[0][0], re.sub('(film| en streaming vf| en streaming vostfr|&rsquo;| )', '', i[1][0]).lower())
			     for i in r if len(i[0]) > 0 and len(i[1]) > 0]
			r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
			r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
			r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
			r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
			r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3]) for i in r]
			r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]

			url = re.findall('(?://.+?|)(/.+)', r)[0]
			url = client.replaceHTMLCodes(url)
			url = url.encode('utf-8')

			r = client.request('http://www.cinemay.com' + url)
			print 'http://www.cinemay.com' + url
			r = client.parseDOM(r, 'div', attrs={'class': 'module-actionbar'})
			r = client.parseDOM(r, 'a', ret='href')

			for i in r:
				if i == '#':
					continue

				url = client.request('http://www.cinemay.com' + i)
				url = client.parseDOM(url, 'div', attrs={'class': 'wbox2 video dark'})
				url = client.parseDOM(url, 'iframe', ret='src')[0]

				host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
				if not host in hostDict: continue
				host = client.replaceHTMLCodes(host)
				host = host.encode('utf-8')

				sources.append({'source': host, 'quality': 'SD', 'language': 'FR', 'url': url, 'direct': False,
				                'debridonly': False})

			return sources
		except:
			return sources
Example #17
 def sources(self, url, hostDict, hostprDict):
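     # Build sources from ExtraMovies: for episodes, find the SxxExx release link
     # on the matching show page; for movies, decode the base64 'link=' parameters
     # of the download links and validate each host.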
     try:
         sources = []
         self.hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         url = urlparse.urljoin(
             self.base_link,
             self.search_link % urllib.quote_plus(cleantitle.query(title)))
         if 'tvshowtitle' in data:
             html = self.scraper.get(url).content
             match = re.compile(
                 'class="post-item.+?href="(.+?)" title="(.+?)"',
                 re.DOTALL).findall(html)
             for url, item_name in match:
                 if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                     season_url = '%02d' % int(data['season'])
                     episode_url = '%02d' % int(data['episode'])
                     sea_epi = 'S%sE%s' % (season_url, episode_url)
                     result = self.scraper.get(url).content
                     regex = re.compile('href="(.+?)"',
                                        re.DOTALL).findall(result)
                     for ep_url in regex:
                         if sea_epi in ep_url:
                             quality, info = source_utils.get_release_quality(
                                 url, url)
                             sources.append({
                                 'source': 'CDN',
                                 'quality': quality,
                                 'language': 'en',
                                 'info': info,
                                 'url': ep_url,
                                 'direct': False,
                                 'debridonly': False
                             })
         else:
             html = self.scraper.get(url).content
             match = re.compile(
                 '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                 re.DOTALL).findall(html)
             for url, item_name in match:
                 if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                     quality, info = source_utils.get_release_quality(
                         url, url)
                     result = self.scraper.get(url).content
                     regex = re.compile('href="/download.php.+?link=(.+?)"',
                                        re.DOTALL).findall(result)
                     for link in regex:
                         if 'server=' not in link:
                             try:
                                 link = base64.b64decode(link)
                             except Exception:
                                 pass
                             valid, host = source_utils.is_host_valid(
                                 link, self.hostDict)
                             if valid:
                                 sources.append({
                                     'source': host,
                                     'quality': quality,
                                     'language': 'en',
                                     'info': info,
                                     'url': link,
                                     'direct': False,
                                     'debridonly': False
                                 })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ExtraMovies - Exception: \n' + str(failure))
         return sources
Example #18
    def __search(self, titles, year, season=0, episode=False):
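        # Search by the first title, read title/year/season from each result
        # tile, pick the best year match, and optionally descend into the season
        # list to the requested episode.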
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'ml-item-content'})

            f = []
            for i in r:
                _url = dom_parser.parse_dom(i,
                                            'a',
                                            attrs={'class': 'ml-image'},
                                            req='href')[0].attrs['href']

                _title = re.sub('<.+?>|</.+?>', '',
                                dom_parser.parse_dom(i,
                                                     'h6')[0].content).strip()
                try:
                    _title = re.search('(.*?)\s(?:staf+el|s)\s*(\d+)', _title,
                                       re.I).group(1)
                except:
                    pass

                _season = '0'

                _year = re.findall(
                    'calendar.+?>.+?(\d{4})', ''.join([
                        x.content for x in dom_parser.parse_dom(
                            i, 'ul', attrs={'class': 'item-params'})
                    ]))
                _year = _year[0] if len(_year) > 0 else '0'

                if season > 0:
                    s = dom_parser.parse_dom(i,
                                             'span',
                                             attrs={'class': 'season-label'})
                    s = dom_parser.parse_dom(s,
                                             'span',
                                             attrs={'class': 'el-num'})
                    if s: _season = s[0].content.strip()

                if cleantitle.get(_title) in t and _year in y and int(
                        _season) == int(season):
                    f.append((_url, _year))
            r = f
            r = sorted(r, key=lambda i: int(i[1]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if i[0]][0]

            url = source_utils.strip_domain(r)
            if episode:
                r = client.request(urlparse.urljoin(self.base_link, url))
                r = dom_parser.parse_dom(r,
                                         'div',
                                         attrs={'class': 'season-list'})
                r = dom_parser.parse_dom(r, 'li')
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [(i.attrs['href'], i.content) for i in r]
                r = [i[0] for i in r if i[1] and int(i[1]) == int(episode)][0]
                url = source_utils.strip_domain(r)
            return url
        except:
            return
Example #19
 def __search(self, imdb, titles, year):
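     # Search by the first title, keep only rows whose flag image src contains
     # 'us_ger_' (presumably German-language entries), rank them by their smiley
     # rating, and verify uncertain candidates by looking for the IMDb id on the
     # detail page.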
     try:
         q = self.search_link % urllib.quote_plus(
             cleantitle.query(titles[0]))
         q = urlparse.urljoin(self.base_link, q)
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(q)
         r = dom_parser.parse_dom(
             r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
         r = [(dom_parser.parse_dom(i, 'a', req='href'),
               dom_parser.parse_dom(i,
                                    'div',
                                    attrs={'style': re.compile('.+?')}),
               dom_parser.parse_dom(i, 'img', req='src')) for i in r]
         r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1],
               i[2]) for i in r if i[0] and i[2]]
         r = [(i[0], i[1], [
             x.content for x in i[2]
             if x.content.isdigit() and len(x.content) == 4
         ], i[3]) for i in r]
         r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
         r = [
             i for i in r if any('us_ger_' in x.attrs['src'] for x in i[3])
         ]
         r = [(i[0], i[1], i[2], [
             re.findall('(\d+)', x.attrs['src']) for x in i[3]
             if 'smileys' in x.attrs['src']
         ]) for i in r]
         r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
         r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
         r = sorted(r, key=lambda x: x[3])[::-1]
         r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
         r = [(i[0], i[1], i[2]) for i in r if not i[3]]
         r = [i for i in r if i[2] in y]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]
         match = [
             i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]
         ]
         match2 = [i[0] for i in r]
         match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
         if match2 == []: return
         for i in match2[:5]:
             try:
                 if match:
                     url = match[0]
                     break
                 r = client.request(urlparse.urljoin(self.base_link, i))
                 r = re.findall('(tt\d+)', r)
                 if imdb in r:
                     url = i
                     break
             except:
                 pass
         return source_utils.strip_domain(url)
     except:
         return
Example #20
	def sources(self, url, hostDict, hostprDict):
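		# Build sources from dpstreaming.live: search for the title (plus
		# 'saison0<n>' for episodes), read the language and quality badges, then
		# resolve each player id through the XHR 'pid' endpoint.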
		try:
			print '-------------------------------    -------------------------------'
			sources = []
			print url
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			season = data['season'] if 'season' in data else False
			episode = data['episode'] if 'episode' in data else False
			print season, episode
			if season and episode:
				print 'TV'
				self.search_link = 'query=%s&submit=Submit+Query'
				aTitle = data['tvshowtitle']
			else:
				self.search_link = 'query=%s&submit=Submit+Query'
				aTitle = data['title']
			post = self.search_link % (urllib.quote_plus(cleantitle.query(aTitle)))
			url = 'https://dpstreaming.live/recherche/'
			t = cleantitle.get(aTitle)
			r = client.request(url, XHR=True, referer=url, post=post)
			r = client.parseDOM(r, 'div', attrs={'class': 'film-k kutu-icerik kat'})
			if season and episode:
				t = t + 'saison0' + season
			r = client.parseDOM(r, 'div', attrs={'class': 'play fa fa-play-circle'})
			r = sorted(set(r))
			r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
			r = [(i[0][0], i[1][0].lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
			r = [i[0] for i in r if t == cleantitle.get(i[1])][0]
			url0 = '%s%s' % ('https://dpstreaming.live', r)
			print url0
			url = client.replaceHTMLCodes(url0)
			url = url0.encode('utf-8')
			r = client.request(url, XHR=True, referer=url)
			r = re.sub('(\n|\t)', '', r)
			langue = re.compile('<b class=\"fa fa-cc\"></b><span>(.+?)</span>', re.MULTILINE | re.DOTALL).findall(r)[0]
			if langue == 'VF':
				langue = 'FR'
			quality2 = re.compile('<div class=\"kalite\">(.+?)</div>', re.MULTILINE | re.DOTALL).findall(r)[0]
			quality2 = re.sub('-', '', quality2)
			if season and episode:
				unLien0a = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri'})[0]
				r = re.compile('Saison\s+0%s\s+\-\s+Episode\s+0%s(.+?)class=\"dropit-trigger\">' % (season, episode),
				               re.MULTILINE | re.DOTALL).findall(unLien0a)[0]
				unLien0b = client.parseDOM(r, 'li', ret='id')
			else:
				r = client.parseDOM(r, 'div', attrs={'class': 'dizi-bolumleri film'})
				unLien0b = client.parseDOM(r, 'span', ret='id')
			counter = 0
			for unLienUrl in unLien0b:
				if 'gf-' in unLienUrl:
					continue
				dataUrl = urllib.urlencode({'pid': unLienUrl[1:]})
				dataUrl = client.request(url0, post=dataUrl, XHR=True, referer=url0)
				try:
					url = client.parseDOM(dataUrl, 'iframe', ret='src')[1]
				except:
					url = client.parseDOM(dataUrl, 'iframe', ret='src')[0]
				if url.startswith('//'):
					url = 'http:' + url  # keep the URL parseable so the host can be extracted below
				host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
				if not host in hostDict: continue
				host = client.replaceHTMLCodes(host)
				host = host.encode('utf-8')
				url = url.encode('utf-8')
				if '1080p' in quality2:
					quality = '1080p'
				elif '720p' in quality2 or 'bdrip' in quality2 or 'hdrip' in quality2:
					quality = 'HD'
				else:
					quality = 'SD'
				if 'dvdscr' in quality2 or 'r5' in quality2 or 'r6' in quality2:
					quality = 'SCR'
				elif 'camrip' in quality2 or 'tsrip' in quality2 or 'hdcam' in quality2 or 'hdts' in quality2 or 'dvdcam' in quality2 or 'dvdts' in quality2 or 'cam' in quality2 or 'telesync' in quality2 or 'ts' in quality2:
					quality = 'CAM'
				sources.append({'source': host, 'quality': quality, 'language': langue, 'url': url, 'direct': False,
				                'debridonly': False})
			print sources
			return sources
		except:
			return sources