Example #1
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			query = urljoin(self.base_link, url)
			r = self.scraper.get(query).content
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'ko-bind'})
			r = dom_parser.parse_dom(r, 'table', attrs={'class': 'links-table'})
			r = dom_parser.parse_dom(r, 'tbody')
			r = dom_parser.parse_dom(r, 'tr')
			for i in r:
				if re.search('(?<=<td>)(HD)(?=</td>)', i[1]):
					quality = '720p'
				else:
					quality = 'SD'
				x = dom_parser.parse_dom(i, 'td', attrs={'class': 'name'}, req='data-bind')
				hoster = re.search("(?<=>).*$", x[0][1])
				hoster = hoster.group().lower()
				url = re.search("http(.*?)(?=')", x[0][0]['data-bind'])
				url = url.group()
				valid, hoster = source_utils.is_host_valid(hoster, hostDict)
				if not valid: continue
				sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False,
				                'debridonly': False})
			return sources
		except:
			return sources
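All of the `sources()` methods in this collection return the same list-of-dicts shape, with optional extras such as `info` or `checkquality` layered on top. A minimal sketch of that contract (the `validate_entry` helper is illustrative, not part of any addon):

    # Illustrative helper, not from the original code: checks an entry for the
    # keys every scraper in these examples emits.
    def validate_entry(entry):
        required = {'source', 'quality', 'language', 'url', 'direct', 'debridonly'}
        return required.issubset(entry)

    entry = {'source': 'vidoza', 'quality': '720p', 'language': 'de',
             'url': 'http://example.com/stream', 'direct': False, 'debridonly': False}
    assert validate_entry(entry)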
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            hostDict = hostprDict + hostDict
            if url is None:
                return sources

            for href in url:
                r = self.scraper.get(href).content

                quality = re.findall(">(\w+)<\/p", r)
                # guard against an empty match list before indexing
                quality = quality[0] if quality else 'SD'
                if quality == "HDHC":
                    quality = "1080p"
                elif quality == "HD":
                    quality = "720p"
                r = dom_parser.parse_dom(r, 'div', {'id': 'servers-list'})
                r = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r if i]

                for i in r[0]:
                    href = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'],
                            'data-server': i.attrs['data-server'], 'data-name': i.attrs['data-name']}

                    href = urllib.urlencode(href)

                    valid, host = source_utils.is_host_valid(i.content, hostDict)

                    if valid:
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': href,
                                        'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
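Instead of a plain link, this variant packs the hoster's attributes into a single query string so a later resolve step can unpack them. A sketch of the round trip, using the same Python 2 stdlib names the examples import (all values made up):

    from urllib import urlencode    # Python 2, as in the examples
    from urlparse import parse_qs

    href = {'url': '/watch/123', 'data-film': '123', 'data-server': '4', 'data-name': 'ep1'}
    packed = urlencode(href)
    # the same unpack idiom the episode()/sources() methods below use:
    data = parse_qs(packed)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    assert data['data-server'] == '4'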
Example #3
 def __search(self, titles, year, content):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
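The `__search` helpers all match titles by normalizing both sides first: `t = [cleantitle.get(i) for i in set(titles) if i]`, then `cleantitle.get(candidate) in t`. A simplified stand-in for the idea (the real `cleantitle.get` does more than this):

    import re

    def normalize(title):
        # rough stand-in for cleantitle.get(): lowercase, strip non-alphanumerics
        return re.sub('[^a-z0-9]', '', title.lower())

    wanted = [normalize(t) for t in set(['The Movie', 'Der Film']) if t]
    assert normalize('THE  Movie!') in wanted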
Example #4
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = url.replace('/en/', '/de/')

            video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
            if not video_id:
                return sources

            # load player
            query = self.get_player % (video_id)
            query = urlparse.urljoin(self.base_link, query)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

            # for each hoster
            for i in r:
                hoster = dom_parser.parse_dom(i,
                                              'div',
                                              attrs={'class': 'les-title'})
                hoster = dom_parser.parse_dom(hoster, 'strong')
                hoster = hoster[0][1]

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                links = dom_parser.parse_dom(i,
                                             'a',
                                             attrs={'class': 'ep-item'})

                # for each link
                for i in links:
                    if '1080p' in i[0]['title']:
                        quality = '1080p'
                    elif 'HD' in i[0]['title']:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i[0]['id']
                    if not url: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'stream_links'})
            r = dom_parser.parse_dom(r, 'tr')
            r = [(dom_parser.parse_dom(i, 'td'), dom_parser.parse_dom(i, 'td', attrs={'class': 'hide-for-small-only'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content.lower()) for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[1]) for i in r if i[0]]

            for link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'direct': False,
                                'debridonly': False})

            return sources
        except:
            return sources
Example #6
 def __search(self, titles, year, content_type):
     try:
         query = self.search_link % (quote_plus(cleantitle.query(
             titles[0])), content_type)
         query = urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'id': 'search'})
         r = dom_parser.parse_dom(r, 'table')
         r = dom_parser.parse_dom(r,
                                  'tr',
                                  attrs={'class': re.compile('entry\d+')})
         r = [(dom_parser.parse_dom(i, 'a'),
               dom_parser.parse_dom(i,
                                    'img',
                                    attrs={
                                        'class': 'flag',
                                        'alt': 'de'
                                    })) for i in r]
         r = [i[0] for i in r if i[0] and i[1]]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
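The `y = [...]` list above is a year-tolerance window that recurs throughout these helpers: accept the target year plus or minus one (sites disagree about release years) or '0' for undated entries, and sort so dated candidates win. Isolated, with made-up rows:

    year = '2018'
    y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

    rows = [('/undated', 'Title', '0'), ('/dated', 'Title', '2018')]
    rows = sorted(rows, key=lambda i: int(i[2]), reverse=True)  # with year > no year
    match = [i[0] for i in rows if i[2] in y][0]
    assert match == '/dated'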
Example #7
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'],
                  client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #9
    def __search(self, titles, year, season='0'):
        try:
            aj = cache.get(self.__get_ajax_object, 24)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'),
                                                         'query': cleantitle.query(titles[0])})

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #10
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'hosterSiteVideo'})
            r = dom_parser.parse_dom(r, 'li', attrs={'data-lang-key': re.compile('[1|3]')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'h4'),
                  'subbed' if i.attrs['data-lang-key'] == '3' else '') for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2]) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
            r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3]) for i in r]

            for link, host, quality, info in r:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                sources.append(
                    {'source': host, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False,
                     'debridonly': False})

            return sources
        except:
            return sources
Example #11
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'bd'})

            for i in r:
                r = dom_parser.parse_dom(i, 'h3')
                r = dom_parser.parse_dom(r, 'a')
                title = r[0][1]
                y = re.findall('(\d{4})', title, re.DOTALL)[0]
                title = cleantitle.get(title.split('(')[0])

                if title in t and year == y:
                    return source_utils.strip_domain(r[0][0]['href'])
            return
        except:
            return
Example #12
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         r = dom_parser.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search('/(\w+).html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
Example #13
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         url = data.get('url')
         episode = int(data.get('episode', 1))
         r = self.scraper.get(urlparse.urljoin(self.base_link, url)).content
         r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}
         for info, data in r.iteritems():
             data = dom_parser.parse_dom(data, 'tr')
             data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
             data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
             data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
             data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
             data = [(i[0], i[1][0]) for i in data if i[1]]
             for link, hoster in data:
                 valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                 if not valid: continue
                 sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
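Example #13's dubbed/subbed handling parses each language container separately and carries the container's label through as the `info` field. The same pattern in miniature, with placeholder rows:

    sections = {'': ['dub-row'], 'subbed': ['sub-row']}
    entries = []
    for info, rows in sections.items():  # .iteritems() in the Python 2 original
        for row in rows:
            entries.append({'language': 'de', 'info': info, 'url': row,
                            'direct': False, 'debridonly': False})
    assert sorted(e['info'] for e in entries) == ['', 'subbed']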
Example #14
    def __search(self, imdb):
        try:
            l = ['1', '15']

            r = client.request(
                urlparse.urljoin(self.base_link, self.search_link % imdb))
            r = dom_parser.parse_dom(r,
                                     'table',
                                     attrs={'id': 'RsltTableStatic'})
            r = dom_parser.parse_dom(r, 'tr')
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i,
                                       'img',
                                       attrs={'alt': 'language'},
                                       req='src')) for i in r]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].attrs['src'])
                 for i in r if i[0] and i[1]]
            r = [(i[0], i[1], re.findall('.+?(\d+)\.', i[2])) for i in r]
            r = [(i[0], i[1], i[2][0] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]))  # german > german/subbed
            r = [i[0] for i in r if i[2] in l][0]

            return source_utils.strip_domain(r)
        except:
            return
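Example #14 picks a link by language preference: the flag images encode a numeric id in their filename, and judging by the source comment, ascending sort prefers German (1) over German-subbed (15), with the `l` whitelist dropping everything else. Isolated, with made-up rows:

    l = ['1', '15']
    rows = [('/subbed', 'Title', '15'), ('/german', 'Title', '1')]
    rows = sorted(rows, key=lambda i: int(i[2]))  # german > german/subbed
    best = [i[0] for i in rows if i[2] in l][0]
    assert best == '/german'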
Example #15
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r,
                                     'figure',
                                     attrs={'class': 'pretty-figure'})
            r = dom_parser.parse_dom(r, 'figcaption')

            for i in r:
                title = client.replaceHTMLCodes(i[0]['title'])
                title = cleantitle.get(title)

                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #16
    def __search_movie(self, imdb, year):
        try:
            query = urlparse.urljoin(self.base_link, self.search_link % imdb)

            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'ml-item-content'})
            r = [(dom_parser.parse_dom(i,
                                       'a',
                                       attrs={'class': 'ml-image'},
                                       req='href'),
                  dom_parser.parse_dom(i, 'ul', attrs={'class':
                                                       'item-params'}))
                 for i in r]
            r = [(i[0][0].attrs['href'],
                  re.findall('calendar.+?>.+?(\d{4})',
                             ''.join([x.content for x in i[1]]))) for i in r
                 if i[0] and i[1]]
            r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[1]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if i[1] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #17
    def __get_correct_link(_url, content, checkval):
        try:
            if not _url:
                return

            _url = urlparse.urljoin(self.base_link, _url)
            r = client.request(_url)

            r = re.findall('<h4>%s[^>]*</h4>(.*?)<div' % content, r,
                           re.DOTALL | re.IGNORECASE)[0]
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i, 'span')) for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0], i[1] if i[1] else '0') for i in r]
            r = [i[0] for i in r if int(i[1]) == int(checkval)][0]
            r = re.sub('/(2160p|1080p|720p|x264|3d)',
                       '',
                       r,
                       flags=re.I)

            return source_utils.strip_domain(r)
        except:
            return
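The `re.sub` at the end of the helper above scrubs resolution tokens out of the path before the link is returned. Isolated, with a made-up path:

    import re

    path = '/stream/film-123/1080p'
    clean = re.sub('/(2160p|1080p|720p|x264|3d)', '', path, flags=re.I)
    assert clean == '/stream/film-123'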
Example #18
 def __search(self, titles, year):
     try:
         query = self.search_link % (quote_plus(cleantitle.query(
             titles[0])))
         query = urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'id': 'main'})
         r = dom_parser.parse_dom(r, 'div', attrs={'class': 'panel-body'})
         r = [
             (dom_parser.parse_dom(i.content,
                                   'h4',
                                   attrs={'class': 'title-list'}),
              dom_parser.parse_dom(i.content,
                                   'a',
                                   attrs={'href':
                                          re.compile('.*/year/.*')}))
             for i in r
         ]
         r = [(dom_parser.parse_dom(i[0][0].content, 'a', req='href'),
               i[1][0].content if i[1] else '0') for i in r if i[0]]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               re.sub('<.+?>|</.+?>', '', i[1])) for i in r
              if i[0] and i[1]]
         r = [(i[0], i[1], i[2].strip()) for i in r if i[2]]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r if cleantitle.get(i[1]) in t and i[2] == year
         ][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #19
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '+s%02d' % int(season)
            search_url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title.replace('-', '+'),
                                                                               url['year'])))
            search_results = self.scraper.get(search_url, headers={'referer': self.base_link}).content

            not_found = dom_parser.parse_dom(search_results, 'div', {'class': 'not-found'})
            if len(not_found) > 0:
                return

            links = client.parseDOM(search_results, "a", ret="href", attrs={"class": "ml-mask jt"})
            results = []
            for link in links:
                if '%ss%02d' % (cleantitle.get(url['tvshowtitle']), int(season)) in cleantitle.get(link):
                    link_results = self.scraper.get(link, headers={'referer': search_url}).content
                    r2 = dom_parser.parse_dom(link_results, 'div', {'id': 'ip_episode'})
                    r3 = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r2 if i]
                    for i in r3[0]:
                        if i.content == 'Episode %s' % episode:
                            results.append(i.attrs['href'])
            return results
        except:
            return
Example #20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            tvshowtitle = data['tvshowtitle']
            localtvshowtitle = data['localtvshowtitle']
            aliases = source_utils.aliases_to_array(eval(data['aliases']))

            url = self.__search([localtvshowtitle] + aliases, data['year'],
                                season)
            if not url and tvshowtitle != localtvshowtitle:
                url = self.__search([tvshowtitle] + aliases, data['year'],
                                    season)
            if not url: return

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(
                r, 'ul', attrs={'class': ['list-inline', 'list-film']})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r if i]
            r = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0')
                 for i in r]
            r = [i[0] for i in r if int(i[1]) == int(episode)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
                r = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=r)
            else:
                r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
            r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')) for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
            r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]

            for url, quality in r:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #22
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = dom_parser.parse_dom(r,
                                     'td',
                                     attrs={
                                         'data-title-name':
                                         re.compile('Season %02d' %
                                                    int(season))
                                     })
            r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']
            r = client.request(urlparse.urljoin(self.base_link, r))
            r = dom_parser.parse_dom(r,
                                     'td',
                                     attrs={
                                         'data-title-name':
                                         re.compile('Episode %02d' %
                                                    int(episode))
                                     })
            r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

            return source_utils.strip_domain(r)
        except:
            return
Example #23
 def __search(self, titles, year):
     try:
         t = [cleantitle.get(i) for i in set(titles) if i]
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         r = client.request(urljoin(self.base_link, self.search_link),
                            post={'query': cleantitle.query(titles[0])})
         r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
         r = dom_parser.parse_dom(r,
                                  'div',
                                  attrs={'class': 've-screen'},
                                  req='title')
         r = [(dom_parser.parse_dom(i, 'a', req='href'),
               i.attrs['title'].split(' - ')[0]) for i in r]
         r = [(i[0][0].attrs['href'], i[1],
               re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
         r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
               i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
         r = sorted(r, key=lambda i: int(i[2]),
                    reverse=True)  # with year > no year
         r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
         return source_utils.strip_domain(r)
     except:
         return
Example #24
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         if url is None:
             return sources
         r = self.scraper.get(url).content
         qual = re.findall(">(\w+)<\/p", r)
         # take the first match (if any) instead of looping and keeping the last
         qual = qual[0] if qual else 'SD'
         quality, info = source_utils.get_release_quality(qual, qual)
         r = dom_parser.parse_dom(r, 'div', {'id': 'servers-list'})
         r = [dom_parser.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             url = {
                 'url': i.attrs['href'],
                 'data-film': i.attrs['data-film'],
                 'data-server': i.attrs['data-server'],
                 'data-name': i.attrs['data-name']
             }
             url = urllib.urlencode(url)
             valid, host = source_utils.is_host_valid(i.content, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example #25
    def __search(self, search_link, imdb, titles):
        try:
            query = search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
            r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [
                i.attrs['href'] for i in r
                if i and cleantitle.get(i.content) in t
            ][0]

            url = source_utils.strip_domain(r)

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'href': re.compile('.*/tt\d+.*')},
                                     req='href')
            r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
            r = [i[0] for i in r if i]

            return url if imdb in r else None
        except:
            return
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
            quality, info = source_utils.get_release_quality(quality)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
            r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
                  dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
            r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if
                 i[0] and i[1]]

            for hoster, id in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de',
                                'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id,
                                'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
Example #27
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
            result = self.scraper.get(url, timeout=10).content

            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [
                i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video']
                for i in dom]

            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = self.scraper.get(url, timeout=10).content
                    dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                    dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'],
                            i.attrs['label']) for i in dom if i]
                elif 'ocloud.stream' in url:
                    result = self.scraper.get(url, timeout=10).content
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                    dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
                if dom:
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(r[0], hostDict)

                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                            for x in urls:
                                # only direct streams report a size; default to None
                                # so the check below never reads an unbound variable
                                size = source_utils.get_size(x['url']) if direct else None
                                if size:
                                    sources.append(
                                        {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                         'direct': direct, 'debridonly': False, 'info': size})
                                else:
                                    sources.append(
                                        {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                         'direct': direct, 'debridonly': False})
                    except:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        url.decode('utf-8')
                        sources.append(
                            {'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                             'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
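Example #27 normalizes protocol-relative embed URLs (`//host/path`) by prefixing the scheme. The same check, isolated:

    def absolutize(u):
        # mirror the `'https:' + u` fixup used for the data-video attributes
        return u if u.startswith('https') else 'https:' + u

    assert absolutize('//cdn.example.org/e/1') == 'https://cdn.example.org/e/1'
    assert absolutize('https://cdn.example.org/e/1') == 'https://cdn.example.org/e/1'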
Example #28
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         query = urlparse.urljoin(self.base_link, url)
         r = client.request(query)
         r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
         r = dom_parser.parse_dom(r, 'tbody')
         r = dom_parser.parse_dom(r, 'tr')
         for i in r:
             # run the hoster search once and skip rows without a label,
             # instead of repeating it and crashing on unmatched rows
             match = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1])
             if not match or not match.group().strip():
                 continue
             hoster = match.group().strip()
             link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
             rel = re.search(
                 '(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)',
                 i[1]).group().strip()
             quality, info = source_utils.get_release_quality(rel)
             if not quality:
                 quality = 'SD'
             valid, hoster = source_utils.is_host_valid(hoster, hostDict)
             if not valid: continue
             sources.append({
                 'source': hoster,
                 'quality': quality,
                 'language': 'de',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #29
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = i[1]
                if re.search('\*(?:.*?)\*', title) is not None:
                    title = re.sub('\*(?:.*?)\*', '', title)
                title = cleantitle.get(title)
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])
            # keep scanning instead of bailing after the first miss
            return
        except:
            return
Example #30
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url == None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         url = urlparse.urljoin(self.base_link, data.get('url'))
         season = data.get('season')
         episode = data.get('episode')
         r = client.request(url)
         if season and episode:
             r = dom_parser.parse_dom(r,
                                      'select',
                                      attrs={'id': 'SeasonSelection'},
                                      req='rel')[0]
             r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
             r = urlparse.parse_qs(r)
             r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
             r = urlparse.urljoin(
                 self.base_link, self.get_links_epi %
                 (r['Addr'], r['SeriesID'], season, episode))
             r = client.request(r)
         r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
         r = dom_parser.parse_dom(r,
                                  'li',
                                  attrs={'id': re.compile('Hoster_\d+')},
                                  req='rel')
         r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content)
              for i in r if i[0] and i[1]]
         r = [(i[0],
               re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1]))
              for i in r]
         r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2])
              for i in r if len(i[1]) > 0]
         for link, hoster, mirrors in r:
             valid, hoster = source_utils.is_host_valid(hoster, hostDict)
             if not valid: continue
             u = urlparse.parse_qs('&id=%s' % link)
             u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])
             for x in range(0, int(mirrors)):
                 url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                 if season and episode:
                     url += "&Season=%s&Episode=%s" % (season, episode)
                 try:
                     sources.append({
                         'source': hoster,
                         'quality': 'SD',
                         'language': 'de',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except:
         return sources
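The last example fans each link out into one source entry per mirror, appending season and episode for show playback. A sketch of that fan-out (`mirror_link` here is a made-up format string standing in for `self.mirror_link`):

    mirror_link = '/ajax/mirror?id=%s&hoster=%s&mirror=%d'

    def mirror_urls(link_id, hoster, mirrors, season=None, episode=None):
        urls = []
        for x in range(int(mirrors)):
            u = mirror_link % (link_id, hoster, x + 1)
            if season and episode:
                u += '&Season=%s&Episode=%s' % (season, episode)
            urls.append(u)
        return urls

    assert len(mirror_urls('7', 'vidoza', '3', season='1', episode='2')) == 3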