Esempio n. 1
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Resolve a show page URL to the stream-list URL of one episode.

     url: the show page URL; season/episode select the entry.  Returns
     self.list_url filled with the video id, or None on any failure
     (all errors are deliberately swallowed, per this file's style).
     """
     try:
         if not url: return
         # Episode marker as it appears in the listing, e.g. '1x05'.
         sep = '%dx%02d' % (int(season), int(episode))
         r = client.request(url)
         # The site sometimes serves a popup interstitial; retry up to 5 times.
         if 'To proceed, you must allow popups' in r:
             for i in range(0, 5):
                 r = client.request(url)
                 if 'To proceed, you must allow popups' not in r: break
         r = dom_parser2.parse_dom(r, 'span', attrs={'class': 'list'})
         r1 = dom_parser2.parse_dom(r, 'br')
         r1 = [dom_parser2.parse_dom(i, 'a', req='href') for i in r1]
         try:
             # 1x01 is linked directly in the list span; other episodes are
             # found by scanning the <br>-separated anchors for the marker.
             if int(season) == 1 and int(episode) == 1:
                 url = dom_parser2.parse_dom(r, 'a', req='href')[1].attrs['href']
             else:
                 for i in r1:
                     if sep in i[0].content:
                         url = urlparse.urljoin(self.base_link, i[0].attrs['href'])
         except:
             pass
         # Drop the trailing slash, keep the '?v=' video id, build the list URL.
         url = url[:-1]
         url = url.split('?v=')[1]
         url = self.list_url % url
         return url
     except:
         return
Esempio n. 2
0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from a page's 'servers-list' block.

        The page-level quality label (">HD</p"-style) maps to '720p',
        anything else to 'SD'.  Returns a list of source dicts; on any
        failure the partial (possibly empty) list is returned.
        """
        sources = []
        try:
            if url == None:
                return sources

            html = client.request(url)
            # The first ">word</p" match encodes the page's overall quality.
            labels = re.findall(">(\w+)<\/p", html)
            quality = "720p" if labels[0] == "HD" else "SD"

            block = dom_parser2.parse_dom(html, 'div', {'id': 'servers-list'})
            anchors = [dom_parser2.parse_dom(b, 'a', req=['href']) for b in block if b]

            for anchor in anchors[0]:
                # Pack the playback parameters into a query string for resolve().
                payload = urllib.urlencode({
                    'url': anchor.attrs['href'],
                    'data-film': anchor.attrs['data-film'],
                    'data-server': anchor.attrs['data-server'],
                    'data-name': anchor.attrs['data-name']
                })
                sources.append({
                    'source': anchor.content,
                    'quality': quality,
                    'language': 'en',
                    'url': payload,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
Esempio n. 3
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve a show query-string URL to one episode's page URL.

        url: urlencoded query (must contain 'tvshowtitle').  A season page
        is located via the site search; the episode link is then read from
        the page's 'details' block.  Returns the episode href or None.
        """
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                # Find the season page: match show title and season number
                # against each search result's title/status markup.
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
                r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                      dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0]) for i in r if i]
                r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                      re.findall('(\d+)', i[1].content)[0]) for i in r if i]
                r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                     if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(
                        int(season)))]
                url = r[0][0]
            except:
                # On any search failure, fall through with url unchanged.
                pass
            # From the season page, pick the anchor whose text is the episode number.
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Esempio n. 4
0
 def sources(self, url, hostDict, hostprDict):
     """Collect hoster sources from a page's 'server_play' anchors.

     Each anchor's host slug is taken from its '/<host>.html' href and
     kept only if it appears in hostDict.

     Fix: the outer except returned None, unlike every sibling sources()
     implementation in this file — callers that iterate the result would
     fail.  It now always returns the (possibly empty) list.
     """
     sources = []
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         # Pair each href with the host slug embedded in '/<host>.html'.
         r = [(i[0].attrs['href'],
               re.search('/(\w+).html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
Esempio n. 5
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site for a movie; return the page URL of the first
     entry whose title and year both match, or None on failure."""
     try:
         slug = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link % slug.replace('-', '+'))
         html = cache.get(client.request, 1, search_url)
         items = dom_parser2.parse_dom(html, 'li', {'class': 'item'})
         # (title-anchor, year) for every result card.
         items = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                   re.findall('status-year">(\d{4})</div', i.content, re.DOTALL)[0])
                  for i in items if i]
         # (href, plain title text, year)
         items = [(i[0][0].attrs['href'],
                   re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                   i[1])
                  for i in items if i]
         matches = [i for i in items
                    if cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year]
         return matches[0][0]
     except Exception:
         return
Esempio n. 6
0
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Search the site for a TV show and return its site-relative page
        URL (domain stripped), or None when nothing matches."""
        try:
            slug = cleantitle.geturl(tvshowtitle)
            search_url = urlparse.urljoin(self.base_link, self.search_link % slug.replace('-', '+'))
            html = client.request(search_url)
            cards = client.parseDOM(html, 'div', {'class': 'result-item'})
            # (anchor, alt-text title, year span) for every result card.
            parsed = [(dom_parser2.parse_dom(c, 'a', req='href')[0],
                       client.parseDOM(c, 'img', ret='alt')[0],
                       dom_parser2.parse_dom(c, 'span', attrs={'class': 'year'})) for c in cards]
            hits = [(p[0].attrs['href'], p[1], p[2][0].content) for p in parsed
                    if (cleantitle.get(p[1]) == cleantitle.get(tvshowtitle) and p[2][0].content == year)]
            return source_utils.strip_domain(hits[0][0])
        except:
            return
Esempio n. 7
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Resolve a show query-string URL to one episode's page URL.

     Falls back to returning the season search URL itself when no
     'Episode N' anchor is found (matching the original behaviour).
     """
     try:
         if url == None:
             return
         data = urlparse.parse_qs(url)
         data = dict([(k, data[k][0]) if data[k] else (k, '') for k in data])
         slug = cleantitle.geturl(data['tvshowtitle']) + '-s%02d' % int(season)
         page = urlparse.urljoin(self.base_link, (self.search_link % (slug, data['year'])))
         html = client.request(page)
         blocks = dom_parser2.parse_dom(html, 'div', {'id': 'ip_episode'})
         anchors = [dom_parser2.parse_dom(b, 'a', req=['href']) for b in blocks if b]
         result = page
         # Keep the last anchor whose text matches, as the original did.
         for a in anchors[0]:
             if a.content == 'Episode %s' % episode:
                 result = a.attrs['href']
         return result
     except:
         return
Esempio n. 8
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Find one episode's page URL among a show page's 'el-item' rows.

     Each row carries a season div, an episode div and a link; the first
     row matching 'Season N' / 'Episode N' wins.  Returns None otherwise.
     """
     try:
         if url == None:
             return
         html = client.request(url)
         rows = dom_parser2.parse_dom(html, 'div', {'class': 'el-item'})
         rows = [(dom_parser2.parse_dom(row, 'div', {'class': 'season'}),
                  dom_parser2.parse_dom(row, 'div', {'class': 'episode'}),
                  dom_parser2.parse_dom(row, 'a', req='href'))
                 for row in rows if row]
         wanted_season = 'Season %01d' % int(season)
         wanted_episode = 'Episode %01d' % int(episode)
         hits = [row[2][0].attrs['href'] for row in rows
                 if row[0][0].content == wanted_season and row[1][0].content == wanted_episode]
         return hits[0] if hits else None
     except:
         return
Esempio n. 9
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Search the site for a movie and return the first result URL
        whose 'Release: <year>' matches, or None.

        Fix: the original tested ``r[0]``, which raises IndexError on an
        empty result list and relied on the blanket except for control
        flow; the list itself is now tested instead.
        """
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')
            url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
            r = client.request(url)

            r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            # Pair each anchor href with its 'Release: <year>' match.
            r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
            r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1]) for i in r if i[1] == year]
            if r:
                return r[0][0]
            return
        except Exception:
            return
Esempio n. 10
0
	def _get_episode_url(self, show_url, season, episode):
		"""Locate one episode's URL inside a show's season page.

		Builds href/title regex patterns for the requested season and
		episode, fetches the season page, isolates the 'episodes' div and
		delegates the actual match to _default_get_episode_url.

		NOTE(review): ``video`` on the last line is not defined in this
		method or its parameters — as written this raises NameError.
		Presumably the method originally took a ``video`` object (the
		usual framework convention) rather than season/episode; confirm
		against the caller before relying on this method.
		"""
		episode_pattern = 'href="([^"]+/season/%s/episode/%s/?)"' % (season, episode)
		title_pattern = 'href="(?P<url>[^"]+)"[^>]+title="(?:S\d+\s*E\d+:\s*)?(?P<title>[^"]+)'
		headers = {'Referer': urlparse.urljoin(self.base_url, show_url)}
		season_url = urlparse.urljoin(show_url, '/season/%s' % (season))
		season_url = urlparse.urljoin(self.base_url, season_url)
		html = self._http_get(season_url, headers=headers, cache_limit=2)
		fragment = dom_parser2.parse_dom(html, 'div', {'id': 'episodes'})
		
		return self._default_get_episode_url(fragment, video, episode_pattern, title_pattern)
Esempio n. 11
0
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Search for a TV show and return the first matching absolute
        result URL (title substring and year must both appear)."""
        try:
            slug = cleantitle.geturl(tvshowtitle)
            search_url = self.search_link % (slug.replace('-', '+'), year)
            html = client.request(search_url)

            # The site sometimes serves a popup interstitial; retry a few times.
            if 'To proceed, you must allow popups' in html:
                for _ in range(0, 5):
                    html = client.request(search_url)
                    if 'To proceed, you must allow popups' not in html:
                        break

            titles = dom_parser2.parse_dom(html, 'div', attrs={'class': 'title'})
            anchors = [dom_parser2.parse_dom(t, 'a', req='href') for t in titles]
            matches = [urlparse.urljoin(self.base_link, a[0].attrs['href'])
                       for a in anchors
                       if tvshowtitle.lower() in a[0].content.lower() and year in a[0].content]
            return matches[0]
        except:
            return
Esempio n. 12
0
 def resolve(self, url):
     """Fetch the page and return the first episode-link anchor's href,
     HTML-unescaped and UTF-8 encoded; None on any failure."""
     try:
         page = client.request(url)
         anchor = dom_parser2.parse_dom(
             page, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         resolved = client.replaceHTMLCodes(anchor.attrs['href'])
         return resolved.encode('utf-8')
     except:
         return
Esempio n. 13
0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster sources from a page's link-list ('ll-item') rows.

        Each row supplies an anchor (href + host label) and an optional
        'notes' div used for release-quality detection.  Returns a list of
        source dicts; the partial list is returned on any failure.
        """
        try:
            sources = []
            if url == None:
                return sources
            html = client.request(url)
            rows = dom_parser2.parse_dom(html, 'div', {'class': 'll-item'})
            rows = [(dom_parser2.parse_dom(row, 'a', req='href'),
                     dom_parser2.parse_dom(row, 'div', {'class': 'notes'}))
                    for row in rows if row]
            entries = [(row[0][0].attrs['href'], row[0][0].content,
                        row[1][0].content if row[1] else 'None') for row in rows]
            for link, host_label, notes in entries:
                try:
                    link = client.replaceHTMLCodes(link)
                    link = link.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host_label, hostDict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    # Quality/info are derived from the row's notes text.
                    quality, info = source_utils.get_release_quality(notes, notes)
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Esempio n. 14
0
	def __get_search_url(self):
		"""Return (search_url, token) scraped from the site's autocomplete JS.

		Falls back to the static SEARCH_URL and its last 10 characters when
		the 'flixanity' script cannot be found or parsed.

		Fix: the original reused ``u`` for the regex match object, so when
		the script was found but the ``u = "..."`` regex failed, the string
		fallback was clobbered with a Match object or None.  The matches
		now use their own names and ``u`` keeps the fallback value unless a
		full parse succeeds.
		"""
		search_url = SEARCH_URL
		u = search_url[-10:]
		html = self._http_get(self.base_url, cache_limit=24)
		for attrs, _content in dom_parser2.parse_dom(html, 'script', {'type': 'text/javascript'}, req='src'):
			script = attrs['src']
			if 'flixanity' not in script: continue
			html = self._http_get(script, cache_limit=24)
			if 'autocomplete' not in html: continue

			r_match = re.search('r\s*=\s*"([^"]+)', html)
			n_match = re.search('n\s*=\s*"([^"]+)', html)
			u_match = re.search('u\s*=\s*"([^"]+)', html)
			if r_match and n_match and u_match:
				u = u_match.group(1)
				search_url = r_match.group(1) + n_match.group(1)[8:16] + u
				break
		return search_url, u
Esempio n. 15
0
    def sources(self, url, hostDict, hostprDict):
        """Collect debrid-only episode links for a tvshow query URL.

        Strategy: query the site's search endpoint first; when that finds
        nothing, fall back to a DLE-style 'do=search' POST.  The chosen
        season page is then scanned for per-episode download links.

        Fixes vs. the original:
        * the fallback was guarded by ``len(url) < 0`` — never true, so it
          was dead code; it now runs when the first search found nothing;
        * in that fallback, ``url = r[0][0]`` indexed the first character
          of an href string; it now takes the href itself.
        """
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            t = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            # Primary search: POST the cleaned title to the search endpoint.
            query = cleantitle.getsearch(t)
            r = urlparse.urljoin(self.base_link, self.search_link2)
            post = {'query': query}
            r = client.request(r, post=post)
            r = dom_parser2.parse_dom(r, 'a')
            r = [(i.attrs['href'], dom_parser2.parse_dom(i.content, 'span', attrs={'class': 'searchheading'})) for i in
                 r]
            try:
                url = []
                for i in r:
                    t1 = i[1][0].content
                    t2 = re.sub('[Ss]eason\s*\d+', '', t1)
                    if not str(int(season)) in t1: continue
                    if cleantitle.get(t) == cleantitle.get(t2) and not 'pack' in i[0]:
                        url.append(i[0])
                    # Narrow to a single URL, preferring an 'hd' variant.
                    if len(url) > 1:
                        url = [(i) for i in url if 'hd' in i][0]
                    else:
                        url = url[0]

            except:
                pass
            # Fallback search, used only when the first strategy found nothing.
            if len(url) == 0:
                try:
                    r = urlparse.urljoin(self.base_link, self.search_link)
                    t = '%s season %s' % (t, season)
                    post = 'do=search&subaction=search&story=%s' % urllib.quote_plus(cleantitle.getsearch(t))
                    r = client.request(r, post=post)
                    r = dom_parser2.parse_dom(r, 'h4')
                    r = [dom_parser2.parse_dom(i.content, 'a', req=['href']) for i in r if i]
                    r = [(i[0].attrs['href'], i[0].content) for i in r if i]
                    r = [(i[0], i[1]) for i in r if t.lower() == i[1].replace(' -', '').lower()]
                    r = [(i[0]) for i in r if not 'pack' in i[0]]
                    url = r[0]

                except:
                    pass

            links = []

            r = client.request(url)
            name = re.findall('<b>Release Name :.+?">(.+?)</span>', r, re.DOTALL)[0]
            link = client.parseDOM(r, 'span', attrs={'class': 'downloads nobr'})
            link = [(re.findall('<a href="(.+?)"\s*target="_blank">[Ee]pisode\s*(\d+)</a>', i, re.DOTALL)) for i in
                    link]
            for item in link:
                link = [(i[0], i[1]) for i in item if i[1] == str(episode)]
                links.append(link[0][0])

            quality, info = source_utils.get_release_quality(name, None)

            for url in links:
                try:
                    # 'protect' pages hide the real link behind a redirect page.
                    if "protect" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">', redirect)
                        url = url[0]

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources
Esempio n. 16
0
    def sources(self, url, hostDict, hostprDict):
        """Collect sources by firing one worker thread per quality/link pair.

        Scrapes anti-bot tokens (secret, t, s, m) from the page's inline
        JavaScript, then for each 'ripdiv' quality block spawns a
        self._get_sources worker that appends into self._sources.
        Returns self._sources (possibly partial) on any failure.
        """
        self._sources = []
        try:
            if not url: return self._sources

            self.hostDict = hostDict
            self.hostprDict = hostprDict

            referer = url

            html = client.request(url)
            # The site sometimes serves a popup interstitial; retry up to 5 times.
            if 'To proceed, you must allow popups' in html:
                for i in range(0, 5):
                    html = client.request(url)
                    if 'To proceed, you must allow popups' not in html: break
            # The page's JS concatenates the secret from one or two string
            # literals; join whichever groups matched.
            match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)

            secret = ''.join(match.groups(''))
            match = re.search('"&t=([^"]+)', html)
            t = match.group(1)
            match = re.search('(?:\s+|,)s\s*=(\d+)', html)
            s_start = int(match.group(1))

            match = re.search('(?:\s+|,)m\s*=(\d+)', html)
            m_start = int(match.group(1))

            threads = []

            for fragment in dom_parser2.parse_dom(html, 'div', {'class': 'ripdiv'}):
                # The block's leading <b> label names the rip quality.
                match = re.match('<b>(.*?)</b>', fragment.content)
                if match:
                    q_str = match.group(1).replace(' ', '').upper()
                    if '1080' in q_str:
                        quality = '1080p'
                    elif '720' in q_str:
                        quality = '720p'
                    elif '4k' in q_str.lower():
                        quality = '4K'
                    else:
                        quality = 'SD'
                else:
                    quality = 'SD'

                pattern = '''onclick='go\((\d+)\)'>([^<]+)(<span.*?)</a>'''
                for match in re.finditer(pattern, fragment.content):
                    link_id, label, host_fragment = match.groups()
                    # s/m are jittered per request, mimicking the site's JS.
                    s = s_start + random.randint(3, 1000)
                    m = m_start + random.randint(21, 1000)
                    post = self.post % (link_id, s, m, secret, t)
                    url = urlparse.urljoin(self.base_link,
                                           'membersonly/components/com_iceplayer/video.phpAjaxResp.php?s=%s&t=%s' % (
                                               link_id, t))

                    threads.append(workers.Thread(self._get_sources, url, post, host_fragment, quality, referer))

            [i.start() for i in threads]
            [i.join() for i in threads]

            # Busy-wait until every worker has finished before returning.
            alive = [x for x in threads if x.is_alive() == True]
            while alive:
                alive = [x for x in threads if x.is_alive() == True]
                time.sleep(0.1)
            return self._sources
        except:
            return self._sources
Esempio n. 17
0
 def sources(self, url, hostDict, hostprDict):
     """Search the site for the release and collect debrid-only hoster
     links, with quality/size info parsed from the release name.

     url: query string with title/year (and season/episode for shows).
     Always returns a list of source dicts.

     Fixes vs. the original:
     * the file-size regex was run against ``name[2]`` (the third
       character of the release name) instead of ``name``, so a size was
       never extracted;
     * the outer except returned None instead of the sources list, unlike
       every sibling sources() implementation in this file.
     """
     sources = []
     try:
         if url == None: return sources
         if debrid.status() == False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         # 'S01E02' for episodes, the year for movies; validates result names.
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if \
             'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         headers = {
             'Referer':
             url,
             'User-Agent':
             'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
         }
         r = client.request(url, headers=headers)
         items = dom_parser2.parse_dom(r, 'h2')
         items = [
             dom_parser2.parse_dom(i.content, 'a', req=['href'])
             for i in items
         ]
         items = [(i[0].content, i[0].attrs['href']) for i in items]
         hostDict = hostprDict + hostDict
         for item in items:
             try:
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 headers = {
                     'Referer':
                     url,
                     'User-Agent':
                     'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
                 }
                 r = client.request(item[1], headers=headers)
                 links = dom_parser2.parse_dom(r,
                                               'a',
                                               req=[
                                                   'href',
                                                   'rel',
                                               ])
                 links = [i.attrs['href'] for i in links]
                 for url in links:
                     try:
                         if hdlr in name:
                             # Strip everything up to the year/episode marker,
                             # then split the tail into lowercase format tokens.
                             fmt = re.sub(
                                 '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                 '', name.upper())
                             fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                             fmt = [i.lower() for i in fmt]
                             if any(
                                     i.endswith(('subs', 'sub', 'dubbed',
                                                 'dub')) for i in fmt):
                                 raise Exception()
                             if any(i in ['extras'] for i in fmt):
                                 raise Exception()
                             if '2160p' in fmt:
                                 quality = '4K'
                             elif '1080p' in fmt:
                                 quality = '1080p'
                             elif '720p' in fmt:
                                 quality = '720p'
                             else:
                                 quality = 'SD'
                             if any(i in ['dvdscr', 'r5', 'r6']
                                    for i in fmt):
                                 quality = 'SCR'
                             elif any(i in [
                                     'camrip', 'tsrip', 'hdcam', 'hdts',
                                     'dvdcam', 'dvdts', 'cam', 'telesync',
                                     'ts'
                             ] for i in fmt):
                                 quality = 'CAM'
                             info = []
                             if '3d' in fmt: info.append('3D')
                             try:
                                 # Extract a human-readable size like '1.40 GB'.
                                 # FIX: search the full name, not name[2].
                                 size = re.findall(
                                     '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                     name)[-1]
                                 div = 1 if size.endswith(
                                     ('GB', 'GiB')) else 1024
                                 size = float(
                                     re.sub('[^0-9|/.|/,]', '', size)) / div
                                 size = '%.2f GB' % size
                                 info.append(size)
                             except:
                                 pass
                             if any(i in ['hevc', 'h265', 'x265']
                                    for i in fmt):
                                 info.append('HEVC')
                             info = ' | '.join(info)
                             if not any(x in url
                                        for x in ['.rar', '.zip', '.iso']):
                                 url = client.replaceHTMLCodes(url)
                                 url = url.encode('utf-8')
                                 host = \
                                     re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                 if host in hostDict:
                                     host = client.replaceHTMLCodes(host)
                                     host = host.encode('utf-8')
                                     sources.append({
                                         'source': host,
                                         'quality': quality,
                                         'language': 'en',
                                         'url': url,
                                         'info': info,
                                         'direct': False,
                                         'debridonly': True
                                     })
                     except:
                         pass
             except:
                 pass
         # Prefer non-CAM sources when any exist.
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check: sources = check
         return sources
     except:
         return sources
Esempio n. 18
0
    def sources(self, url, hostDict, hostprDict):
        """Search the site for the release and collect debrid-only hoster
        links.

        url: query string with title/year (and season/episode for shows).
        Returns a list of source dicts; on failure the traceback is
        printed and whatever was collected so far is returned.
        """
        try:
            sources = []

            if url == None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            # 'S01E02' for episodes, the year for movies; validates result names.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            # Each <h2> wraps one result anchor: [(href, link text)].
            r = [re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i, re.DOTALL) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    # Strip year/episode/3D markers to compare bare titles.
                    t1 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    # Fetch the result page and gather its external links,
                    # each paired with the cleaned release name.
                    data = client.request(urlparse.urljoin(self.base_link, item[0][0]))
                    data = dom_parser2.parse_dom(data, 'a', attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(name, item[1])

                    # Extract a human-readable size like '1.40 GB' when present.
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            # Prefer non-CAM sources when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            print('TVRelease - Exception: \n' + str(failure))
            return sources
Esempio n. 19
0
    def sources(self, url, hostDict, hostprDict):
        """Search the site (DLE 'do=search' POST) for a movie release and
        collect its download links.

        url: query string with title/year/imdb.  Results are matched by
        imdb id and year; each result page's 'r-content' block yields
        (name, link) pairs that become source dicts.  Links are served
        either via FTP ('COV', direct) or turbobit.net (non-direct).
        Returns the collected list; on failure logs the traceback and
        returns whatever was gathered.
        """
        try:
            sources = []

            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            # Keep only result boxes mentioning the wanted imdb id.
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    # The result title carries '(year)'; must match the request.
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    # Size as advertised in the result title, '0' when absent.
                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    # (release name, link) pairs from the content block.
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in ['dvdscr', 'r5', 'r6']
                                     for i in name):
                                quality = 'SCR'
                            elif any(i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ] for i in name):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in ['hevc', 'h265', 'x265']
                                   for i in name):
                                info.append('HEVC')
                            # Normalize the advertised size to '<n> GB'.
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            # FTP links play directly; everything else is
                            # treated as a turbobit.net hoster link.
                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            # if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': False
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources