Example No. 1
 def sources(self, url, hostDict, hostprDict):
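     # Resolve tvmovieflix "Report Broken" ids into embed pages, then emit direct mp4 streams or external host links.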
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         r = cfScraper.get(url).content
         match = re.compile(
             '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
             re.DOTALL | re.M).findall(r)
         for link in match:
             if "/show/" in url:
                 surl = "http://www.tvmovieflix.com/e/" + link
             else:
                 surl = "http://www.tvmovieflix.com/m/" + link
             i = cfScraper.get(surl).content
             match = re.compile('<IFRAME.+?SRC="(.+?)"',
                                re.DOTALL | re.IGNORECASE).findall(i)
             for link in match:
                 if "realtalksociety.com" in link:
                     r = requests.get(link).content
                     videos = re.compile(
                         '<source src="(.+?)" type="video/mp4">',
                         re.DOTALL | re.IGNORECASE).findall(r)
                     # use a distinct loop variable so the outer url parameter is not clobbered
                     for vurl in videos:
                         valid, host = source_utils.is_host_valid(
                             vurl, hostDict)
                         quality, info = source_utils.get_release_quality(
                             vurl, vurl)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': vurl,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 2
    def _get_sources(self, item):
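        # 1337x: fetch the torrent detail page, take its first magnet link, strip tracker params and queue a debrid-only source.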
        try:
            name = item[0]
            quality, info = source_utils.get_release_quality(name, item[1])
            info.insert(0, item[2])
            data = cfScraper.get(item[1]).content
            data = ensure_text(data, errors='replace')
            data = client.parseDOM(data, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            url = url.split('&tr')[0]
            info = ' | '.join(info)

            self._sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': item[3],
                'name': name
            })
        except:
            log_utils.log('1337x_exc1', 1)
            pass
Example No. 3
 def sources(self, url, hostDict, hostprDict):
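     # Collect host links from the "movie_version_link" spans, skipping duplicates and invalid hosts.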
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         #headers = {'Referer': url}
         r = cfScraper.get(url).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 4
    def _get_items(self, r):
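        # Parse one torrent XML entry (size, seeders, info hash, title) into a magnet link, keeping it only if title and year/episode match.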
        try:
            size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
            seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
            _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
            name = re.search(r'<title>(.+?)</title>', r).groups()[0]
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), quote_plus(name))
            url = url.split('&tr')[0]
            t = name.split(self.hdlr)[0]

            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()

            quality, info = source_utils.get_release_quality(name, url)

            try:
                div = 1000 ** 3
                dsize = float(size) / div
                isize = '%.2f GB' % dsize
            except BaseException:
                dsize, isize = 0.0, ''

            info.insert(0, isize)

            info = ' | '.join(info)

            if cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title):
                if y == self.hdlr:
                    self._sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})

        except BaseException:
            pass
Example No. 5
    def sources(self, url, hostDict, hostprDict):
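        # Movie4kis: read the quality badge from the "calidad2" span, then collect iframe embeds and validate their hosts.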
        try:
            sources = []
            if url is None:
                return sources
            hostDict = hostprDict + hostDict
            # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            r = cfScraper.get(url).content
            # default quality in case no "calidad2" badge is present on the page
            quality, info = 'SD', []
            qual = re.compile('<span class="calidad2">(.+?)</span>').findall(r)
            for qcheck in qual:
                quality, info = source_utils.get_release_quality(
                    qcheck, qcheck)

            links = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(r)

            for link in links:

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Movie4kis - Exception: \n' + str(failure))
            return sources
Example No. 6
 def sources(self, url, hostDict, hostprDict):
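     # watchseriestv: gather the nofollow/target="blank" host links and normalise scheme-relative URLs.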
     try:
         hostDict = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         page = client.request(url)
         links = re.compile('<a rel="nofollow" target="blank" href="(.+?)"',
                            re.DOTALL).findall(page)
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             valid, host = source_utils.is_host_valid(link, hostDict)
             if valid:
                 quality, info = source_utils.get_release_quality(
                     link, link)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return sources
Example No. 7
 def sources(self, url, hostDict, hostprDict):
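     # Watchepisodes4: pull the "watch-button" data-actuallink URLs, de-duplicate them and keep only valid hosts.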
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         r = client.request(url)
         r = re.compile(
             'class="watch-button" data-actuallink="(.+?)"').findall(r)
         for url in r:
             if url in str(sources):
                 continue
             quality, info = source_utils.get_release_quality(url, url)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         log_utils.log('Watchepisodes4 Exception', 1)
         return sources
Example No. 8
	def get_sources(self, url):
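		# IDOPE: decode the hidden hash/name divs of each result row into magnet links, filtering by title, episode and seeders.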
		try:
			r = client.request(url)
			if not r:
				return
			div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})

			for row in div:
				rows = client.parseDOM(row, 'div', attrs={'class': 'resultdivbotton'})
				if not rows:
					return

				for post in rows:
					hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = unquote_plus(name)
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name, self.episode_title):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					if url in str(self.sources):
						continue

					if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if self.episode_title:
						if not source_utils.filter_single_episodes(self.hdlr, name):
							continue

					try:
						seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
													'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('IDOPE')
			pass
Example No. 9
    def _get_sources(self, name, url):
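        # RMZ: scrape the <pre class="links"> blocks for URLs, drop archive/subtitle files and queue debrid-only host links.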
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,  # search the accumulated blocks, not only the last parsed element
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            log_utils.log('RMZ - Exception', 1)
            pass
Example No. 10
 def sources(self, url, hostDict, hostprDict):
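     # torrentapi: fetch an API token, run a json_extended search and turn torrent_results into debrid-only sources.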
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         title = cleantitle.get_query(title)
         query = '%s S%02dE%02d' % (
             title, int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else '%s' % data['imdb']
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         token = cfScraper.get(self.token).content
         token = json.loads(token)["token"]
         if 'tvshowtitle' in data:
             search_link = self.tvsearch.format(token, quote_plus(query),
                                                'format=json_extended')
         else:
             search_link = self.msearch.format(token, data['imdb'],
                                               'format=json_extended')
         control.sleep(250)
         rjson = cfScraper.get(search_link).content
         rjson = ensure_text(rjson, errors='ignore')
         files = json.loads(rjson)['torrent_results']
         for file in files:
             name = file["title"]
             url = file["download"]
             url = url.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, url)
             try:
                 dsize = float(file["size"]) / 1073741824
                 isize = '%.2f GB' % dsize
             except:
                 dsize, isize = 0.0, ''
             info.insert(0, isize)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True,
                 'size': dsize,
                 'name': name
             })
         return sources
     except:
         log_utils.log('torapi - Exception', 1)
         return sources
Example No. 11
    def sources(self, url, hostDict, hostprDict):
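        # bt4g: search by title, rebuild magnet links from the "magnet/" hrefs and read the size from the pill badge.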
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower()

            url = urljoin(self.base_link, self.search_link % query)

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
            posts = client.parseDOM(r, 'div')[1:]
            posts = [i for i in posts if 'magnet/' in i]
            for post in posts:

                links = client.parseDOM(post, 'a', ret='href')[0]
                # str.lstrip strips a character set, not a prefix, and can eat leading hash characters
                url = 'magnet:?xt=urn:btih:' + links.split('magnet/', 1)[-1]
                try:
                    name = client.parseDOM(post, 'a', ret='title')[0]
                    if query not in cleantitle.get_title(name): continue
                except:
                    name = ''

                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                'direct': False, 'debridonly': True, 'size': dsize, 'name': name})

            return sources
        except:
            log_utils.log('bt4g3 - Exception', 1)
            return sources
Example No. 12
    def sources(self, url, hostDict, hostprDict):
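        # lime(torrents)-style search: pull the 40-char info hash from each row, verify title and year/episode, then queue the magnet.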
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if cleantitle.get(re.sub('[()]', '', t)) != cleantitle.get(self.title): continue
                    try:
                        y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if not y == self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
Example No. 13
    def get_sources(self, link):
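        # Fetch the detail page, read the info hash from <kbd> and the release name from the card title, then verify and queue the magnet.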
        try:
            url = '%s%s' % (self.base_link, link)
            result = client.request(url)

            info_hash = re.findall('<kbd>(.+?)<', result, re.DOTALL)[0]
            url1 = '%s%s' % ('magnet:?xt=urn:btih:', info_hash)
            name = re.findall('<h3 class="card-title">(.+?)<', result,
                              re.DOTALL)[0]
            name = urllib.unquote_plus(name).replace(' ', '.')
            url = '%s%s%s' % (url1, '&dn=', str(name))

            t = name.split(self.hdlr)[0].replace(self.year, '').replace(
                '(', '').replace(')', '').replace('&', 'and').replace(
                    '.US.', '.').replace('.us.', '.')
            if cleantitle.get(t) != cleantitle.get(self.title):
                return

            if self.hdlr not in name:
                return

            size = re.findall(
                '<div class="col-3">File size:</div><div class="col">(.+?)<',
                result, re.DOTALL)[0]
            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    size)[0]
                dsize, isize = utils._size(size)
            except:
                dsize, isize = 0, ''

            info.insert(0, isize)

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })

        except:
            pass
Example No. 14
    def sources(self, url, hostDict, hostprDict):
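        # projectfree: build a hyphenated s##e## search URL and collect the external host links from the result page.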
        sources = []
        try:
            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s-s%02de%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            try:
                data = re.compile(
                    '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
                ).findall(r)
                for url in data:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            except:
                log_utils.log('projectfree2 - Exception', 1)
                pass
            return sources
        except:
            log_utils.log('projectfree3 - Exception', 1)
            return sources
Example No. 15
	def get_sources(self, row):
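		# MAGNET4YOU: extract the magnet link from one result row, clean and verify the release name, then read seeders and size.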
		try:
			if 'magnet' not in row:
				return

			url = re.findall('href="(magnet:.+?)"', row, re.DOTALL)[0]
			url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
			if url in str(self.sources):
				return

			hash = re.compile('btih:(.*?)&').findall(url)[0]

			name = url.split('&dn=')[1]
			name = source_utils.clean_name(self.title, name)
			if source_utils.remove_lang(name, self.episode_title):
				return

			if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
				return

			# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
			if self.episode_title:
				if not source_utils.filter_single_episodes(self.hdlr, name):
					return

			try:
				seeders = int(re.findall('<span style="color:#008000"><strong>\s*([0-9]+)\s*</strong>', row, re.DOTALL)[0].replace(',', ''))
				if self.min_seeders > seeders: 
					return
			except:
				seeders = 0
				pass

			quality, info = source_utils.get_release_quality(name, url)

			try:
				size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL)[0]
				dsize, isize = source_utils._size(size)
				info.insert(0, isize)
			except:
				dsize = 0
				pass

			info = ' | '.join(info)

			self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('MAGNET4YOU')
			pass
Example No. 16
 def sources(self, url, hostDict, hostprDict):
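     # Resolve the watching.html page, read its player-data links and split them into direct gvideo streams and host embeds.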
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
             # r = client.request(url, headers=headers, timeout='10', output='geturl')
             r = cfScraper.get(url).content
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         # r = client.request(url, headers=headers, timeout='10')
         r = cfScraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if '123movieshd' in link or 'seriesonline' in link:
                 # r = client.request(link, headers=headers, timeout='10')
                 r = cfScraper.get(link).content
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(link, link)
                     if 'load.php' not in link:
                         sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example No. 17
    def sources(self, url, hostDict, hostprDict):
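        # glodls: debrid-only torrent search; _get_items is expected to return (name, magnet, size-label, size) tuples.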
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
            self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            else:
                url = self.moviesearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            items = self._get_items(url)

            hostDict = hostDict + hostprDict
            for item in items:
                try:
                    name = item[0]
                    url = item[1]
                    url = url.split('&tr')[0]
                    quality, info = source_utils.get_release_quality(name, url)
                    info.insert(0, item[2])
                    info = ' | '.join(info)

                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True, 'size': item[3], 'name': name})
                except:
                    log_utils.log('glodls0_exc', 1)
                    pass

            return sources
        except:
            log_utils.log('glodls1_exc', 1)
            return sources
Example No. 18
 def sources(self, url, hostDict, hostprDict):
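     # Read the "linkplayer" hrefs and derive each host name from the link's domain before validating it.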
     try:
         sources = []
         if url is None: return sources
         html = cfScraper.get(url).content
         links = re.compile('id="linkplayer.+?href="(.+?)"',re.DOTALL).findall(html)
         for link in links:
             quality, info = source_utils.get_release_quality(link, link)
             host = link.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             valid, host = source_utils.is_host_valid(host, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example No. 19
 def sources(self, url, hostDict, hostprDict):
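     # torrentapi (Python 2 variant): token plus json_extended search, with sizes converted from bytes to GB.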
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else '%s' % data['imdb']
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         token = client.request(self.token)
         token = json.loads(token)["token"]
         if 'tvshowtitle' in data:
             search_link = self.tvsearch.format(token,
                                                urllib.quote_plus(query),
                                                'format=json_extended')
         else:
             search_link = self.msearch.format(token, data['imdb'],
                                               'format=json_extended')
         rjson = client.request(search_link)
         files = json.loads(rjson)['torrent_results']
         for file in files:
             name = file["title"]
             url = file["download"]
             url = url.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, url)
             dsize = float(file["size"]) / 1073741824
             isize = '%.2f GB' % dsize
             #size = source_utils.convert_size(file["size"])
             #size = '[B]%s[/B]' % size
             info.insert(0, isize)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True,
                 'size': dsize
             })
         return sources
     except BaseException:
         return sources
Example No. 20
    def _get_sources(self, url):
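        # url is a (page, title) pair: scrape the page's links, read the size once, then queue each valid non-archive host.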
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                if any(x in url.lower()
                       for x in ['.rar.', '.zip.', '.iso.']) or any(
                           url.lower().endswith(x)
                           for x in ['.rar', '.zip', '.iso']):
                    raise Exception()

                if any(x in url.lower()
                       for x in ['youtube', 'sample', 'trailer']):
                    raise Exception()
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            pass
Example No. 21
 def sources(self, url, hostDict, hostprDict):
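     # Ganool: match "<title> (<year>)" in the search results, then follow the moviedownload.php key to the host links.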
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.get_gan_url(data['title'])
         url = self.base_link + self.search_link % q
         r = cfScraper.get(url).content
         v = re.compile(
             '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
         ).findall(r)
         for url, check, qual in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t in check:
                 key = url.split('-hd')[1]
                 url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
                 r = cfScraper.get(url).content
                 r = re.compile(
                     '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                 for url in r:
                     if any(x in url for x in ['.rar']): continue
                     quality, info = source_utils.get_release_quality(
                         qual, url)
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         info = ' | '.join(info)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'url': url,
                             'info': info,
                             'direct': False,
                             'debridonly': False
                         })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('---Ganool Testing - Exception: \n' + str(failure))
         return sources
Example No. 22
 def _get_sources(self, item):
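     # Fetch the detail page, regex out the magnet href, strip tracker params and queue a debrid-only torrent.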
     try:
         name = item[0]
         data = client.request(item[1])
         url = re.search('''href=["'](magnet:\?[^"']+)''', data).groups()[0]
         url = url.split('&tr')[0]
         quality, info = source_utils.get_release_quality(name, url)
         info.insert(0, item[2])
         info = ' | '.join(info)
         self._sources.append({
             'source': 'Torrent',
             'quality': quality,
             'language': 'en',
             'url': url,
             'info': info,
             'direct': False,
             'debridonly': True,
             'size': item[3]
         })
     except BaseException:
         pass
Example No. 23
 def sources(self, url, hostDict, hostprDict):
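     # Follow each <tbody> Download link to its landing page and pull the final href from the id="link" button.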
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         sourcePage = ensure_text(cfScraper.get(url).content,
                                  errors='replace')
         thesources = re.compile('<tbody>(.+?)</tbody>',
                                 re.DOTALL).findall(sourcePage)[0]
         links = re.compile(
             "<a href=\'(.+?)\' target=\'_blank\'>Download</a>",
             re.DOTALL).findall(thesources)
         for link in links:
             linkPage = ensure_text(cfScraper.get(link).content,
                                    errors='replace')
             vlink = re.compile(
                 '<a id="link" rel="nofollow" href="(.+?)" class="btn"',
                 re.DOTALL).findall(linkPage)
             for zlink in vlink:
                 valid, host = source_utils.is_host_valid(zlink, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         zlink, zlink)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': zlink,
                         'info': info,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example No. 24
    def sources(self, url, hostDict, hostprDict):
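        # SKYTORRENTS: search, then parse each <tr> for a magnet link and seeder count before name/size filtering.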
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            if '<tbody' not in r:
                return sources

            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')

        except:
            source_utils.scraper_error('SKYTORRENTS')
            return sources

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(sources):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title, aliases, name, hdlr,
                                                    data['year']):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if episode_title:
                        if not source_utils.filter_single_episodes(hdlr, name):
                            continue

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
            except:
                source_utils.scraper_error('SKYTORRENTS')
                return sources
        return sources
Example No. 25
    def get_sources_packs(self, link):
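        # Season/show-pack variant of the SKYTORRENTS scraper; rows are filtered with filter_season_pack/filter_show_pack.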
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = client.request(link)
            if not r:
                return
            if '<tbody' not in r:
                return
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
        except:
            source_utils.scraper_error('SKYTORRENTS')
            return

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(self.sources):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
            except:
                source_utils.scraper_error('SKYTORRENTS')
                pass
Example No. 26
    def sources(self, url, hostDict, hostprDict):
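        # MOVIEMAGNET: match title/year in the JSON search results, then scrape the torrents page for magnet rows.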
        scraper = cfscrape.create_scraper()
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']

            query = re.sub('[^A-Za-z0-9\s\.-]+', '', title)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            try:
                r = scraper.get(url).content
                if not r:
                    return sources
                if any(value in str(r) for value in [
                        'No movies found', 'something went wrong',
                        'Connection timed out'
                ]):
                    return sources
                r = json.loads(r)
                id = ''
                for i in r:
                    if i['original_title'] == title and i[
                            'release_date'] == year:
                        id = i['id']
                        break
                if id == '':
                    return sources
                link = '%s%s%s' % (self.base_link, '/movies/torrents?id=', id)

                result = scraper.get(link).content
                if 'magnet' not in result:
                    return sources
                result = re.sub(r'\n', '', result)
                links = re.findall(
                    r'<tr>.*?<a title="Download:\s*(.+?)"href="(magnet:.+?)">.*?title="File Size">\s*(.+?)\s*</td>.*?title="Seeds">([0-9]+|[0-9]+,[0-9]+)\s*<',
                    result)

                for link in links:
                    name = link[0]
                    name = unquote_plus(name)
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title.replace('&', 'and'),
                                                    aliases, name, year, year):
                        continue

                    url = link[1]
                    try:
                        url = unquote_plus(url).decode('utf8').replace(
                            '&amp;', '&').replace(' ', '.')
                    except:
                        url = unquote_plus(url).replace('&amp;',
                                                        '&').replace(' ', '.')
                    url = url.split('&tr')[0]
                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = link[2]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    try:
                        seeders = int(link[3].replace(',', ''))
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                return sources
            except:
                source_utils.scraper_error('MOVIEMAGNET')
                return sources
        except:
            source_utils.scraper_error('MOVIEMAGNET')
            return sources
Example No. 27
    def sources(self, url, hostDict, hostprDict):
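        # Search the post titles, expand each post via self.links() and queue the remaining hoster URLs as debrid-only.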
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            r = client.request(url)

            posts = re.findall('<h2 class="title">(.+?)</h2>', r,
                               re.IGNORECASE)

            hostDict = hostprDict + hostDict

            urls = []
            for item in posts:

                try:
                    link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                            re.IGNORECASE)[0]
                    if not cleantitle.get(title) in cleantitle.get(name):
                        continue
                    name = client.replaceHTMLCodes(name)
                    try:
                        _name = name.lower().replace('permalink to', '')
                    except:
                        _name = name

                    quality, info = source_utils.get_release_quality(
                        name, link)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    links = self.links(link)
                    urls += [(i, quality, info, dsize, _name) for i in links]
                except Exception:
                    pass

            for item in urls:
                if 'earn-money' in item[0]:
                    continue

                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                #url = url.encode('utf-8')
                url = ensure_text(url)

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                host = ensure_text(host)

                # size and name travel with each tuple so the last post's values don't leak into every source
                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'info': item[2],
                    'direct': False,
                    'debridonly': True,
                    'size': item[3],
                    'name': item[4]
                })
            return sources
        except Exception:
            return sources
Example No. 28
    def get_sources(self, link):
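        # EXTRATORRENT: pull the magnet from one row, clean and verify the name, then read seeders from the "sy" cell.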
        try:
            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', link,
                                            re.DOTALL)[0])
            url = unquote_plus(url).split('&tr')[0].replace('&amp;',
                                                            '&').replace(
                                                                ' ', '.')
            url = source_utils.strip_non_ascii_and_unprintable(url)
            if url in str(self.sources):
                return

            hash = re.compile('btih:(.*?)&').findall(url)[0]

            name = url.split('&dn=')[1]
            name = source_utils.clean_name(self.title, name)
            if source_utils.remove_lang(name, self.episode_title):
                return

            if not source_utils.check_title(self.title, self.aliases, name,
                                            self.hdlr, self.year):
                return

            # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
            if self.episode_title:
                if not source_utils.filter_single_episodes(self.hdlr, name):
                    return

            try:
                seeders = int(
                    client.parseDOM(link, 'td',
                                    attrs={'class': 'sy'})[0].replace(',', ''))
                if self.min_seeders > seeders:
                    return
            except:
                seeders = 0

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    r'((?:\d+,\d+\.\d+|\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    link)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                source_utils.scraper_error('EXTRATORRENT')
                dsize = 0.0

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'seeders': seeders,
                'hash': info_hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })
        except:
            source_utils.scraper_error('EXTRATORRENT')
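The magnet handling in get_sources above reduces to a small standalone routine: percent-decode the link, cut the tracker list at '&tr', then pull out the btih info-hash and the '&dn=' display name. A self-contained sketch under those assumptions (parse_magnet is an illustrative name, not from the source):

import re
try:
    from urllib.parse import unquote_plus  # Python 3
except ImportError:
    from urllib import unquote_plus  # Python 2

def parse_magnet(href):
    # Returns (info_hash, display_name, trimmed_magnet), or None if the
    # link has no recoverable hash or name.
    url = unquote_plus(href).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
    match = re.search('btih:(.*?)&', url)
    if not match or '&dn=' not in url:
        return None
    return match.group(1), url.split('&dn=')[1], url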
Exemplo n.º 29
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('%3A+', '+')

            #r = client.request(url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
            items = []
            for post in posts:
                try:
                    p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                    for i in p:
                        items.append(i)
                except:
                    pass

            try:
                for item in items:
                    u = client.parseDOM(item, 'a', ret='href')
                    name = re.findall('<strong>(.*?)</strong>', item,
                                      re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if cleantitle.get(t) != cleantitle.get(title): continue
                    for url in u:
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            continue
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        try:
                            size = re.findall(
                                r'((?:\d+,\d+\.\d+|\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))',
                                item, re.DOTALL)[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': name
                            })
            except:
                pass
            return sources
        except:
            log_utils.log('max_rls Exception', 1)
            return sources
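Both sources() methods start from the same parse_qs flattening trick: parse_qs returns every value as a list, so the dict comprehension collapses each list to its first element. A quick illustration with a made-up query string:

from urllib.parse import parse_qs

data = parse_qs('tvshowtitle=Some+Show&season=1&episode=2')
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
# parse_qs gives {'tvshowtitle': ['Some Show'], 'season': ['1'], ...};
# after flattening: {'tvshowtitle': 'Some Show', 'season': '1', 'episode': '2'}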
Exemplo n.º 30
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (title, int(data['season']), int(data['episode']))\
                if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urljoin(
                self.base_link,
                self.search_link.format(query[0].lower(),
                                        cleantitle.geturl(query)))

            r = client.request(url)
            r = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(r, 'tr')
            posts = [i for i in posts if 'magnet:' in i]
            for post in posts:
                post = post.replace('&nbsp;', ' ')
                name = client.parseDOM(post, 'a', ret='title')[1]

                t = name.split(hdlr)[0]
                # strip stray parentheses before comparing cleaned titles
                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(title):
                    continue

                try:
                    y = re.findall(
                        r'[.|(\[\s_-](S\d+E\d+|S\d+)[.|)\]\s_-]', name,
                        re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        r'[.|(\[\s_-](\d{4})[.|)\]\s_-]', name,
                        re.I)[-1].upper()
                if y != hdlr: continue

                links = client.parseDOM(post, 'a', ret='href')
                magnet = [
                    i.replace('&amp;', '&') for i in links if 'magnet:' in i
                ][0]
                url = magnet.split('&tr')[0]

                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('Magnetdl - Exception', 1)
            return sources
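The Magnetdl scraper filters releases by pulling the last SxxExx tag (or four-digit year) out of the name and comparing it to hdlr. Extracted as a standalone helper, the check looks roughly like this (matches_hdlr is an illustrative name, not from the source):

import re

def matches_hdlr(name, hdlr):
    # hdlr is 'S01E02' for episodes or a year string like '2020' for movies.
    try:
        tag = re.findall(r'[.|(\[\s_-](S\d+E\d+|S\d+)[.|)\]\s_-]', name,
                         re.I)[-1].upper()
    except IndexError:
        found = re.findall(r'[.|(\[\s_-](\d{4})[.|)\]\s_-]', name, re.I)
        tag = found[-1].upper() if found else ''
    return tag == hdlr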