Example #1
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None:
				return sources
			r = client.request(url)
			match = re.compile('<iframe src="(.+?)"').findall(r)
			# log_utils.log('match = %s' % match, log_utils.LOGDEBUG)

			for iframe_url in match:
				r = client.request(iframe_url)
				if 'playpanda' in iframe_url:
					links = re.compile("url: '(.+?)',").findall(r)
				else:
					links = re.compile('file: "(.+?)",').findall(r)
				for url in links:
					url = url.replace('\\', '')
					if url in str(sources):
						continue

					quality, info = source_utils.get_release_quality(url)
					fileType = source_utils.getFileType(url)
					info.append(fileType)
					info = ' | '.join(info) if fileType else info[0]

					sources.append({'source': 'direct', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
					                'direct': False, 'debridonly': False})
			return sources
		except:
			source_utils.scraper_error('TOONGET')
			return sources
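
For orientation: every `sources()` method in these examples returns a list of plain dicts with the same keys. A minimal, self-contained sketch of consuming such a list; the quality ranking used for sorting is an assumption, not part of the scraper above:

    # Minimal consumer sketch. Field names come from the example above;
    # the quality ranking is an assumed convention for illustration.
    QUALITY_RANK = {'4K': 0, '1080p': 1, '720p': 2, 'SD': 3}  # assumed labels

    def pick_best(sources):
        # skip debrid-only entries when no debrid account is configured (assumption)
        usable = [s for s in sources if not s.get('debridonly')]
        return sorted(usable, key=lambda s: QUALITY_RANK.get(s.get('quality'), 99))

    demo = [{'source': 'direct', 'quality': '720p', 'language': 'en',
             'url': 'http://example.com/v.mp4', 'info': '720p | WEB-DL',
             'direct': False, 'debridonly': False}]
    print(pick_best(demo))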
Example #2
	def _get_sources(self, name, url):
		try:
			headers = {'User-Agent': client.agent()}
			r = self.scraper.get(url, headers=headers).content

			name = client.replaceHTMLCodes(name)
			if name.startswith('['):
				name = name.split(']')[1]
			name = name.strip().replace(' ', '.')

			l = dom_parser.parse_dom(r, 'div', {'class': 'ppu2h'})
			if l == []:
				return
			s = ''
			for i in l:
				s += i.content

			# search the accumulated content of all 'ppu2h' blocks, not just the last element
			urls = re.findall(r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''', s, flags=re.MULTILINE|re.DOTALL)
			# the original 'or' chain was always true; use any() so archive/sub links are actually skipped
			urls = [i for i in urls if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))]

			for url in urls:
				if url in str(self.sources):
					continue

				valid, host = source_utils.is_host_valid(url, self.hostDict)
				if not valid:
					continue
				host = client.replaceHTMLCodes(host)
				host = host.encode('utf-8')

				quality, info = source_utils.get_release_quality(name, url)

				try:
					size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except:
					dsize = 0
					pass

				fileType = source_utils.getFileType(name)
				info.append(fileType)
				info = ' | '.join(info) if fileType else info[0]

				self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('RAPIDMOVIEZ')
			pass
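
The URL-harvesting regex above is dense; here it is exercised in isolation on a made-up HTML fragment:

    import re

    URL_RE = r'((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])'

    html = '<div class="ppu2h"><a href="https://host.example/file.mkv">link</a></div>'
    print(re.findall(URL_RE, html))  # ['https://host.example/file.mkv']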
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            r = client.request(url)
            posts = client.parseDOM(r, 'h2')

            urls = []
            for item in posts:
                if not item.startswith('<a href'):
                    continue

                try:
                    name = client.parseDOM(item, "a")[0]
                    t = name.split(hdlr)[0].replace(data['year'], '').replace(
                        '(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    # item[0] was just the first character of the post HTML, not a link
                    quality, info = source_utils.get_release_quality(name)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                            item)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    fileType = source_utils.getFileType(name)
                    info.append(fileType)
                    info = ' | '.join(info) if fileType else info[0]

                    item = client.parseDOM(item, 'a', ret='href')

                    url = item

                    links = self.links(url)
                    if links is None:
                        continue

                    # carry dsize in the tuple so each source keeps its own post's size
                    urls += [(i, quality, info, dsize) for i in links]

                except:
                    source_utils.scraper_error('300MBFILMS')
                    pass

            for item in urls:
                if 'earn-money' in item[0]:
                    continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                try:
                    url = url.encode('utf-8')
                except:
                    pass

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue

                host = client.replaceHTMLCodes(host)
                try:
                    host = host.encode('utf-8')
                except:
                    pass

                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'info': item[2],
                    'direct': False,
                    'debridonly': True,
                    'size': item[3]
                })
            return sources

        except:
            source_utils.scraper_error('300MBFILMS')
            return sources
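
The `parse_qs` flattening idiom at the top of this example turns each one-element list into a bare string; a standalone demonstration (Python 3 import shown, query string made up):

    from urllib.parse import parse_qs

    url = 'title=Heat&year=1995'
    data = parse_qs(url)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    print(data)  # {'title': 'Heat', 'year': '1995'}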
Example #4
	def sources(self, url, hostDict, hostprDict):
		scraper = cfscrape.create_scraper()
		sources = []
		try:
			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			q = cleantitle.get_gan_url(data['title'])
			url = self.base_link + self.search_link % q
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = scraper.get(url).content
			v = re.compile('<a href="(.+?)" class="ml-mask jt" title="View(.+?)">\s+<span class=".+?">(.+?)</span>').findall(r)
			t = '%s (%s)' % (data['title'], data['year'])

			for url, name, qual in v:
				if t not in name:
					continue
				item = client.request(url)
				item = client.parseDOM(item, 'div', attrs={'class': 'mvici-left'})[0]
				details = re.compile('<strong>Movie Source.*\s*.*/Person">(.*)</').findall(item)[0]

				name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
				if source_utils.remove_lang(name):
					continue

				key = url.split('-hd')[1]
				r = scraper.get('https://soapgate.online/moviedownload.php?q=' + key).content
				r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)

				for url in r:
					if any(x in url for x in ['.rar', '.zip', '.iso']):
						continue

					quality, info = source_utils.get_release_quality(qual)

					try:
						size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', item)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					fileType = source_utils.getFileType(details)
					info.append(fileType)
					info = ' | '.join(info) if fileType else info[0]

					valid, host = source_utils.is_host_valid(url, hostDict)
					if not valid:
						continue

					sources.append({'source': host, 'quality': quality, 'info': info, 'language': 'en', 'url': url,
					                'direct': False, 'debridonly': True, 'size': dsize})
			return sources
		except:
			source_utils.scraper_error('GANOOL')
			return sources
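
The `re.sub('[^A-Za-z0-9]+', '.', name)` normalization above collapses every run of punctuation or whitespace into a single dot; for example (made-up title):

    import re

    name = ' Some Movie (2020) [1080p] '  # made-up release title
    print(re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.'))  # Some.Movie.2020.1080p.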
Example #5
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url is None:
				return sources

			if debrid.status() is False:
				raise Exception()

			hostDict = hostprDict + hostDict

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

			url = self.search_link % urllib.quote_plus(query)
			url = urlparse.urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			html = client.request(url)

			posts = client.parseDOM(html, 'item')

			items = []
			for post in posts:
				try:
					t = client.parseDOM(post, 'title')[0]
					u = client.parseDOM(post, 'a', ret='href')
					s = re.search('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', post)
					s = s.groups()[0] if s else '0'
					items += [(t, i, s) for i in u]
				except:
					source_utils.scraper_error('300MBDOWNLOAD')
					pass

			for item in items:
				try:
					name = item[0]
					name = client.replaceHTMLCodes(name)

					# hdlr and the year are locals here; self.hdlr / self.year were never set on this class
					t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '')
					if cleantitle.get(t) != cleantitle.get(title):
						continue

					if hdlr not in name:
						continue

					url = item[1]
					if any(x in url for x in ['.rar', '.zip', '.iso']):
						continue

					url = client.replaceHTMLCodes(url)
					url = url.encode('utf-8')

					if url in str(sources):
						continue

					valid, host = source_utils.is_host_valid(url, hostDict)
					if not valid:
						continue

					host = client.replaceHTMLCodes(host)
					host = host.encode('utf-8')

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
						div = 1 if size.endswith(('GB', 'GiB')) else 1024
						size = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
						size = '%.2f GB' % size
						info.append(size)
					except:
						pass

					fileType = source_utils.getFileType(name)
					info.append(fileType)
					info = ' | '.join(info) if fileType else info[0]

					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
					                'info': info, 'direct': False, 'debridonly': True})
				except:
					source_utils.scraper_error('300MBDOWNLOAD')
					pass

			return sources
		except:
			source_utils.scraper_error('300MBDOWNLOAD')
			return sources
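
Several examples call `source_utils._size(size)` and unpack `(dsize, isize)`. Judging from the manual fallback in this example (divide MB values by 1024, format as `'%.2f GB'`), it plausibly behaves like the sketch below; this reconstruction is an assumption, not the actual helper:

    import re

    def _size_sketch(size_str):
        # Hypothetical stand-in for source_utils._size(), inferred from the
        # inline math above: returns (numeric GB, display string).
        div = 1 if size_str.lower().endswith(('gb', 'gib')) else 1024
        value = float(re.sub(r'[^0-9.]', '', size_str.replace(',', '.'))) / div
        return value, '%.2f GB' % value

    print(_size_sketch('700 MB'))   # (0.68..., '0.68 GB')
    print(_size_sketch('1.4 GiB'))  # (1.4, '1.40 GB')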
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(query)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            if r is None:
                return sources
            if 'Nothing Found' in r:
                return sources

            r = client.parseDOM(r, 'article')
            r1 = client.parseDOM(r, 'h2')
            r2 = client.parseDOM(r, 'div', attrs={'class': 'entry-excerpt'})

            if 'tvshowtitle' in data:  # the site removed file size listings for episodes
                posts = zip(client.parseDOM(r1, 'a', ret='href'),
                            client.parseDOM(r1, 'a'))
            else:
                posts = zip(
                    client.parseDOM(r1, 'a', ret='href'),
                    client.parseDOM(r1, 'a'),
                    re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        r2[0]))

            hostDict = hostprDict + hostDict

            items = []
            for post in posts:
                try:
                    base_u = client.request(post[0])

                    if 'tvshowtitle' in data:
                        regex = '<b>(' + re.escape(title) + '.*)</b>'  # escape any regex metacharacters in the title
                        lists = zip(
                            re.findall(regex, base_u),
                            re.findall('<ul>(.+?)</ul>', base_u, re.DOTALL))
                        for links in lists:
                            u = re.findall('\'(http.+?)\'',
                                           links[1]) + re.findall(
                                               '\"(http.+?)\"', links[1])
                            t = links[0]
                            s = 0
                            items += [(t, i, s) for i in u]
                    else:
                        u = re.findall('\'(http.+?)\'', base_u) + re.findall(
                            '\"(http.+?)\"', base_u)
                        u = [i for i in u if '/embed/' not in i]
                        u = [i for i in u if 'youtube' not in i]

                        try:
                            t = post[1].encode('utf-8')
                        except:
                            t = post[1]
                        s = post[2]
                        items += [(t, i, s) for i in u]

                except:
                    source_utils.scraper_error('MYVIDEOLINK')
                    pass

            for item in items:
                try:
                    url = item[1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url.endswith(('.rar', '.zip', '.iso', '.part', '.png',
                                     '.jpg', '.bmp', '.gif')):
                        continue

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]

                    name = client.replaceHTMLCodes(name).replace(' ', '.')
                    match = source_utils.check_title(title, name, hdlr,
                                                     data['year'])
                    if not match:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    fileType = source_utils.getFileType(name)
                    info.append(fileType)
                    info = ' | '.join(info) if fileType else info[0]

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                except:
                    source_utils.scraper_error('MYVIDEOLINK')
                    pass

            return sources
        except:
            source_utils.scraper_error('MYVIDEOLINK')
            return sources
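
Building a regex by splicing in a raw title, as the `<b>(title.*)</b>` pattern does, breaks on titles containing regex metacharacters; hence the `re.escape` above. A quick demonstration (made-up title):

    import re

    title = 'Mission: Impossible (1996)'  # parentheses are regex metacharacters
    html = '<b>Mission: Impossible (1996) 1080p BluRay</b>'
    regex = '<b>(' + re.escape(title) + '.*)</b>'
    print(re.findall(regex, html))  # ['Mission: Impossible (1996) 1080p BluRay']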
Example #7
    def _get_sources(self, url, name, hostDict, hostprDict):
        try:
            urls = []
            result = self.scraper.get(url).content
            if 'dbuttn watch' not in result:
                return
            urls = [(client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn watch'})[0],
                     client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn blue'})[0],
                     client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn magnet'})[0])]

            # '''<a class="dbuttn watch" href="https://www.linkomark.xyz/view/EnWNqSNeLw" target="_blank" rel="nofollow noopener">Watch Online Links</a>
            # <a class="dbuttn blue" href="https://www.linkomark.xyz/view/3-Gjyz5Q2R" target="_blank" rel="nofollow noopener">Get Download Links</a>
            # <a class="dbuttn magnet" href="https://torrentbox.site/save/2970fa51e8af52b7e2d1d5fa61a6005777d768ba" target="_blank" rel="nofollow noopener">Magnet Link</a>'''

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                    result)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                dsize = 0
                pass

            fileType = source_utils.getFileType(name)
            info.append(fileType)
            info = ' | '.join(info) if fileType else info[0]
        except:
            source_utils.scraper_error('MKVHUB')
            return

        for url in urls[0]:  # the watch / download / magnet links collected above
            try:
                r = client.request(url)
                if r is None:
                    continue

                if 'linkomark' in url:
                    p_link = client.parseDOM(r,
                                             'link',
                                             attrs={'rel': 'canonical'},
                                             ret='href')[0]

                    #<input type="hidden" name="_csrf_token_" value=""/>
                    input_name = client.parseDOM(r, 'input', ret='name')[0]
                    input_value = client.parseDOM(r, 'input', ret='value')[0]

                    post = {input_name: input_value}
                    p_data = client.request(p_link, post=post)
                    links = client.parseDOM(p_data,
                                            'a',
                                            ret='href',
                                            attrs={'target': '_blank'})

                    for i in links:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            valid, host = source_utils.is_host_valid(
                                i, hostprDict)
                            if not valid:
                                continue
                            else:
                                rd = True
                        else:
                            rd = False
                        if i in str(self._sources):
                            continue

                        if 'rapidgator' in i:
                            rd = True

                        if rd:
                            self._sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize
                            })
                        else:
                            self._sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'info': info,
                                'direct': False,
                                'debridonly': False,
                                'size': dsize
                            })

                elif 'torrent' in url:
                    data = client.parseDOM(r, 'a', ret='href')
                    url = [i for i in data if 'magnet:' in i][0]
                    url = unquote_plus(url).replace('&amp;',
                                                    '&').replace(' ', '.')
                    url = url.split('&tr')[0]
                    info_hash = re.compile('btih:(.*?)&').findall(url)[0]  # renamed: 'hash' shadows the built-in
                    name = url.split('&dn=')[1]
                    if '.-.MkvHub' in name:
                        name = name.split('.-.')[0]
                    seeders = 0

                    self._sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': info_hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
            except:
                source_utils.scraper_error('MKVHUB')
                pass
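
The magnet branch above pulls the info-hash and display name straight out of the URI; the same steps run standalone (the magnet link is made up, reusing the hash from the commented HTML snippet):

    import re
    from urllib.parse import unquote_plus

    magnet = 'magnet:?xt=urn:btih:2970fa51e8af52b7e2d1d5fa61a6005777d768ba&dn=Some.Movie.2019.1080p&tr=udp://tracker.example:80'
    url = unquote_plus(magnet).replace('&amp;', '&').replace(' ', '.')
    url = url.split('&tr')[0]
    info_hash = re.compile('btih:(.*?)&').findall(url)[0]
    name = url.split('&dn=')[1]
    print(info_hash)  # 2970fa51e8af52b7e2d1d5fa61a6005777d768ba
    print(name)       # Some.Movie.2019.1080p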
Example #8
    def sources(self, url, hostDict, hostprDict):

        auth = self._get_auth()

        if not auth:
            return

        sources = []

        query = self._query(url)

        url, params = self._translate_search(query)
        headers = {'Authorization': auth}
        response = requests.get(url, params=params, headers=headers).text
        results = json.loads(response)

        down_url = results.get('downURL')
        dl_farm = results.get('dlFarm')
        dl_port = results.get('dlPort')
        files = results.get('data', [])

        for item in files:

            try:

                post_hash, post_title, ext, duration = item['0'], item['10'], item['11'], item['14']

                checks = [False] * 6  # six slots: index 5 is used below ([False] * 5 raised IndexError)
                if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']:
                    checks[1] = True
                if re.match('^\d+s', duration) or re.match('^[0-5]m', duration):
                    checks[2] = True
                if 'passwd' in item and item['passwd']:
                    checks[3] = True
                if 'virus' in item and item['virus']:
                    checks[4] = True
                if 'type' in item and item['type'].upper() != 'VIDEO':
                    checks[5] = True

                if any(checks):
                    continue

                stream_url = down_url + quote(
                    '/%s/%s/%s%s/%s%s' %
                    (dl_farm, dl_port, post_hash, ext, post_title, ext))
                file_name = post_title
                file_dl = stream_url + '|Authorization=%s' % (quote(auth))
                size = float(int(item['rawSize'])) / 1073741824

                quality = source_utils.get_release_quality(file_name)[0]
                info = source_utils.getFileType(file_name)
                info = '%.2f GB | %s | %s' % (
                    size, info, file_name.replace('.', ' ').upper())

                sources.append({
                    'source': 'direct',
                    'quality': quality,
                    'language': "en",
                    'url': file_dl,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })

            except:
                print("Unexpected error in Easynews Script: source",
                      sys.exc_info()[0])
                exc_type, exc_obj, exc_tb = sys.exc_info()
                print(exc_type, exc_tb.tb_lineno)
                pass

        return sources
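
The `rawSize` arithmetic above divides a byte count by 1073741824 (1024**3) to convert to GiB; for example:

    raw_size = 1572864000  # made-up byte count
    print('%.2f GB' % (raw_size / 1073741824.0))  # 1.46 GB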
Example #9
File: furk.py Project: 17Q/modules4all
	def sources(self, url, hostDict, hostprDict):

		api_key = self.get_api()

		if not api_key:
			return

		sources = []

		try:

			content_type = 'episode' if 'tvshowtitle' in url else 'movie'
			match = 'extended'
			moderated = 'no' if content_type == 'episode' else 'yes'
			search_in = ''
		
			if content_type == 'movie':
				title = cleantitle.normalize(url.get('title'))
				year = url.get('year')
				query = '@name+%s+%s+@files+%s+%s' % (title, year, title, year)

			elif content_type == 'episode':
				title = cleantitle.normalize(url.get('tvshowtitle'))
				season = int(url['season'])
				episode = int(url['episode'])
				seasEpList = self._seas_ep_query_list(season, episode)
				query = '@name+%s+@files+%s+|+%s+|+%s+|+%s+|+%s' % (title, seasEpList[0], seasEpList[1], seasEpList[2], seasEpList[3], seasEpList[4])
			

			s = requests.Session()
			link = self.base_link + self.search_link % \
				   (api_key, query, match, moderated, search_in)

			p = s.get(link)
			p = json.loads(p.text)

			if p['status'] != 'ok':
				return sources

			files = p['files']

			for i in files:
				
				if i['is_ready'] == '1' and i['type'] == 'video':
					
					try:

						source = 'SINGLE'
						if int(i['files_num_video']) > 3:
							source = 'PACK [B](x%02d)[/B]' % int(i['files_num_video'])
						file_name = i['name']
						file_id = i['id']
						file_dl = i['url_dl']
						size = float(i['size']) / 1073741824
						
						if content_type == 'episode':
							url = json.dumps({'content': 'episode', 'file_id': file_id, 'season': season, 'episode': episode})
						else:
							url = json.dumps({'content': 'movie', 'file_id': file_id, 'title': title, 'year': year})

						quality = source_utils.get_release_quality(file_name, file_dl)[0]
						info = source_utils.getFileType(file_name)
						info = '%.2f GB | %s | %s' % (size, info, file_name.replace('.', ' ').upper())
						sources.append({'source': source,
										'quality': quality,
										'language': "en",
										'url': url,
										'info': info,
										'direct': True,
										'debridonly': False})

					except:
						pass

				else:
					continue

			return sources

		except:
			print("Unexpected error in Furk Script: source", sys.exc_info()[0])
			exc_type, exc_obj, exc_tb = sys.exc_info()
			print(exc_type, exc_tb.tb_lineno)
			pass
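
Rather than a direct link, this scraper serializes a lookup payload with `json.dumps` into the `url` field, presumably decoded later by a matching resolver; a round-trip sketch (the resolver side is an assumption):

    import json

    payload = json.dumps({'content': 'movie', 'file_id': '12345', 'title': 'Heat', 'year': '1995'})
    request = json.loads(payload)  # hypothetical resolver side
    print(request['content'], request['file_id'])  # movie 12345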
Example #10
    def _get_sources(self, url, name, hostDict, hostprDict):
        try:
            urls = []
            result = client.request(url)

            urls = [(client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn watch'})[0],
                     client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn blue'})[0],
                     client.parseDOM(result,
                                     'a',
                                     ret='href',
                                     attrs={'class': 'dbuttn magnet'})[0])]

            # '''<a class="dbuttn watch" href="https://www.linkomark.xyz/view/EnWNqSNeLw" target="_blank" rel="nofollow noopener">Watch Online Links</a>
            # <a class="dbuttn blue" href="https://www.linkomark.xyz/view/3-Gjyz5Q2R" target="_blank" rel="nofollow noopener">Get Download Links</a>
            # <a class="dbuttn magnet" href="https://torrentbox.site/save/2970fa51e8af52b7e2d1d5fa61a6005777d768ba" target="_blank" rel="nofollow noopener">Magnet Link</a>'''

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                    result)[0]
                div = 1 if size.endswith(('GB', 'GiB', 'Gb')) else 1024
                size = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)
            except:
                pass

            fileType = source_utils.getFileType(name)
            info.append(fileType)
            info = ' | '.join(info) if fileType else info[0]

            # Debrid_info = info.append(fileType)
            # Debrid_info = ' | '.join(info) if fileType else info[0]
            # Torrent_info = ' | '.join(info)

        except:
            source_utils.scraper_error('MKVHUB')
            return

        for url in urls[0]:  # the watch / download / magnet links collected above
            try:
                r = client.request(url)
                if r is None:
                    continue

                if 'linkomark' in url:
                    # info = Debrid_info
                    p_link = client.parseDOM(r,
                                             'link',
                                             attrs={'rel': 'canonical'},
                                             ret='href')[0]

                    #<input type="hidden" name="_csrf_token_" value=""/>
                    input_name = client.parseDOM(r, 'input', ret='name')[0]
                    input_value = client.parseDOM(r, 'input', ret='value')[0]

                    post = {input_name: input_value}
                    p_data = client.request(p_link, post=post)
                    links = client.parseDOM(p_data,
                                            'a',
                                            ret='href',
                                            attrs={'target': '_blank'})

                    for i in links:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            valid, host = source_utils.is_host_valid(
                                i, hostprDict)
                            if not valid:
                                continue
                            else:
                                rd = True
                        else:
                            rd = False
                        if i in str(self._sources):
                            continue

                        if 'rapidgator' in i:
                            rd = True

                        if rd:
                            self._sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        else:
                            self._sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'info': info,
                                'direct': False,
                                'debridonly': False
                            })

                elif 'torrent' in url:
                    # info = Torrent_info
                    data = client.parseDOM(r, 'a', ret='href')

                    url = [i for i in data if 'magnet:' in i][0]
                    url = url.split('&tr')[0]

                    self._sources.append({
                        'source': 'torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            except:
                source_utils.scraper_error('MKVHUB')
                pass