Example #1
0
 def process_movie_results(self, html, imdb_id):
     """Parse movie link rows out of *html* into ScraperResult objects.

     If the page advertises an IMDB id that differs from *imdb_id*, the
     page is considered a wrong match and no results are returned.
     """
     results = []
     match = re.search('imdb\.com/title/(tt\d+?)"', html)
     if match and match.group(1) != imdb_id:
         return results
     gc.disable()
     try:
         for link in re.finditer('<tr class="linkTr">(.+?)</tr>', html,
                                 re.DOTALL):
             row = link.group(1)
             url = re.search(
                 'linkHiddenUrl" data-width="\d+" data-height="\d+">(.+?)</td>',
                 row).group(1)
             obj = urlparse(url)
             host_name = re.sub('^(www\.|embed\.)*', '', obj.hostname)
             if self.filter_host(host_name):
                 url = "%s://%s" % (self.service, url)
                 quality = re.search('linkQuality([A-Z]+?)"', row).group(1)
                 result = ScraperResult(self.debrid_hosts, self.service,
                                        host_name, url)
                 # Unknown quality tags degrade to UNKNOWN instead of raising.
                 result.quality = QUALITY_MAP.get(quality, QUALITY.UNKNOWN)
                 results.append(result)
     finally:
         # BUG FIX: gc.enable() previously skipped if a malformed row raised.
         gc.enable()
     return results
Example #2
0
	def process_results(self, soup):
		"""Extract hoster links from the rows of a results table.

		Each <tr> yields at most one ScraperResult; rows that fail to
		parse are skipped so one bad row cannot abort the whole page.
		"""
		import HTMLParser  # hoisted out of the loop; one parser reused below
		h = HTMLParser.HTMLParser()
		results = []
		for row in soup.findAll('tr'):
			try:
				link = row.find('td').find('a')['href']
				domain = h.unescape(row.find('td').getText()).strip()
				try:
					quality = row.findAll('td')[3].find('img')['src']
				except Exception:
					quality = ''
				host_name = domain
				if self.filter_host(host_name):
					url = "%s://%s" % (self.service, link)
					result = ScraperResult(self.service, host_name, url)
					if 'qualityDVD' in quality:
						result.quality = QUALITY.SD480
					elif 'qualityhd' in quality:
						result.quality = QUALITY.SD720
					else:
						result.quality = QUALITY.UNKNOWN
					results.append(result)
			except Exception:
				# Best effort: skip malformed rows rather than crash.
				pass
		return results
Example #3
0
    def process_results(self, uri, pattern):
        """Scrape download links from a listing page at *uri*.

        Only hrefs matching *pattern* are kept. Rows that fail to parse
        are skipped silently (best-effort scraping).
        """
        results = []
        soup = self.request(uri, return_soup=True, cache=3600)
        links = soup.findAll('tr', {
            "class": "forum_header_border",
            "name": "hover"
        })
        # Compile once outside the loop; sizes read as "N.N MB" / "N.N GB".
        size_re = re.compile('>(\d+\.\d+)\s+(M|G)B<')
        for link in links:
            try:
                a = link.find('a', {"class": "download_1"})
                href = a['href']
                title = a['title']
                if not re.search(pattern, href):
                    continue
                url = "%s://%s" % (self.service, href)
                result = ScraperResult({}, self.service, self.name, url, title)
                result.quality = self.test_quality(title)
                match = size_re.search(str(link))
                if match:
                    size, prefix = match.groups()
                    factor = 1024 ** 2 if prefix == 'M' else 1024 ** 3
                    result.size = float(size) * factor
                results += [result]
            except Exception:
                # BUG FIX: was a bare except; a malformed row is skipped,
                # but SystemExit/KeyboardInterrupt now propagate.
                pass
        return results
Example #4
0
File: yify.py Project: bialagary/mw
	def get_resolved_url(self, raw_url):
		"""Resolve *raw_url* via the pk player endpoint.

		If the endpoint returns exactly one video stream its URL is
		returned directly; otherwise every stream becomes a ScraperResult
		(quality bucketed by pixel width) and select_stream() lets the
		caller pick one.
		"""
		resolved_url = ''
		uri = '/player/pk/pk/plugins/player_p2.php?url=' + raw_url
		json = self.request(uri, return_json=True)
		videos = []
		# Keep only entries whose type starts with "video".
		for link in json:
			if re.match('^video', link['type']):
				videos.append(link)
		if len(videos) == 1:
			resolved_url = videos[0]['url']
			return resolved_url
		else:
			self.search_results = []
			for v in videos:
				url = v['url']
				obj = urlparse(url)
				host_name =  re.sub('^www(.+?)\.', '', obj.hostname)
				result = ScraperResult(self.service, host_name, url)
				# Map frame width to a quality bucket.
				if v['width'] > 1280:
					result.quality = QUALITY.HD1080
				elif v['width'] == 1280:
					result.quality = QUALITY.HD720
				elif v['width'] == 640:
					result.quality = QUALITY.SD480
				else:
					result.quality = QUALITY.UNKNOWN
				self.search_results.append(result)
			resolved_url =  self.select_stream()
		return resolved_url
Example #5
0
    def process_results(self, xml, regex=""):
        """Filter an RSS torrent feed by premiumize cache status.

        xml   : ElementTree root with <item> children carrying
                <info_hash>, <title> and <size> elements.
        regex : optional compiled pattern; when given, only titles it
                matches are kept.
        Returns a list of ScraperResult objects pointing at itorrents.org.
        """
        results = []
        # Batch all hashes into a single pm.check call up front.
        hashes = [item.find('info_hash').text for item in xml.iter('item')]
        hashes = pm.check(hashes)
        for item in xml.iter('item'):

            hash = item.find('info_hash').text
            # return_cached_torrents picks which side of the cache we keep:
            # True -> only finished torrents, False -> only unfinished ones.
            if self.return_cached_torrents and hashes['hashes'][hash][
                    'status'] != 'finished':
                continue
            if self.return_cached_torrents is False and hashes['hashes'][hash][
                    'status'] == 'finished':
                continue
            title = item.find('title').text
            test = True
            if regex:
                test = regex.search(title)
            if test:
                url = "http://itorrents.org/torrent/%s.torrent?%s" % (
                    hash.upper(), urllib.urlencode({"title": title}))
                url = "%s://%s" % (self.service, url)
                result = ScraperResult({}, self.service, self.name, url, title)
                result.quality = self.test_quality(title)
                result.size = item.find('size').text
                results += [result]

        return results
Example #6
0
 def process_results(self, data, re_test=None):
     """Build ScraperResults from a JSON search payload.

     Entries failing *re_test* (a compiled regex) or packaged as rar
     archives are skipped.
     """
     results = []
     if data['status'] != 'success':
         plugin.log(data)
         return []
     gc.disable()
     try:
         # BUG FIXES vs. original: duplicate `extension` read and unused
         # `hoster` variable removed; loop variable no longer reuses the
         # name `result` that is later bound to the ScraperResult.
         for entry in data['result']:
             title = self.normalize(entry['title'])
             if re_test and re_test.search(title) is None:
                 continue
             sourcetitle = self.normalize(entry['sourcetitle'])
             extension = entry['extension']
             if extension == 'rar':
                 continue
             size = entry['sizeinternal']
             host_name = entry['hostername']
             for host in entry['hosterurls']:
                 if self.filter_host(host_name):
                     url = "%s://%s" % (self.service, host['url'])
                     quality = self.test_quality(title + sourcetitle +
                                                 self.normalize(url))
                     result = ScraperResult(self.debrid_hosts, self.service,
                                            host_name, url, title)
                     result.quality = quality
                     result.size = int(size)
                     result.extension = extension
                     results += [result]
     finally:
         # Always restore the collector, even if an entry raises.
         gc.enable()
     return results
Example #7
0
    def process_results(self, soup):
        """Extract hoster links from the rows of a results table.

        Rows that fail to parse are skipped so one malformed row cannot
        abort the whole page.
        """
        import HTMLParser  # hoisted out of the loop; single parser reused
        h = HTMLParser.HTMLParser()
        results = []
        for row in soup.findAll('tr'):
            try:
                link = row.find('td').find('a')['href']
                domain = h.unescape(row.find('td').getText()).strip()
                try:
                    quality = row.findAll('td')[3].find('img')['src']
                except Exception:
                    quality = ''
                host_name = domain
                if self.filter_host(host_name):
                    url = "%s://%s" % (self.service, link)
                    result = ScraperResult(self.service, host_name, url)
                    if 'qualityDVD' in quality:
                        result.quality = QUALITY.SD480
                    elif 'qualityhd' in quality:
                        result.quality = QUALITY.SD720
                    else:
                        result.quality = QUALITY.UNKNOWN
                    results.append(result)
            except Exception:
                # Best effort: skip malformed rows rather than crash.
                pass
        return results
Example #8
0
	def process_tv_results(self, js):
		"""Wrap the single stream URL in *js* as one 720p ScraperResult."""
		stream = "%s://%s" % (self.service, js['url'])
		item = ScraperResult(self.service, 'movietv', stream)
		item.quality = QUALITY.HD720
		return [item]
    def process_results(self, xml):
        """Turn an RSS <item> feed of torrents into ScraperResults.

        Hashes are batch-checked against premiumize once, then each item
        is kept or dropped according to self.return_cached_torrents.
        """
        results = []
        regex_hash = re.compile("<hash>([^<]+)<\/hash>")
        regex_title = re.compile("<br\/>([^<]+) <br\/>")

        hashes = []
        for item in xml.iter('item'):
            desc = item.find("description").text
            hashes.append(str(regex_hash.search(desc).group(1)).upper())
        hashes = pm.check(hashes)
        for item in xml.iter('item'):
            desc = item.find("description").text
            hash = str(regex_hash.search(desc).group(1)).upper()
            status = hashes['hashes'][hash]['status']
            if self.return_cached_torrents and status != 'finished':
                continue
            if self.return_cached_torrents is False and status == 'finished':
                continue
            atributes = item.find("enclosure").attrib
            url = "%s://%s" % (self.service, hash)
            # BUG FIX: the original wrote `title = hash = ...`, clobbering
            # the info-hash with the title text.
            title = regex_title.search(desc).group(1)
            result = ScraperResult({}, self.service, self.name, url, title)
            result.size = atributes['length']
            result.quality = self.test_quality(title)
            results += [result]
        return results
Example #10
0
 def process_movie_results(self, html, imdb_id):
     """Parse movie link rows from *html*, keeping only allowed hosts.

     Returns no results when the page's IMDB id conflicts with *imdb_id*.
     """
     results = []
     match = re.search('imdb\.com/title/(tt\d+?)"', html)
     if match and match.group(1) != imdb_id:
         return results
     for link in re.finditer('<tr class="linkTr">(.+?)</tr>', html,
                             re.DOTALL):
         row = link.group(1)
         url = re.search(
             'linkHiddenUrl" data-width="\d+" data-height="\d+">(.+?)</td>',
             row).group(1)
         obj = urlparse(url)
         host_name = re.sub('^(www\.|embed\.)*', '', obj.hostname)
         # Simplified from a boolean-flag dance: no whitelist configured
         # means every host is accepted.
         if self.domains is None or host_name in self.domains:
             url = "%s://%s" % (self.service, url)
             quality = re.search('linkQuality([A-Z]+?)"', row).group(1)
             result = ScraperResult(self.service, host_name, url)
             result.quality = QUALITY_MAP.get(quality, QUALITY.UNKNOWN)
             results.append(result)
     return results
Example #11
0
    def process_tvshow_results(self, html, imdb_id, season, episode):
        """Parse episode link rows for season/episode out of *html*.

        Returns no results when the page's IMDB id conflicts with
        *imdb_id* or the requested episode table is absent.
        """
        results = []
        match = re.search('imdb\.com/title/(tt\d+?)"', html)
        if match:
            if match.group(1) != imdb_id: return results
        pattern = 'Season %s Serie %s(.*?)</table>' % (season, episode)
        match = re.search(pattern, html, re.DOTALL)
        # BUG FIX: the episode table may be missing; previously this
        # crashed with AttributeError on match.group(1).
        if not match:
            return results
        html = match.group(1)

        for link in re.finditer('<tr class="linkTr">(.+?)</tr>', html,
                                re.DOTALL):
            html = link.group(1)
            url = re.search(
                'linkHiddenUrl" data-width="\d+" data-height="\d+">(.+?)</td>',
                html).group(1)
            obj = urlparse(url)
            host_name = re.sub('^(www\.|embed\.)*', '', obj.hostname)
            if self.filter_host(host_name):
                url = "%s://%s" % (self.service, url)
                quality = re.search('linkQuality([A-Z]+?)"', html).group(1)
                result = ScraperResult(self.service, host_name, url)
                result.quality = QUALITY_MAP.get(quality, QUALITY.UNKNOWN)
                results.append(result)
        return results
Example #12
0
	def process_tvshow_results(self, html, imdb_id, season, episode):
		"""Parse episode link rows for the requested season/episode."""
		results = []
		match = re.search('imdb\.com/title/(tt\d+?)"', html)
		if match:
			if match.group(1) != imdb_id: return results
		pattern = 'Season %s Serie %s(.*?)</table>' % (season, episode)
		match = re.search(pattern, html, re.DOTALL)
		if not match: return []
		html = match.group(1)
		gc.disable()
		try:
			for link in re.finditer('<tr class="linkTr">(.+?)</tr>', html, re.DOTALL):
				html = link.group(1)
				url = re.search('linkHiddenUrl" data-width="\d+" data-height="\d+">(.+?)</td>', html).group(1)
				obj = urlparse(url)
				host_name = re.sub('^(www\.|embed\.)*', '', obj.hostname)
				if self.filter_host(host_name):
					url = "%s://%s" % (self.service, url)
					quality = re.search('linkQuality([A-Z]+?)"', html).group(1)
					result = ScraperResult(self.debrid_hosts, self.service, host_name, url)
					# Unknown quality tags degrade to UNKNOWN instead of raising.
					result.quality = QUALITY_MAP.get(quality, QUALITY.UNKNOWN)
					results += [result]
		finally:
			# BUG FIX: gc.enable() previously skipped if a malformed row raised.
			gc.enable()
		return results
Example #13
0
 def process_results(self, response):
     """Filter a torrent search response by premiumize cache status.

     response: dict with a 'torrent_results' list whose entries carry
     'download' (magnet URI), 'title' and 'size'.
     Returns a list of ScraperResult objects.
     """
     results = []
     hashes = []
     hash_regex = re.compile('btih:([^&]+)&')
     # First pass: collect every info-hash so cache status is fetched once.
     for r in response['torrent_results']:
         hash = hash_regex.search(r['download'])
         if hash:
             hashes.append(hash.group(1))
     hashes = pm.check(hashes)
     # Second pass: keep only entries on the requested side of the cache
     # (see self.return_cached_torrents).
     for r in response['torrent_results']:
         hash = hash_regex.search(r['download'])
         if not hash: continue
         hash = hash.group(1)
         if self.return_cached_torrents and hashes['hashes'][hash][
                 'status'] != 'finished':
             continue
         if self.return_cached_torrents is False and hashes['hashes'][hash][
                 'status'] == 'finished':
             continue
         url = "%s://%s" % (self.service, r['download'])
         result = ScraperResult({}, self.service, self.name, url,
                                r['title'])
         result.quality = self.test_quality(r['title'])
         result.size = r['size']
         results += [result]
     return results
Example #14
0
	def process_results(self, path):
		"""Wrap the local file at *path* as a single LOCAL-quality result."""
		target = "%s://%s" % (self.service, path)
		item = ScraperResult(self.debrid_hosts, self.service, self.name, target)
		item.quality = QUALITY.LOCAL
		return [item]
Example #15
0
	def process_movie_results(self, html, year):
		"""Resolve the page's grabber API into gvideo stream results.

		*year* is accepted for interface parity but unused here.
		"""
		results = []
		pattern = 'href="[^"]+" data-id="([^"]+)"\s*(data-subtitle="[^"]+")*'
		match = re.search(pattern, html)
		if match:
			id = match.group(1)
			now = time.localtime()
			# The AJAX endpoint is keyed on the movie id plus a time token.
			uri = AJAX_URL % (id, now.tm_hour + now.tm_min)
			data = self.request(uri, headers={'X-Requested-With': 'XMLHttpRequest'}, return_json=True)
			if 'videoUrlHash' in data and 'grabber' in data:
					query = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': data['videoUrlHash'], '_': int(time.time())}
					query['link'] = query['link'].replace('\/', '/')
					grab_url = data['grabber'].replace('\/', '/')
					grab_url += '?' + urllib.urlencode(query)
					data = self.request(grab_url, headers={'X-Requested-With': 'XMLHttpRequest'}, append_base=False, return_json=True)
					for f in data:
						url = f['file']
						if 'label' in f:
							# Labels look like "720p"; strip the suffix to get a height.
							height = int(f['label'].replace('p', ''))
							quality = self.test_height_quality(height)
						else:
							quality = self.test_gv_quality(url, QUALITY.LOW)
						url = self.get_embeded_url(url)
						result = ScraperResult(self.debrid_hosts, self.service, 'gvideo', url)
						result.quality = quality
						results += [result]
		return results
Example #16
0
	def process_results(self, data, re_test=None):
		"""Build ScraperResults from a JSON search payload.

		Entries failing *re_test* (a compiled regex) or packaged as rar
		archives are skipped.
		"""
		results = []
		if data['status'] != 'success':
			ADDON.log(data)
			return []
		gc.disable()
		try:
			# BUG FIXES vs. original: duplicate `extension` read and unused
			# `hoster` variable removed; loop variable renamed so it no longer
			# shadows the ScraperResult binding.
			for entry in data['result']:
				title = self.normalize(entry['title'])
				if re_test and re_test.search(title) is None:
					continue
				sourcetitle = self.normalize(entry['sourcetitle'])
				extension = entry['extension']
				if extension == 'rar': continue
				size = entry['sizeinternal']
				host_name = entry['hostername']
				for host in entry['hosterurls']:
					if self.filter_host(host_name):
						url = "%s://%s" % (self.service, host['url'])
						quality = self.test_quality(title+sourcetitle+self.normalize(url))
						result = ScraperResult(self.debrid_hosts, self.service, host_name, url, title)
						result.quality = quality
						result.size = int(size)
						result.extension = extension
						results += [result]
		finally:
			# Always restore the collector, even if an entry raises.
			gc.enable()
		return results
Example #17
0
	def process_results(self, soup):
		"""Collect debrid-host file links from centered post blocks.

		Anchors whose link text is not in self.table are ignored; the
		block's "N.NMB"/"N.NGB" size annotation, when present, is applied
		to every file in the block.
		"""
		vfs = VFSClass()
		results = []

		blocks = soup.findAll('div', {"class": "postContent"})
		for block in blocks:
			links = block.findAll('p', {"style": "text-align: center;"})
			for link in links:
				files = link.findAll('a')
				size = re.search('\s(\d+\.??\d+)(MB|GB)', str(link))
				if size:
					# BUG FIX: MB sizes used int(), which raises on fractional
					# values like "1.5"; use float() for both units.
					if size.group(2) == 'MB':
						size = float(size.group(1)) * 1000 * 1000
					else:
						size = float(size.group(1)) * 1000 * 1000 * 1000

				for file in files:
					if file.text.lower() not in self.table: continue
					url = file['href']
					quality = self.test_quality(url, default=QUALITY.HIGH)
					host_name = self.table[file.text.lower()]
					path = vfs.path_parts(url)
					url = "%s://%s" % (self.service, url)
					try:
						filename = path['path'].split('/')[-1]
					except Exception:
						filename = self.service
					result = ScraperResult(self.debrid_hosts, self.service, host_name, url, filename)
					result.quality = quality
					if size: result.size = size
					if path['extension'] != 'html':
						result.extension = path['extension']
					results += [result]
		return results
Example #18
0
	def process_tv_results(self, js):
		"""Resolve the movietv embed URL in *js* into one 720p result."""
		embed = self.get_embeded_url(js['url'],
			user_agent=self.get_user_agent(), cookies=True)
		item = ScraperResult(self.debrid_hosts, self.service, 'movietv', embed)
		item.quality = QUALITY.HD720
		return [item]
Example #19
0
 def process_results(self, response):
     """Wrap the local transmogrified file described by *response*."""
     target = "%s://%s" % (self.service, response['url'])
     item = ScraperResult(self.debrid_hosts, self.service,
                          'transmogrified', target)
     item.quality = QUALITY.LOCAL
     item.size = response['size']
     return [item]
Example #20
0
 def process_results(self, html):
     """Turn every `pic=` query value in *html* into a 720p result."""
     found = []
     for m in re.finditer("pic=([^&]+)", html):
         item = ScraperResult(self.service, self.service,
                              "%s://%s" % (self.service, m.group(1)))
         item.quality = QUALITY.HD720
         found.append(item)
     return found
Example #21
0
File: yify.py Project: bialagary/mw
	def process_results(self, html):
		"""Turn every `pic=` query value in *html* into a 720p result."""
		found = []
		for m in re.finditer("pic=([^&]+)", html):
			item = ScraperResult(self.service, self.service,
				"%s://%s" % (self.service, m.group(1)))
			item.quality = QUALITY.HD720
			found.append(item)
		return found
Example #22
0
	def process_results(self, path):
		"""Expose the transmogrified file at *path* as one LOCAL result."""
		item = ScraperResult(self.debrid_hosts, self.service, 'transmogrified',
			"%s://%s" % (self.service, path))
		item.quality = QUALITY.LOCAL
		item.size = vfs.get_size(path)
		item.extension = self.extension
		return [item]
Example #23
0
 def process_results(self, path):
     """Expose the transmogrified file at *path* as one LOCAL result."""
     item = ScraperResult(self.service, 'transmogrified',
                          "%s://%s" % (self.service, path))
     item.quality = QUALITY.LOCAL
     item.size = vfs.get_size(path)
     item.extension = self.extension
     return [item]
Example #24
0
 def process_results(self, links):
     """Convert torrent link dicts into sized, quality-tagged results."""
     found = []
     for entry in links:
         target = "%s://%s" % (self.service, entry['torrentLink'])
         item = ScraperResult({}, self.service, self.name, target,
                              entry['title'])
         item.size = entry['size']
         item.quality = self.test_quality(entry['title'])
         found.append(item)
     return found
Example #25
0
 def process_tv_results(self, js):
     """Resolve the movietv embed URL in *js* into a single 720p result."""
     embed = self.get_embeded_url(js['url'],
                                  user_agent=self.get_user_agent(),
                                  cookies=True)
     item = ScraperResult(self.debrid_hosts, self.service, 'movietv', embed)
     item.quality = QUALITY.HD720
     return [item]
Example #26
0
	def process_results(self, html):
		"""Collect non-dizlab jwplayer file URLs as gvideo results."""
		found = []
		for m in re.finditer('{\s*file\s*:\s*"([^"]+)', html):
			stream_url = m.group(1)
			if 'dizlab' in stream_url.lower():
				continue
			item = ScraperResult(self.service, 'gvideo',
				"%s://%s" % (self.service, stream_url))
			item.quality = self.test_gv_quality(stream_url)
			found.append(item)
		return found
Example #27
0
	def process_results(self, path):
		"""Describe the local library file at *path* as a single result."""
		item = ScraperResult(self.service, 'VideoLibrary',
			"%s://%s" % (self.service, path))
		item.quality = QUALITY.LOCAL
		item.size = vfs.get_size(path)
		item.text = path
		parts = vfs.path_parts(path)
		item.extension = parts['extension']
		return [item]
Example #28
0
	def process_results(self, html):
		"""Collect "Version N" mirror links, excluding putlocker.is."""
		found = []
		for m in re.finditer('<a[^>]+href="([^"]+)[^>]+>(Version \d+)<', html):
			href, _version = m.groups()
			host_name = urlsplit(href).hostname.replace('embed.', '')
			if self.filter_host(host_name) and host_name != 'putlocker.is':
				item = ScraperResult(self.service, host_name,
					"%s://%s" % (self.service, href))
				item.quality = QUALITY.HIGH
				found.append(item)
		return found
Example #29
0
	def process_results(self, html):
		"""Collect "Version N" mirror links, excluding putlocker.is."""
		found = []
		for m in re.finditer('<a[^>]+href="([^"]+)[^>]+>(Version \d+)<', html):
			href, _version = m.groups()
			host_name = urlsplit(href).hostname.replace('embed.', '')
			if self.filter_host(host_name) and host_name != 'putlocker.is':
				item = ScraperResult(self.debrid_hosts, self.service, host_name,
					"%s://%s" % (self.service, href))
				item.quality = QUALITY.HIGH
				found.append(item)
		return found
Example #30
0
 def process_results(self, html):
     """Collect non-dizlab jwplayer file URLs as gvideo results."""
     found = []
     for m in re.finditer('{\s*file\s*:\s*"([^"]+)', html):
         stream_url = m.group(1)
         if 'dizlab' in stream_url.lower():
             continue
         item = ScraperResult(self.debrid_hosts, self.service, 'gvideo',
                              "%s://%s" % (self.service, stream_url))
         item.quality = self.test_gv_quality(stream_url)
         found.append(item)
     return found
Example #31
0
 def process_results(self, soup):
     """Collect external hoster links (nofollow anchors) from *soup*."""
     found = []
     for anchor in soup.findAll('a', {"rel": "nofollow", "target": "_blank"}):
         host_name = anchor.string.lower()
         if not self.filter_host(host_name):
             continue
         item = ScraperResult(self.service, host_name,
                              "%s://%s" % (self.service, anchor['href']))
         item.quality = QUALITY.UNKNOWN
         found.append(item)
     return found
Example #32
0
	def process_results(self, soup):
		"""Collect hoster links from *soup* whose host is whitelisted."""
		found = []
		for anchor in soup.findAll('a', {"rel": "nofollow", "target": "_blank"}):
			host_name = anchor.string.lower()
			if host_name in self.domains:
				item = ScraperResult(self.service, host_name,
					"%s://%s" % (self.service, anchor['href']))
				item.quality = QUALITY.UNKNOWN
				found.append(item)
		return found
Example #33
0
	def process_results(self, html):
		"""Parse projectfreetv outbound links, keeping allowed hosts."""
		found = []
		pattern = 'href="http://projectfreetv.so/([^"]+)" target="_blank" rel="nofollow"><img src="[^"]+" width="16" height="16"> &nbsp;\n*\s*(.+?)\n*\s*</a>'
		for m in re.finditer(pattern, html, re.DOTALL):
			uri, host_name = m.groups()
			if not self.filter_host(host_name):
				continue
			item = ScraperResult(self.debrid_hosts, self.service, host_name,
				"%s://%s" % (self.service, uri))
			item.quality = QUALITY.UNKNOWN
			found.append(item)
		return found
Example #34
0
	def process_movie_results(self, html):
		"""Map videolink* JS variables to embedded movietv results."""
		found = []
		for m in re.finditer('var\s+(videolink[^\s]*)\s*=\s*"([^"]+)', html):
			var_name, target = m.groups()
			embed = self.get_embeded_url(target, user_agent=self.get_user_agent(), cookies=True)
			item = ScraperResult(self.debrid_hosts, self.service, 'movietv', embed)
			# Variables carrying "hd" in the name advertise the 1080p stream.
			item.quality = QUALITY.HD1080 if 'hd' in var_name else QUALITY.HD720
			found.append(item)
		return found
Example #35
0
	def process_results(self, html):
		"""Collect 600px iframe sources whose host passes the filter."""
		found = []
		for m in re.finditer('iframe src="([^"]+)" width="600"', html, re.DOTALL):
			href = m.group(1)
			host_name = self.get_domain_from_url(href)
			if self.filter_host(host_name):
				item = ScraperResult(self.debrid_hosts, self.service, host_name,
					"%s://%s" % (self.service, href))
				item.quality = QUALITY.HIGH
				found.append(item)
		return self.get_response(found)
Example #36
0
	def process_movie_results(self, html):
		results = []
		for match in re.finditer('var\s+(videolink[^\s]*)\s*=\s*"([^"]+)', html):
			var_name, url = match.groups()
			url = "%s://%s" % (self.service, url)
			result = ScraperResult(self.service, 'movietv', url)
			if 'hd' in var_name:
				result.quality = QUALITY.HD1080
			else:
				result.quality = QUALITY.HD720
			results.append(result)
		return results
Example #37
0
 def process_results(self, html):
     """Parse projectfreetv outbound links, keeping allowed hosts."""
     found = []
     pattern = 'href="http://projectfreetv.so/([^"]+)" target="_blank" rel="nofollow"><img src="[^"]+" width="16" height="16"> &nbsp;\n*\s*(.+?)\n*\s*</a>'
     for m in re.finditer(pattern, html, re.DOTALL):
         uri, host_name = m.groups()
         if not self.filter_host(host_name):
             continue
         item = ScraperResult(self.debrid_hosts, self.service, host_name,
                              "%s://%s" % (self.service, uri))
         item.quality = QUALITY.UNKNOWN
         found.append(item)
     return found
Example #38
0
	def process_results(self, html, season, episode):
		"""Resolve per-episode data-click redirects into gvideo results."""
		found = []
		pattern = '<li\sid=\"season%s-%s[^"]+"\sdata-click="([^"]+)"' % (season, episode)
		for m in re.finditer(pattern, html, re.DOTALL):
			stream_url = self.get_redirect(m.group(1))
			plugin.log(stream_url)
			if not stream_url:
				continue
			item = ScraperResult(self.debrid_hosts, self.service, 'gvideo',
				"%s://%s" % (self.service, stream_url))
			item.quality = self.test_gv_quality(stream_url)
			found.append(item)
		return found
Example #39
0
	def process_movie_results(self, html, year):
		"""Resolve the page's main video iframe into a single 720p result."""
		found = []
		pattern = '<div class="video"> <iframe src="([^"]+)" scrolling="no" frameborder="0" width="100%" height="450px"'
		m = re.search(pattern, html, re.DOTALL)
		if m:
			href = m.group(1)
			host_name = self.get_domain_from_url(href)
			item = ScraperResult(self.debrid_hosts, self.service, host_name,
				self.get_embeded_url(href))
			item.quality = QUALITY.HD720
			found.append(item)
		return found
Example #40
0
	def process_results(self, html):
		"""Collect go_to() hoster links whose host passes the filter."""
		found = []
		for m in re.finditer("go_to\(\d+,'([^']+)", html, re.DOTALL):
			href = m.group(1)
			host_name = self.get_domain_from_url(href)
			if not self.filter_host(host_name):
				continue
			item = ScraperResult(self.debrid_hosts, self.service, host_name,
				"%s://%s" % (self.service, href))
			item.quality = QUALITY.UNKNOWN
			found.append(item)
		return found
Example #41
0
 def process_results(self, html):
     """Collect 600px iframe sources whose host passes the filter."""
     found = []
     for m in re.finditer('iframe src="([^"]+)" width="600"', html,
                          re.DOTALL):
         href = m.group(1)
         host_name = self.get_domain_from_url(href)
         if self.filter_host(host_name):
             item = ScraperResult(self.debrid_hosts, self.service,
                                  host_name,
                                  "%s://%s" % (self.service, href))
             item.quality = QUALITY.HIGH
             found.append(item)
     return self.get_response(found)
Example #42
0
 def process_results(self, html):
     """Parse aff_id watch links, keeping hosts that pass the filter."""
     found = []
     for m in re.finditer('aff_id=(\d+)"\s[^\?]+\?domain=([^"]+)', html,
                          re.DOTALL):
         aff, host_name = m.groups()
         if not self.filter_host(host_name):
             continue
         target = "%s://http://projectfreetv.im/watch/?aff_id=%s" % (
             self.service, aff)
         item = ScraperResult(self.debrid_hosts, self.service, host_name,
                              target)
         item.quality = QUALITY.UNKNOWN
         found.append(item)
     return found
Example #43
0
 def process_results(self, soup):
     """Extract go_to() links from the #linkname div of *soup*."""
     markup = str(soup.find('div', {'id': 'linkname'}))
     found = []
     for link in re.findall("go_to\(\d+,'(.+?)'\)", markup):
         host_name = self.get_hostname(link)
         if not self.filter_host(host_name):
             continue
         item = ScraperResult(self.service, host_name,
                              "%s://%s" % (self.service, link))
         item.quality = QUALITY.UNKNOWN
         found.append(item)
     return found
Example #44
0
 def process_results(self, movie):
     """Return results for the movie's torrents premiumize has cached."""
     hashes = PremiumizeAPI().check([t['hash'] for t in movie['torrents']])
     found = []
     for torrent in movie['torrents']:
         if torrent['hash'] not in hashes['hashes']:
             continue
         item = ScraperResult({}, self.service, self.name,
                              "%s://%s" % (self.service, torrent['hash']),
                              self.name)
         item.size = torrent['size_bytes']
         item.quality = self.test_quality(str(torrent['quality']))
         found.append(item)
     return found
Example #45
0
	def process_results(self, soup):
		"""Extract go_to() links from the #linkname div of *soup*."""
		markup = str(soup.find('div', {'id': 'linkname'}))
		found = []
		for link in re.findall("go_to\(\d+,'(.+?)'\)", markup):
			host_name = self.get_hostname(link)
			if not self.filter_host(host_name):
				continue
			item = ScraperResult(self.service, host_name,
				"%s://%s" % (self.service, link))
			item.quality = QUALITY.UNKNOWN
			found.append(item)
		return found
Example #46
0
    def process_results(self, uri, pattern):
        """Scrape magnet links from a listing page and filter them by
        premiumize cache status.

        uri     : listing page to fetch (soup-parsed, cached for 1h).
        pattern : regex string; only magnet hrefs matching it are kept.
        Returns a list of ScraperResult objects.
        """
        results = []
        soup = self.request(uri, return_soup=True, cache=3600)
        links = soup.findAll('tr', {
            "class": "forum_header_border",
            "name": "hover"
        })
        hashes = []
        hash_regex = re.compile('btih:([^&]+)&')
        # First pass: gather every info-hash so pm.check is called once.
        for link in links:
            a = link.find('a', {"class": "magnet"})
            if a is not None:
                href = a['href']
                if re.search(pattern, href):
                    hash = hash_regex.search(href)
                    if hash:
                        hashes.append(hash.group(1))
        hashes = pm.check(hashes)
        # Second pass: build results, keeping only torrents on the requested
        # side of the cache (see self.return_cached_torrents). Malformed
        # rows are skipped via the bare except below.
        for link in links:
            try:
                a = link.find('a', {"class": "magnet"})
                magnet = a['href']
                title = a['title'].replace(' Magnet Link', '')
                if re.search(pattern, magnet):
                    hash = hash_regex.search(magnet)
                    if not hash: continue
                    hash = hash.group(1)
                    if hash in hashes['hashes']:
                        if self.return_cached_torrents and hashes['hashes'][
                                hash]['status'] != 'finished':
                            continue
                        if self.return_cached_torrents is False and hashes[
                                'hashes'][hash]['status'] == 'finished':
                            continue
                        url = "%s://%s" % (self.service, magnet)
                        result = ScraperResult({}, self.service, self.name,
                                               url, title)
                        result.quality = self.test_quality(title)
                        # Row text advertises "N.NN MB" / "N.NN GB".
                        match = re.search('>(\d+\.\d+)\s+(M|G)B<', str(link))
                        if match:
                            size, prefix = match.groups()
                            if prefix == 'M':
                                result.size = float(size) * 1024 * 1024
                            else:
                                result.size = float(size) * 1024 * 1024 * 1024

                        results += [result]

            except:
                pass
        return results
Example #47
0
 def process_results(self, html, season, episode):
     """Find the single watch-episode link for season/episode and wrap it
     in a ScraperResult (assumed HD720)."""
     pattern = 'id="season%s-%s-watch-ep" class="watch-ep-btn" data-click=" ([^"]+)"' % (
         season, episode)
     found = re.search(pattern, html, re.DOTALL)
     if not found:
         return []
     href = found.group(1)
     result = ScraperResult(self.debrid_hosts, self.service,
                            self.get_domain_from_url(href),
                            self.get_embeded_url(href))
     result.quality = QUALITY.HD720
     return [result]
Example #48
0
    def process_results(self, soup):
        results = []
        links = soup.findAll('tr')

        hashes = []
        hash_regex = re.compile('btih:([^&]+)&')
        for link in links:
            magnet = link.find('a',
                               {"title": "Download this torrent using magnet"})
            if not magnet: continue
            hash = hash_regex.search(magnet['href'])
            if hash:
                hashes.append(hash.group(1))
        hashes = pm.check(hashes)
        regex = re.compile('\/torrent\/\d+\/')
        size_regex = re.compile('Size\s(\d+\.\d+)&nbsp;(M|G)')
        for link in links:
            magnet = link.find('a',
                               {"title": "Download this torrent using magnet"})
            if not magnet: continue
            hash = hash_regex.search(magnet['href'])
            if not hash: continue
            hash = hash.group(1)
            if hash in hashes['hashes']:
                if self.return_cached_torrents and hashes['hashes'][hash][
                        'status'] != 'finished':
                    continue
                if self.return_cached_torrents is False and hashes['hashes'][
                        hash]['status'] == 'finished':
                    continue
                title = link.find('a', {"class": "detLink"})
                if title:
                    title = title['href']
                    title = regex.sub('', title)
                url = "%s://%s" % (self.service, magnet['href'])
                result = ScraperResult({}, self.service, self.name, url, title)
                result.quality = self.test_quality(title)
                match = link.find('font', {"class": "detDesc"})
                if match:
                    match = size_regex.search(str(match))
                    if match:
                        size, prefix = match.groups()
                        if prefix == 'M':
                            result.size = float(size) * 1024 * 1024
                        else:
                            result.size = float(size) * 1024 * 1024 * 1024
                results += [result]

        return results
	def _get_sources(self, vid):
		"""Scrape mirror links for a video from the iceplayer member page.

		Extracts the anti-scraping tokens (secret, t, s, m) embedded in the
		page's inline javascript, then emits one ScraperResult per mirror
		link found in each quality block ("ripdiv").

		Fixes: the old code did `ad_url = urllib.quote(match.group(1))` on an
		unguarded iframe search — an AttributeError whenever the page carries
		no iframe — and `ad_url` was never used in the built URL anyway, so
		the dead computation is removed. Commented-out display/record code is
		also dropped.
		"""
		uri = '/membersonly/components/com_iceplayer/video.php?h=374&w=631&vid=%s&img=' % vid
		results = []
		html = self.request(uri)
		soup = BeautifulSoup(html)

		# The page builds a hidden form value from one or two string pieces;
		# join whatever groups matched to recover the full secret.
		match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)
		secret = ''.join(match.groups(''))

		match = re.search('"&t=([^"]+)', html)
		t = match.group(1)

		# s and m are numeric seeds; the request below adds random offsets.
		match = re.search('(?:\s+|,)s\s*=(\d+)', html)
		s_start = int(match.group(1))

		match = re.search('(?:\s+|,)m\s*=(\d+)', html)
		m_start = int(match.group(1))

		for block in soup.findAll('div', {"class": "ripdiv"}):
			# Each ripdiv is headed by a <b> label; 'HD 720p' marks the HD block.
			isHD = block.find('b').string == 'HD 720p'
			if isHD: quality = QUALITY.HD720
			else: quality = QUALITY.SD480

			mirrors = block.findAll("p")
			for mirror in mirrors:
				links = mirror.findAll("a")
				for link in links:
					# Strip the call wrapper from the onclick attribute to get
					# the mirror id — presumably of the form go(<id>); verify.
					mirror_id = link['onclick'][3:len(link['onclick'])-1]
					host_name, title = self.get_provider(link)
					if host_name:
						s = s_start + random.randint(1, 100)
						m = m_start + (s - s_start) + random.randint(1, 100)
						url = '%s:///membersonly/components/com_iceplayer/video.phpAjaxResp.php?id=%s&s=%s&iqs=&url=&m=%s&cap= &sec=%s&t=%s' % (self.service, mirror_id, s, m, secret, t)
						result = ScraperResult(self.service, host_name, url, title)
						result.quality = quality
						results.append(result)
		return results
Example #50
0
	def process_results(self, html):
		"""Parse host links from the movies index table; quality is looked up
		from the smiley icon number (None when no smiley is present)."""
		pattern = r'id="tablemoviesindex2".*?href="([^"]+).*?&nbsp;([^<]+)(.*)'
		results = []
		for hit in re.finditer(pattern, html):
			path, host, trailer = hit.groups()
			if not path.startswith('/'):
				path = '/' + path
			smiley_match = re.search('/smileys/(\d+)\.gif', trailer)
			smiley = smiley_match.group(1) if smiley_match else None
			result = ScraperResult(self.service, host.lower(),
			                       "%s://%s" % (self.service, path))
			result.quality = QUALITY_MAP[smiley]
			results.append(result)
		return results
Example #51
0
	def process_results(self, html):
		"""Collect direct video sources from <source> tags in the player page,
		wrapping each URL via get_embeded_url with UA and cookies."""
		results = []
		for tag in re.finditer("source src='([^']+)'\s+type='video/([^']+)", html):
			raw_url, extension = tag.groups()
			raw_url = raw_url.replace('&amp;', '&')
			# title is the last path component of the stream URL
			title = urlparse(raw_url).path.split('/')[-1]
			wrapped = self.get_embeded_url(raw_url, user_agent=self.get_user_agent(), cookies=True)
			result = ScraperResult(self.debrid_hosts, self.service, 'ororo.tv', wrapped, title)
			result.quality = QUALITY.HD720
			result.extension = extension
			results.append(result)
		return results
Example #52
0
	def process_results(self, soup):
		"""Read host/link pairs out of 'site' divs; malformed rows are skipped."""
		results = []
		for entry in soup.findAll('div', {'class': 'site'}):
			try:
				anchor = entry.find('a')
				host_name = anchor['data-hostname'].lower()
				link = anchor['data-actuallink']
				if self.filter_host(host_name):
					result = ScraperResult(self.service, host_name,
					                       "%s://%s" % (self.service, link))
					result.quality = QUALITY.UNKNOWN
					results.append(result)
			except:
				# best-effort: skip rows missing the expected attributes
				pass
		return results
Example #53
0
	def _get_sources(self, vid):
		"""Scrape mirror links for a video from the iceplayer member page.

		Pulls anti-scraping tokens (secret, t, s, m, ad iframe URL) out of the
		page's inline javascript, then walks each quality block ("ripdiv") and
		emits one ScraperResult per mirror that passes the host filter.
		"""
		uri = self.referer % vid
		results = []
		html = self.request(uri)
		soup = BeautifulSoup(html)

		# The page builds a hidden form value from one or two string pieces;
		# join whatever groups matched to recover the full secret.
		match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)
		secret = ''.join(match.groups(''))

		match = re.search('"&t=([^"]+)', html)
		t = match.group(1)

		# s and m are numeric seeds; random offsets are added per request.
		match = re.search('(?:\s+|,)s\s*=(\d+)', html)
		s_start = int(match.group(1))

		match = re.search('(?:\s+|,)m\s*=(\d+)', html)
		m_start = int(match.group(1))

		# The ad iframe URL is forwarded to the ajax endpoint; it may be
		# missing, in which case an empty string is sent.
		match = re.search('<iframe[^>]*src="([^"]+)', html)
		if match:
			ad_url = urllib.quote(match.group(1))
		else:
			ad_url = ''

		for block in soup.findAll('div', {"class": "ripdiv"}):
			# Each ripdiv is headed by a <b> label; 'HD 720p' marks the HD block.
			isHD = 'HD 720p' in block.find('b').string
			if isHD: quality = QUALITY.HD720
			else: quality = QUALITY.SD480

			mirrors = block.findAll("p")
			for mirror in mirrors:
				links = mirror.findAll("a")
				for link in links:
					# Strip the call wrapper from the onclick attribute to get
					# the mirror id — presumably of the form go(<id>); verify.
					mirror_id = link['onclick'][3:len(link['onclick'])-1]
					host_name, title = self.get_provider(link)
					if host_name:
						if self.filter_host(host_name):
							s = s_start + random.randint(3, 1000)
							m = m_start + random.randint(21, 1000)
							uri = self.ajax_url % (mirror_id, s, m, secret, t, ad_url)
							url = "%s://%s" % (self.service, uri)
							result = ScraperResult(self.service, host_name, url, title)
							result.quality = quality
							results.append(result)
		return results
Example #54
0
	def process_results(self, html):
		"""Extract file/label stream entries and resolve v.asp redirects.

		Fixes two defects: the old condition `'google' in redirect or '' in
		redirect` was always True ('' is a substring of every string), making
		the non-google branch unreachable; and the branch-computed `quality`
		was discarded because the result was unconditionally assigned
		test_gv_quality(redirect). Now the google check stands alone and the
		computed quality is actually used.
		"""
		results = []
		for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
			stream_url, height = match.groups()
			# unescape the JSON-style backslash escapes
			stream_url = stream_url.replace('\\&', '&').replace('\\/', '/')
			if 'v.asp' in stream_url and 'ok.ru' not in html:
				# Follow the redirect to learn the real host of the stream.
				redirect = self.request(stream_url, get_redirect=True, append_base=False)
				url = "%s://%s" % (self.service, stream_url)
				if 'google' in redirect:
					host_name = 'gvideo'
					quality = self.test_gv_quality(redirect)
				else:
					host_name = self.service
					quality = self.test_height_quality(height)
				result = ScraperResult(self.service, host_name, url)
				result.quality = quality
				results.append(result)
		return results
Example #55
0
	def process_results(self, html):
		"""Parse numbered link rows, resolving host name and quality per link."""
		results = []
		for row in re.finditer('<tr id="link_(\d+?)"(.+?)</tr>', html, re.DOTALL):
			link_id, fragment = row.groups()
			host = re.search('<a href="/link/show/%s/">\s+(.+?)\s+</a>' % link_id, fragment, re.DOTALL).group(1)
			if not self.filter_host(host):
				continue
			grade = re.search('<td class="qualityCell js-link-format" style="text-transform: uppercase;">\s+(.+?)\s+</td>', fragment, re.DOTALL).group(1).upper()
			result = ScraperResult(self.debrid_hosts, self.service, host,
			                       "%s:///link/play/%s/" % (self.service, link_id))
			result.quality = QUALITY_MAP.get(grade, QUALITY.UNKNOWN)
			results.append(result)
		return results
Example #56
0
	def process_results(self, html):
		"""Collect direct video sources from <source> tags, tagging each URL
		with a User-Agent suffix before wrapping it in the service scheme."""
		results = []
		for tag in re.finditer("source src='([^']+)'\s+type='video/([^']+)", html):
			stream, extension = tag.groups()
			stream = stream.replace('&amp;', '&')
			# title is the last path component of the stream URL
			title = urlparse(stream).path.split('/')[-1]
			stream += '|User-Agent=%s' % (self.get_user_agent())
			result = ScraperResult(self.service, 'ororo.tv',
			                       "%s://%s" % (self.service, stream), title)
			result.quality = QUALITY.HD720
			result.extension = extension
			results.append(result)
		return results
	def get_resolved_url(self, id):
		"""Look up a furk.net file by id, collect its video streams into
		self.search_results, and return the user-selected stream URL.

		Returns False when the API request yields an empty response.
		"""
		self.search_results = []
		params = {"type": "video", "id": id, "api_key": self._login(), 't_files': 1}
		response = self.request("/file/get", params)
		if response == '':
			return False
		for entry in response['files'][0]['t_files']:
			# only keep entries whose content type marks them as video
			if not re.search('^video/', entry['ct']):
				continue
			result = ScraperResult(self.service, 'furk.net', entry['url_dl'], entry['name'])
			result.size = self.format_size(entry['size'])
			result.bitrate = entry['bitrate']
			self.search_results.append(result)
		return self.select_stream()
Example #58
0
	def process_results(self, soup):
		"""Read host/link pairs from the link table; malformed rows are skipped."""
		results = []
		for table_row in soup.find('div', {'id': 'linktable'}).findAll('tr'):
			try:
				cells = table_row.findAll('td')
				host_name = cells[0].getText().lower()
				link = cells[1].find('a')['href']
				if self.filter_host(host_name):
					result = ScraperResult(self.debrid_hosts, self.service, host_name,
					                       "%s://%s" % (self.service, link))
					result.quality = QUALITY.UNKNOWN
					results.append(result)
			except:
				# best-effort: skip rows without the expected cell layout
				pass
		return results