Example #1
 def __search(self, titles, year):
     try:
         query = self.search_link % (cleantitle.getsearch(titles[0].replace(
             ' ', '%20')))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(
             r,
             'li',
             attrs={'class': 'item everyone-item over_online haveTooltip'})
         for i in r:
             title = client.parseDOM(i, 'a', ret='title')[0]
             url = client.parseDOM(i, 'a', ret='href')[0]
             data = client.request(url)
             y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
             original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',
                                     data, re.DOTALL)[0]
             original_t, title = cleantitle.get(original_t), cleantitle.get(
                 title)
             if (t in title or t in original_t) and y == year:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
Example #2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         r = client.request(url)
         match = re.compile('<iframe src="(.+?)"').findall(r)
         for url in match:
             r = client.request(url)
             if 'playpanda' in url:
                 match = re.compile("url: '(.+?)',").findall(r)
             else:
                 match = re.compile('file: "(.+?)",').findall(r)
             for url in match:
                 url = url.replace('\\', '')
                 if url in str(sources):
                     continue
                 info = source_tools.get_info(url)
                 quality = source_tools.get_quality(url)
                 sources.append({
                     'source': 'Direct',
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
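Example #3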
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile(
                 'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
             ).findall(r)
             for url in match:
                 r = client.request(url)
                 match = re.compile(
                     '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button  wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
                 ).findall(r)
                 for http, host, url in match:
                     url = '%s://%s/%s' % (http, host, url)
                     info = source_utils.check_url(url)
                     quality = source_utils.check_url(url)
                     valid, host = source_utils.is_host_valid(
                         host, hostDict)
                     if valid:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': False,
                             'debridonly': False
                         })
         except:
             return sources
     except Exception:
         return sources
     return sources
Example #4
    def resolve(self, url):
        try:
            # cookies = client.request(url, output='cookie')
            # verifyGet = client.request(self.verify, cookie = cookies)
            # cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(verifyGet)
            cookies = cache.cache_get('szukajka_cookie')
            replace = re.findall("""tmvh=(.*)""", str(cookies['value']))[0]
            cookies = str(cookies['value'])

            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            test4 = client.request(url, cookie=cookies)
            test5 = client.parseDOM(test4,
                                    'a',
                                    attrs={'class': 'submit'},
                                    ret='href')[0]

            replace = re.findall("""tmvh=(.*)""", cookies)[0]
            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            test6 = client.request(test5, cookie=cookies)
            test7 = client.parseDOM(test6, 'iframe', ret='src')
            video_url = test7[0].replace(
                "javascript:window.location.replace('", "").replace("')", "")
            return video_url
        except Exception as e:
            return
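The resolve method above refreshes the tmvh token inside an existing cookie string before each request. A minimal standalone sketch of that one step, using a safer `[^;]*` match instead of the greedy `(.*)` used above (the hash value here is a placeholder):

import re

def refresh_tmvh(cookies, new_hash):
    # swap the old tmvh value for the freshly computed hash;
    # [^;]* stops at the next cookie instead of eating the rest of the string
    old = re.findall('tmvh=([^;]*)', cookies)[0]
    return cookies.replace(old, new_hash)

print(refresh_tmvh('sess=abc; tmvh=deadbeef', '12345678'))  # sess=abc; tmvh=12345678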
Example #5
	def resolve(self, url):
		try:
			cookies = cache.cache_get('alltube_cookie')['value']
			myurl = url.split('?')
			mycookie = client.request(myurl[0], headers={'Cookie': cookies}, output='cookie', error=True)
			tmp = 'ZGVmIGFiYyhpbl9hYmMpOg0KICAgIGRlZiByaGV4KGEpOg0KICAgICAgICBoZXhfY2hyID0gJzAxMjM0NTY3ODlhYmNkZWYnDQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKDQpOg0KICAgICAgICAgICAgcmV0ICs9IGhleF9jaHJbKGEgPj4gKGkgKiA4ICsgNCkpICYgMHgwRl0gKyBoZXhfY2hyWyhhID4+IChpICogOCkpICYgMHgwRl0NCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiBoZXgodGV4dCk6DQogICAgICAgIHJldCA9ICcnDQogICAgICAgIGZvciBpIGluIHJhbmdlKGxlbih0ZXh0KSk6DQogICAgICAgICAgICByZXQgKz0gcmhleCh0ZXh0W2ldKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGFkZDMyKGEsIGIpOg0KICAgICAgICByZXR1cm4gKGEgKyBiKSAmIDB4RkZGRkZGRkYNCiAgICBkZWYgY21uKGEsIGIsIGMsIGQsIGUsIGYpOg0KICAgICAgICBiID0gYWRkMzIoYWRkMzIoYiwgYSksIGFkZDMyKGQsIGYpKTsNCiAgICAgICAgcmV0dXJuIGFkZDMyKChiIDw8IGUpIHwgKGIgPj4gKDMyIC0gZSkpLCBjKQ0KICAgIGRlZiBmZihhLCBiLCBjLCBkLCBlLCBmLCBnKToNCiAgICAgICAgcmV0dXJuIGNtbigoYiAmIGMpIHwgKCh+YikgJiBkKSwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgZ2coYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oKGIgJiBkKSB8IChjICYgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGhoKGEsIGIsIGMsIGQsIGUsIGYsIGcpOg0KICAgICAgICByZXR1cm4gY21uKGIgXiBjIF4gZCwgYSwgYiwgZSwgZiwgZykNCiAgICBkZWYgaWkoYSwgYiwgYywgZCwgZSwgZiwgZyk6DQogICAgICAgIHJldHVybiBjbW4oYyBeIChiIHwgKH5kKSksIGEsIGIsIGUsIGYsIGcpDQogICAgZGVmIGNyeXB0Y3ljbGUodGFiQSwgdGFiQik6DQogICAgICAgIGEgPSB0YWJBWzBdDQogICAgICAgIGIgPSB0YWJBWzFdDQogICAgICAgIGMgPSB0YWJBWzJdDQogICAgICAgIGQgPSB0YWJBWzNdDQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzBdLCA3LCAtNjgwODc2OTM2KTsNCiAgICAgICAgZCA9IGZmKGQsIGEsIGIsIGMsIHRhYkJbMV0sIDEyLCAtMzg5NTY0NTg2KTsNCiAgICAgICAgYyA9IGZmKGMsIGQsIGEsIGIsIHRhYkJbMl0sIDE3LCA2MDYxMDU4MTkpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlszXSwgMjIsIC0xMDQ0NTI1MzMwKTsNCiAgICAgICAgYSA9IGZmKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDcsIC0xNzY0MTg4OTcpOw0KICAgICAgICBkID0gZmYoZCwgYSwgYiwgYywgdGFiQls1XSwgMTIsIDEyMDAwODA0MjYpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQls2XSwgMTcsIC0xNDczMjMxMzQxKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbN10sIDIyLCAtNDU3MDU5ODMpOw0KICAgICAgICBhID0gZmYoYSwgYiwgYywgZCwgdGFiQls4XSwgNywgMTc3MDAzNTQxNik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzldLCAxMiwgLTE5NTg0MTQ0MTcpOw0KICAgICAgICBjID0gZmYoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE3LCAtNDIwNjMpOw0KICAgICAgICBiID0gZmYoYiwgYywgZCwgYSwgdGFiQlsxMV0sIDIyLCAtMTk5MDQwNDE2Mik7DQogICAgICAgIGEgPSBmZihhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNywgMTgwNDYwMzY4Mik7DQogICAgICAgIGQgPSBmZihkLCBhLCBiLCBjLCB0YWJCWzEzXSwgMTIsIC00MDM0MTEwMSk7DQogICAgICAgIGMgPSBmZihjLCBkLCBhLCBiLCB0YWJCWzE0XSwgMTcsIC0xNTAyMDAyMjkwKTsNCiAgICAgICAgYiA9IGZmKGIsIGMsIGQsIGEsIHRhYkJbMTVdLCAyMiwgMTIzNjUzNTMyOSk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzFdLCA1LCAtMTY1Nzk2NTEwKTsNCiAgICAgICAgZCA9IGdnKGQsIGEsIGIsIGMsIHRhYkJbNl0sIDksIC0xMDY5NTAxNjMyKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTFdLCAxNCwgNjQzNzE3NzEzKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbMF0sIDIwLCAtMzczODk3MzAyKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbNV0sIDUsIC03MDE1NTg2OTEpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxMF0sIDksIDM4MDE2MDgzKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbMTVdLCAxNCwgLTY2MDQ3ODMzNSk7DQogICAgICAgIGIgPSBnZyhiLCBjLCBkLCBhLCB0YWJCWzRdLCAyMCwgLTQwNTUzNzg0OCk7DQogICAgICAgIGEgPSBnZyhhLCBiLCBjLCBkLCB0YWJCWzldLCA1LCA1Njg0NDY0MzgpOw0KICAgICAgICBkID0gZ2coZCwgYSwgYiwgYywgdGFiQlsxNF0sIDksIC0xMDE5ODAzNjkwKTsNCiAgICAgICAgYyA9IGdnKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE0LCAtMTg3MzYzOTYxKTsNCiAgICAgICAgYiA9IGdnKGIsIGMsIGQsIGEsIHRhYkJbOF0sIDIwLCAxMTYzNTMxNTAxKTsNCiAgICAgICAgYSA9IGdnKGEsIGIsIGMsIGQsIHRhYkJbMTNdLCA1LCAtMTQ0NDY4MTQ2Nyk7DQogICAgICAgIGQgPSBnZyhkLCBhLCBiLCBjLCB0YWJCWzJdLCA5LCAtNTE0MDM3ODQpOw0KICAgICAgICBjID0gZ2coYywgZCwgYSwgYiwgdGFiQls3XSwgMTQsIDE3MzUzMjg0NzMpOw0KICAgICAgICBiID0gZ2coYiwgYywgZCwgYSwgdGFiQlsxMl0sIDIwLCAtMTkyNjYwNzczNCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzVdLCA0LCAtMzc4NTU4KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbOF0sIDExLCAtMjAyMjU3NDQ2Myk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzExXSwgMTYsIDE4MzkwMzA1NjIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxNF0sIDIzLCAtMzUzMDk1NTYpOw0KICAgICAgICBhID0gaGgoYSwgYiwgYywgZCwgdGFiQlsxXSwgNCwgLTE1MzA5OTIwNjApOw0KICAgICAgICBkID0gaGgoZCwgYSwgYiwgYywgdGFiQls0XSwgMTEsIDEyNzI4OTMzNTMpOw0KICAgICAgICBjID0gaGgoYywgZCwgYSwgYiwgdGFiQls3XSwgMTYsIC0xNTU0OTc2MzIpOw0KICAgICAgICBiID0gaGgoYiwgYywgZCwgYSwgdGFiQlsxMF0sIDIzLCAtMTA5NDczMDY0MCk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzEzXSwgNCwgNjgxMjc5MTc0KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMF0sIDExLCAtMzU4NTM3MjIyKTsNCiAgICAgICAgYyA9IGhoKGMsIGQsIGEsIGIsIHRhYkJbM10sIDE2LCAtNzIyNTIxOTc5KTsNCiAgICAgICAgYiA9IGhoKGIsIGMsIGQsIGEsIHRhYkJbNl0sIDIzLCA3NjAyOTE4OSk7DQogICAgICAgIGEgPSBoaChhLCBiLCBjLCBkLCB0YWJCWzldLCA0LCAtNjQwMzY0NDg3KTsNCiAgICAgICAgZCA9IGhoKGQsIGEsIGIsIGMsIHRhYkJbMTJdLCAxMSwgLTQyMTgxNTgzNSk7DQogICAgICAgIGMgPSBoaChjLCBkLCBhLCBiLCB0YWJCWzE1XSwgMTYsIDUzMDc0MjUyMCk7DQogICAgICAgIGIgPSBoaChiLCBjLCBkLCBhLCB0YWJCWzJdLCAyMywgLTk5NTMzODY1MSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzBdLCA2LCAtMTk4NjMwODQ0KTsNCiAgICAgICAgZCA9IGlpKGQsIGEsIGIsIGMsIHRhYkJbN10sIDEwLCAxMTI2ODkxNDE1KTsNCiAgICAgICAgYyA9IGlpKGMsIGQsIGEsIGIsIHRhYkJbMTRdLCAxNSwgLTE0MTYzNTQ5MDUpOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQls1XSwgMjEsIC01NzQzNDA1NSk7DQogICAgICAgIGEgPSBpaShhLCBiLCBjLCBkLCB0YWJCWzEyXSwgNiwgMTcwMDQ4NTU3MSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzNdLCAxMCwgLTE4OTQ5ODY2MDYpOw0KICAgICAgICBjID0gaWkoYywgZCwgYSwgYiwgdGFiQlsxMF0sIDE1LCAtMTA1MTUyMyk7DQogICAgICAgIGIgPSBpaShiLCBjLCBkLCBhLCB0YWJCWzFdLCAyMSwgLTIwNTQ5MjI3OTkpOw0KICAgICAgICBhID0gaWkoYSwgYiwgYywgZCwgdGFiQls4XSwgNiwgMTg3MzMxMzM1OSk7DQogICAgICAgIGQgPSBpaShkLCBhLCBiLCBjLCB0YWJCWzE1XSwgMTAsIC0zMDYxMTc0NCk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzZdLCAxNSwgLTE1NjAxOTgzODApOw0KICAgICAgICBiID0gaWkoYiwgYywgZCwgYSwgdGFiQlsxM10sIDIxLCAxMzA5MTUxNjQ5KTsNCiAgICAgICAgYSA9IGlpKGEsIGIsIGMsIGQsIHRhYkJbNF0sIDYsIC0xNDU1MjMwNzApOw0KICAgICAgICBkID0gaWkoZCwgYSwgYiwgYywgdGFiQlsxMV0sIDEwLCAtMTEyMDIxMDM3OSk7DQogICAgICAgIGMgPSBpaShjLCBkLCBhLCBiLCB0YWJCWzJdLCAxNSwgNzE4Nzg3MjU5KTsNCiAgICAgICAgYiA9IGlpKGIsIGMsIGQsIGEsIHRhYkJbOV0sIDIxLCAtMzQzNDg1NTUxKTsNCiAgICAgICAgdGFiQVswXSA9IGFkZDMyKGEsIHRhYkFbMF0pOw0KICAgICAgICB0YWJBWzFdID0gYWRkMzIoYiwgdGFiQVsxXSk7DQogICAgICAgIHRhYkFbMl0gPSBhZGQzMihjLCB0YWJBWzJdKTsNCiAgICAgICAgdGFiQVszXSA9IGFkZDMyKGQsIHRhYkFbM10pDQogICAgZGVmIGNyeXB0YmxrKHRleHQpOg0KICAgICAgICByZXQgPSBbXQ0KICAgICAgICBmb3IgaSBpbiByYW5nZSgwLCA2NCwgNCk6DQogICAgICAgICAgICByZXQuYXBwZW5kKG9yZCh0ZXh0W2ldKSArIChvcmQodGV4dFtpKzFdKSA8PCA4KSArIChvcmQodGV4dFtpKzJdKSA8PCAxNikgKyAob3JkKHRleHRbaSszXSkgPDwgMjQpKQ0KICAgICAgICByZXR1cm4gcmV0DQogICAgZGVmIGpjc3lzKHRleHQpOg0KICAgICAgICB0eHQgPSAnJzsNCiAgICAgICAgdHh0TGVuID0gbGVuKHRleHQpDQogICAgICAgIHJldCA9IFsxNzMyNTg0MTkzLCAtMjcxNzMzODc5LCAtMTczMjU4NDE5NCwgMjcxNzMzODc4XQ0KICAgICAgICBpID0gNjQNCiAgICAgICAgd2hpbGUgaSA8PSBsZW4odGV4dCk6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgY3J5cHRibGsodGV4dFsnc3Vic3RyaW5nJ10oaSAtIDY0LCBpKSkpDQogICAgICAgICAgICBpICs9IDY0DQogICAgICAgIHRleHQgPSB0ZXh0W2kgLSA2NDpdDQogICAgICAgIHRtcCA9IFswLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwLCAwXQ0KICAgICAgICBpID0gMA0KICAgICAgICB3aGlsZSBpIDwgbGVuKHRleHQpOg0KICAgICAgICAgICAgdG1wW2kgPj4gMl0gfD0gb3JkKHRleHRbaV0pIDw8ICgoaSAlIDQpIDw8IDMpDQogICAgICAgICAgICBpICs9IDENCiAgICAgICAgdG1wW2kgPj4gMl0gfD0gMHg4MCA8PCAoKGkgJSA0KSA8PCAzKQ0KICAgICAgICBpZiBpID4gNTU6DQogICAgICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgICAgIGZvciBpIGluIHJhbmdlKDE2KToNCiAgICAgICAgICAgICAgICB0bXBbaV0gPSAwDQogICAgICAgIHRtcFsxNF0gPSB0eHRMZW4gKiA4Ow0KICAgICAgICBjcnlwdGN5Y2xlKHJldCwgdG1wKTsNCiAgICAgICAgcmV0dXJuIHJldA0KICAgIGRlZiByZXplZG93YSh0ZXh0KToNCiAgICAgICAgcmV0dXJuIGhleChqY3N5cyh0ZXh0KSkNCiAgICByZXR1cm4gcmV6ZWRvd2EoaW5fYWJjKQ0K'
			# the blob decodes to a pure-Python hash routine (function abc); its
			# round constants match MD5, so it effectively yields an MD5 hex digest
			tmp = base64.b64decode(tmp)
			_myFun = compile(tmp, '', 'exec')
			vGlobals = {"__builtins__": None, 'len': len, 'list': list, 'ord': ord, 'range': range}
			vLocals = {'abc': ''}
			exec _myFun in vGlobals, vLocals
			myFun1 = vLocals['abc']

			data = client.request(urlparse.urljoin(self.base_link, '/jsverify.php?op=tag'), cookie=mycookie)
			data = byteify(json.loads(data))
			d = {}
			for i in range(len(data['key'])):
				d[data['key'][i]] = data['hash'][i]
			tmp = ''
			for k in sorted(d.keys()):
				tmp += d[k]
			mycookie = 'tmvh=%s;%s' % (myFun1(tmp), mycookie)
			link = client.request(myurl[-1], cookie=mycookie)
			match = re.search('<iframe src="(.+?)"', link)
			if match:
				linkVideo = match.group(1)
				if linkVideo.startswith("//"):
					# prefix only the scheme; replace("//", "http://") would also
					# mangle any later '//' in the URL
					linkVideo = 'http:' + linkVideo
				return linkVideo
			return
		except:
			return
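The base64 blob above decodes to a pure-Python hash routine whose round constants and initial state match MD5 (RFC 1321). Assuming that reading is right, the sandboxed exec can be collapsed to hashlib; token_hash below is a hypothetical stand-in for the decoded abc() helper, shown against the standard MD5 test vector:

import hashlib

def token_hash(text):
    # hypothetical stand-in for the decoded abc() helper
    return hashlib.md5(text.encode('utf-8')).hexdigest()

print(token_hash('abc'))  # 900150983cd24fb0d6963f7d28e17f72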
Example #6
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.conf_link),
                            XHR=True)
         r = json.loads(r).get('streamer')
         r = client.request(r + '%s.mp4/master.m3u8' % url, XHR=True)
         r = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)',
                        r, re.IGNORECASE)
         r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]
         for quality, link in r:
             sources.append({
                 'source': 'CDN',
                 'quality': quality,
                 'language': 'de',
                 'url': link,
                 'direct': True,
                 'debridonly': False
             })
         return sources
     except:
         return sources
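The RESOLUTION regex above pulls height/URL pairs out of an HLS master playlist. A quick self-contained check of that pattern against a made-up playlist (the URLs are placeholders):

import re

sample = ('#EXTM3U\n'
          '#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=1280x720\n'
          'http://cdn.example/720.m3u8\n'
          '#EXT-X-STREAM-INF:BANDWIDTH=400000,RESOLUTION=640x360\n'
          'http://cdn.example/360.m3u8\n')
streams = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)',
                     sample, re.IGNORECASE)
print(streams)  # [('720', 'http://cdn.example/720.m3u8'), ('360', 'http://cdn.example/360.m3u8')]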
Example #7
 def __search(self, titles, year, content):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
Example #8
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         query = urlparse.urljoin(self.base_link, url)
         r = client.request(query)
         links = client.parseDOM(r, 'li', attrs={'id': '\d+'})
         for i in links:
             data = re.findall(
                 "<img.+?\('([^']+)'.+?<b>(\w+)\s*<img.+?<td.+?>(.+?)</td>\s*<td",
                 i, re.DOTALL)
             for url, info, quality in data:
                 lang, info = self.get_lang_by_type(info)
                 quality = self.quality_fixer(quality)
                 if 'streamcloud' in url: quality = 'SD'
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if 'goo' in url:
                     data = client.request(url)
                     url_id = re.findall('var\s*videokeyorig\s*=\s*"(.+?)"',
                                         data, re.DOTALL)[0]
                     url, host = 'http://hqq.tv/player/embed_player.php?vid=%s' % url_id, 'netu.tv'
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': lang,
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example #9
	def __search(self, titles, year):
		try:
			url = urlparse.urljoin(self.base_link, self.search_link)
			t = [cleantitle.get(i) for i in set(titles) if i]
			y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
			post = {'story': titles[0], 'years_ot': str(int(year) - 1), 'years_do': str(int(year) + 1)}
			r = client.request(url, post=post, XHR=True)
			if len(r) < 1000:
				url = urlparse.urljoin(self.base_link, self.search_old % urllib.quote_plus(titles[0]))
				r = client.request(url)
			r = r.decode('cp1251').encode('utf-8')
			r = dom_parser.parse_dom(r, 'article')
			r = dom_parser.parse_dom(r, 'div', attrs={'class': 'full'})
			r = [(dom_parser.parse_dom(i, 'a', attrs={'itemprop': 'url'}, req='href'),
			      dom_parser.parse_dom(i, 'h3', attrs={'class': 'name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'origin-name'}, req='content'),
			      dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
			r = [(i[0][0].attrs['href'], i[1][0].attrs['content'], i[2][0].attrs['content'],
			      dom_parser.parse_dom(i[3], 'a', attrs={'itemprop': 'copyrightYear'})) for i in r if
			     i[0] and i[1] and i[2]]
			r = [(i[0], i[1], i[2], i[3][0].content) for i in r if i[3]]
			r = [i[0] for i in r if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] in y][0]
			return source_utils.strip_domain(r)
		except:
			return
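Example #10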
 def links(self, url):
     urls = []
     try:
         if url is None: return
         r = client.request(url)
         r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
         r = client.parseDOM(r, 'a', ret='href')
         r1 = [i for i in r if 'money' in i][0]
         r = client.request(r1)
         r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]
         if 'enter the password' in r:
             plink = client.parseDOM(r, 'form', ret='action')[0]
             post = {'post_password': '******', 'Submit': 'Submit'}
             send_post = client.request(plink, post=post, output='cookie')
             link = client.request(r1, cookie=send_post)
         else:
             link = client.request(r1)
         link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
         link = client.parseDOM(link, 'a', ret='href')
         link = [(i.split('=')[-1]) for i in link]
         for i in link:
             urls.append(i)
         return urls
     except:
         pass
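Example #11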
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = dom_parser.parse_dom(r,
                                     'td',
                                     attrs={
                                         'data-title-name':
                                         re.compile('Season %02d' %
                                                    int(season))
                                     })
            r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']
            r = client.request(urlparse.urljoin(self.base_link, r))
            r = dom_parser.parse_dom(r,
                                     'td',
                                     attrs={
                                         'data-title-name':
                                         re.compile('Episode %02d' %
                                                    int(episode))
                                     })
            r = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

            return source_utils.strip_domain(r)
        except:
            return
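Example #12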
    def __search(self, search_link, imdb, titles):
        try:
            query = search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
            r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [
                i.attrs['href'] for i in r
                if i and cleantitle.get(i.content) in t
            ][0]

            url = source_utils.strip_domain(r)

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'href': re.compile('.*/tt\d+.*')},
                                     req='href')
            r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
            r = [i[0] for i in r if i]

            return url if imdb in r else None
        except:
            return
Example #13
 def searchMovie(self, title, year, aliases, headers):
     try:
         for alias in aliases:
             url = '%s/film/%s' % (self.base_link,
                                   cleantitle.geturl(alias['title']))
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if url is not None and url != self.base_link:
                 break
         if url is None:
             for alias in aliases:
                 url = '%s/film/%s-%s' % (self.base_link,
                                          cleantitle.geturl(
                                              alias['title']), year)
                 url = client.request(url,
                                      headers=headers,
                                      output='geturl',
                                      timeout='10')
                 if url is not None and url != self.base_link:
                     break
         return url
     except:
         return
Example #14
    def sezonlukdizi_tvcache(self):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            result = client.request(url, redirect=False)

            if not result:
                r = client.request(self.base_link)
                r = \
                    dom_parser.parse_dom(r, 'script',
                                         attrs={'type': 'text/javascript', 'src': re.compile('.*/js/dizi.*')},
                                         req='src')[0]
                url = urlparse.urljoin(self.base_link, r.attrs['src'])
                result = client.request(url)

            result = re.compile('{(.+?)}').findall(result)
            result = [(re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', i),
                       re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\',|\")', i))
                      for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [(re.compile('/diziler(/.+?)(?://|\.|$)').findall(i[0]),
                       re.sub('&#\d*;', '', i[1])) for i in result]
            result = [(i[0][0] + '/', cleantitle.query(self.lat2asc(i[1])))
                      for i in result if len(i[0]) > 0]

            return result
        except BaseException:
            return []
Example #15
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = client.request(query)
			links = client.parseDOM(r, 'tbody')
			links = client.parseDOM(links, 'a', ret='href')
			for url in links:
				if 'target' in url: continue
				data = client.request(url)
				url = client.parseDOM(data, 'iframe', ret='src')[0]
				if url.startswith('/go'): url = re.findall('go\?(.+?)-', url)[0]
				if 'crypt' in url: continue
				if 'redvid' in url:
					data = client.request(url)
					url = client.parseDOM(data, 'iframe', ret='src')[0]
				if any(x in url for x in ['.online', 'xrysoi.se', 'filmer', '.bp', '.blogger', 'youtu']):
					continue
				quality = 'SD'
				lang, info = 'gr', 'SUB'
				valid, host = source_utils.is_host_valid(url, hostDict)
				if 'hdvid' in host: valid = True
				if not valid: continue
				sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info,
				                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example #16
def more_gomo(link, hostDict):
	sources = []  # By Mpie
	try:
		gomo_link = 'https://gomostream.com/decoding_v3.php'
		User_Agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
		result = client.request(link)
		tc = re.compile('tc = \'(.+?)\';').findall(result)[0]
		if tc:
			token = re.compile('"_token": "(.+?)",').findall(result)[0]
			post = {'tokenCode': tc, '_token': token}

			def tsd(tokenCode):
				_13x48X = tokenCode
				_71Wxx199 = _13x48X[4:18][::-1]
				return _71Wxx199 + "18" + "432782"

			headers = {'Host': 'gomostream.com', 'Referer': link, 'User-Agent': User_Agent, 'x-token': tsd(tc)}
			result = client.request(gomo_link, XHR=True, post=post, headers=headers)
			urls = json.loads(result)
			for url in urls:
				if 'gomostream' in url:
					continue
					# sources.append({'source': 'CDN', 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
				else:
					quality, info = source_utils.get_release_quality(url, url)
					valid, host = source_utils.is_host_valid(url, hostDict)
					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
					                'direct': False, 'debridonly': False})
		return sources
	except:
		return sources
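The x-token header above is derived from the tc value by the tsd helper: characters 4 through 17, reversed, plus a fixed suffix. A quick standalone check with a made-up token:

def tsd(tokenCode):
    # slice characters 4..17, reverse them, append the constant tail
    return tokenCode[4:18][::-1] + "18" + "432782"

print(tsd('abcd0123456789efgh'))  # hgfe987654321018432782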
Example #17
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			hostDict = hostprDict + hostDict
			r = client.request(url)
			qual = re.compile('class="quality">(.+?)<').findall(r)
			# default so quality is always defined, even when no match is found
			quality = 'SD'
			for i in qual:
				if 'HD' in i:
					quality = '720p'
			r = client.parseDOM(r, "div", attrs={"id": "mv-info"})
			for i in r:
				t = re.compile('<a href="(.+?)"').findall(i)
				for url in t:
					t = client.request(url)
					t = client.parseDOM(t, "div", attrs={"id": "content-embed"})
					for u in t:
						i = re.findall('iframe src="(.+?)"', u)
						for url in i:
							valid, host = source_utils.is_host_valid(url, hostDict)
							if valid:
								sources.append(
									{'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
									 'debridonly': False})

			return sources
		except:
			return sources
Example #18
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         query = urlparse.urljoin(self.base_link, url)
         r = client.request(query)
         q = re.findall("'(http://www.elreyxhd.+?)'", r, re.DOTALL)[0]
         links = client.request(q)
         links = client.parseDOM(links, 'a', ret='href')
         for url in links:
             lang, info = 'es', 'LAT'
             qual = 'HD'
             if 'http' not in url: continue
             if 'elrey' in url: continue
             valid, host = source_utils.is_host_valid(url, hostDict)
             if not valid: continue
             sources.append({
                 'source': host,
                 'quality': qual,
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #19
	def search(self, title, localtitle, year, search_type):
		try:
			titles = []
			titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
			titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))
			cookies = client.request(self.base_link, output='cookie')
			cache.cache_insert('alltube_cookie', cookies)
			for title in titles:
				r = client.request(urlparse.urljoin(self.base_link, self.search_link),
				                   post={'search': cleantitle.query(title)}, headers={'Cookie': cookies})
				r = self.get_rows(r, search_type)

				for row in r:
					url = client.parseDOM(row, 'a', ret='href')[0]
					names_found = client.parseDOM(row, 'h3')[0]
					if names_found.startswith('Zwiastun') and not title.startswith('Zwiastun'):
						continue
					names_found = names_found.encode('utf-8').split('/')
					names_found = [cleantitle.normalize(cleantitle.getsearch(i)) for i in names_found]
					for name in names_found:
						name = name.replace("  ", " ")
						title = title.replace("  ", " ")
						words = title.split(" ")
						found_year = self.try_read_year(url)
						if self.contains_all_words(name, words) and (not found_year or found_year == year):
							return url
						else:
							continue
					continue
		except:
			return
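Example #20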
def request(url,
            check,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        r = client.request(url,
                           close=close,
                           redirect=redirect,
                           proxy=proxy,
                           post=post,
                           headers=headers,
                           mobile=mobile,
                           XHR=XHR,
                           limit=limit,
                           referer=referer,
                           cookie=cookie,
                           compression=compression,
                           output=output,
                           timeout=timeout)
        if r is not None and error is not False: return r
        if check in str(r) or str(r) == '': return r
        proxies = sorted(get(), key=lambda x: random.random())
        proxies = proxies[:3]
        for p in proxies:
            p += urllib.quote_plus(url)
            if post is not None:
                if isinstance(post, dict):
                    post = utils.byteify(post)
                    post = urllib.urlencode(post)
                p += urllib.quote_plus('?%s' % post)
            r = client.request(p,
                               close=close,
                               redirect=redirect,
                               proxy=proxy,
                               headers=headers,
                               mobile=mobile,
                               XHR=XHR,
                               limit=limit,
                               referer=referer,
                               cookie=cookie,
                               compression=compression,
                               output=output,
                               timeout='20')
            if check in str(r) or str(r) == '': return r
    except:
        pass
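The wrapper above retries through up to three randomly ordered proxy prefixes when the direct response lacks the expected marker. A minimal standalone sketch of that fallback idea; fetch, PROXIES and the marker are assumptions here, not part of the original:

import random

PROXIES = ['http://proxy-a.example/?u=', 'http://proxy-b.example/?u=']

def fetch_with_fallback(fetch, url, check):
    # try the direct URL first, then a shuffled subset of proxy prefixes
    text = fetch(url)
    if check in str(text):
        return text
    for p in random.sample(PROXIES, len(PROXIES))[:3]:
        text = fetch(p + url)
        if check in str(text):
            return text

# usage with a stubbed fetcher
print(fetch_with_fallback(lambda u: 'ok page' if 'proxy-b' in u else '', 'http://x', 'ok'))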
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link,
                                   url) if not url.startswith('http') else url

            result = client.request(url)
            data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            pattern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(pattern, data, re.DOTALL)[0]
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + [''.join(links_url)][0].replace(
                'slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            for i in links:
                try:
                    data = [(client.parseDOM(i, 'a', ret='href')[0],
                             client.parseDOM(i,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except BaseException:
                    pass

            return sources
        except Exception:
            return sources
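Example #22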
    def resolve(self, url):
        try:
            r = client.request(url, output='extended')
            url_res = client.parseDOM(r[0], 'a', attrs={'class': 'submit'}, ret='href')[0]
            mycookie = self.crazy_cookie_hash(r[4])

            r = client.request(url_res, cookie=mycookie)

            return client.parseDOM(r, 'iframe', ret='src')[0]
        except:
            return
Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            episode = data.get('episode')

            r = client.request(url)

            aj = self.__get_ajax_object(r)

            b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']

            if episode:
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode},
                                         req=['data-episode', 'data-server'])
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])

            r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

            for epi, server in r:
                try:
                    x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server,
                         'nonce': aj.get('nonce'), 'b': b}
                    x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                    x = json.loads(x)

                    q = source_utils.label_to_quality(x.get('q'))
                    x = json.loads(base64.decodestring(x.get('u')))

                    u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                    u = u.replace('\/', '/').strip('"')

                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    sources.append(
                        {'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False,
                         'checkquality': True})
                except:
                    pass

            return sources
        except:
            return sources
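The evp_decode call above unwraps a CryptoJS-style AES payload ({'ct': ..., 's': ...}), whose key and IV are conventionally derived with OpenSSL's EVP_BytesToKey. A minimal sketch of that derivation, assuming MD5 with a 32-byte key and 16-byte IV (the usual CryptoJS defaults):

import hashlib

def evp_bytes_to_key(password, salt, key_len=32, iv_len=16):
    # repeatedly hash previous block + password + salt until enough bytes
    d, block = b'', b''
    while len(d) < key_len + iv_len:
        block = hashlib.md5(block + password + salt).digest()
        d += block
    return d[:key_len], d[key_len:key_len + iv_len]

key, iv = evp_bytes_to_key(b'secret', b'12345678')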
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['title']
            year = data['year']
            t = title + year

            query = '%s' % data['title']
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link.format(urllib.quote_plus(query))
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            items = client.parseDOM(r, 'li')
            items = [(dom.parse_dom(i, 'a', req='href')[0]) for i in items
                     if year in i]
            items = [(i.attrs['href'], re.sub('<.+?>|\n', '',
                                              i.content).strip())
                     for i in items]
            item = [
                i[0].replace('movie', 'view') for i in items
                if cleantitle.get(t) == cleantitle.get(i[1])
            ][0]

            html = client.request(item)
            streams = re.findall('sources\:\s*\[(.+?)\]\,', html, re.DOTALL)[0]
            streams = re.findall(
                'file:\s*[\'"](.+?)[\'"].+?label:\s*[\'"](.+?)[\'"]', streams,
                re.DOTALL)

            for link, label in streams:
                quality = source_utils.get_release_quality(label, label)[0]
                link += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                    client.agent()), item)
                sources.append({
                    'source': 'Direct',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except BaseException:
            return sources
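The '|User-Agent=...&Referer=...' suffix appended to the stream link above follows Kodi's convention of carrying request headers after a pipe. A small sketch (Python 2, like the snippets here) of how such a link can be split back apart on the player side:

import urllib

def split_kodi_url(link):
    # split 'http://host/v.mp4|User-Agent=UA&Referer=R' into URL + header dict
    if '|' not in link:
        return link, {}
    url, params = link.split('|', 1)
    headers = dict(p.split('=', 1) for p in params.split('&'))
    headers = dict((k, urllib.unquote_plus(v)) for k, v in headers.items())
    return url, headers

print(split_kodi_url('http://cdn.example/v.mp4|User-Agent=Mozilla&Referer=http%3A%2F%2Fx'))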
Example #25
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
             # output='geturl' returns the resolved URL, or None on failure
             url = client.request(url, headers=headers, timeout='10', output='geturl')
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         r = client.request(url, headers=headers, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             if '123movieshd' in link or 'seriesonline' in link:
                 r = client.request(link, headers=headers, timeout='10')
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     try:
                         sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                         'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                     except:
                         pass
             else:
                 try:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                     if host not in hostDict:
                         raise Exception()
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                                     'debridonly': False})
                 except:
                     pass
         return sources
     except:
         return sources
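Example #26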
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
             data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'div', attrs={'class': 'box-info'})
             for post in posts:
                 data = client.parseDOM(post, 'a', ret='href')
                 u = [i for i in data if '/torrent/' in i]
                 for u in u:
                     match = '%s %s' % (title, hdlr)
                     match = match.replace('+', '-').replace(' ', '-').replace(':-', '-').replace('---', '-')
                     if match not in u: continue
                     u = self.base_link + u
                     r = client.request(u)
                     r = client.parseDOM(r, 'div', attrs={'class': 'torrent-category-detail clearfix'})
                     for t in r:
                         link = re.findall('href="magnet:(.+?)" onclick=".+?"', t)[0]
                         link = 'magnet:%s' % link
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         seeds = int(re.compile('<span class="seeds">(.+?)</span>').findall(t)[0])
                         if self.min_seeders > seeds:
                             continue
                         quality, info = source_utils.get_release_quality(link, link)
                         try:
                             size = re.findall('<strong>Total size</strong> <span>(.+?)</span>', t)
                             for size in size:
                                 size = '%s' % size
                                 info.append(size)
                         except BaseException:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
         except:
             return sources
         return sources
     except:
         return sources
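The magnet handling above unescapes HTML entities and drops everything from the first '&tr' tracker parameter. A tiny standalone version of that clean-up (Python 2 HTMLParser; html.parser on Python 3):

import HTMLParser

def clean_magnet(link):
    # decode &amp; entities, then cut the tracker list
    link = HTMLParser.HTMLParser().unescape(link)
    return link.split('&tr')[0]

print(clean_magnet('magnet:?xt=urn:btih:ABC&amp;dn=Name&amp;tr=udp://t'))
# magnet:?xt=urn:btih:ABC&dn=Name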
Example #27
    def __search(self, titles, year, imdb):
        try:
            query = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}),
                  dom_parser.parse_dom(i, 'div', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']),
                  re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r
                 if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

            url = None
            if len(r) > 1:
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(
                        data,
                        'a',
                        attrs={'name': re.compile('.*/tt\d+.*')},
                        req='name')
                    data = [
                        re.findall('.+?(tt\d+).*?', d.attrs['name'])
                        for d in data
                    ]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
Example #28
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			aliases = eval(data['aliases'])
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
				self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				url = client.request(url, timeout='10', output='geturl')
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
				link = "https:" + link if not link.startswith('http') else link
				if 'vidcloud' in link:
					r = client.request(link, timeout='10')
					match = getSum.findSum(r)
					for url in match:
						url = "https:" + url if not url.startswith('http') else url
						url = requests.get(url).url if 'api.vidnode' in url else url
						valid, host = source_utils.is_host_valid(url, hostDict)
						if valid:
							quality, info = source_utils.get_release_quality(url, url)
							sources.append(
								{'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url,
								 'direct': False, 'debridonly': False})
				else:
					valid, host = source_utils.is_host_valid(link, hostDict)
					if valid:
						quality, info = source_utils.get_release_quality(link, link)
						sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link,
						                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
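Example #29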
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break

            dom = dom_parser.parse_dom(result,
                                       'div',
                                       attrs={
                                           'class': 'links',
                                           'id': 'noSubs'
                                       })
            result = dom[0].content

            links = re.compile(
                '<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            for link in links[:5]:
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn'
                                   ).findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    # log_utils.log('JairoxDebug1: %s - %s' % (url2,r), log_utils.LOGDEBUG)
                    urls, host, direct = source_utils.check_directstreams(
                        r, hoster)
                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'en',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except:
            return sources
Example #30
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            id = data.get('id')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.get_episodes),
                                   post={
                                       'series_id': id,
                                       'mlang': 'de',
                                       'season': season,
                                       'episode': episode
                                   })
                r = json.loads(r).get('episode_links', [])
                r = [([i.get('id')], i.get('hostername')) for i in r]
            else:
                data.update({'lang': 'de'})
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.get_links),
                                   post=data)
                r = json.loads(r).get('links', [])
                r = [(i.get('ids'), i.get('hoster')) for i in r]

            for link_ids, hoster in r:
                valid, host = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                for link_id in link_ids:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': self.out_link % (link_id, hoster),
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            return sources