Example #1
 def play(self, url):
     try:
         if 'm3u8' in url:
             link = '%s|User-Agent=%s&Referer=%s' % (url, client.agent(),
                                                     url)
             control.execute('PlayMedia(%s)' % link)
         else:
             stream = cfscrape.get(url, headers=self.headers).content
             streams = re.findall(
                 'return\(\[(.+?)\].join.+? (.+?).join.+? document.getElementById\("(.+?)"\).innerHTML',
                 stream)
             for item in streams:
                 url2 = re.findall('var (.+?) = \[(.+?)\]', stream,
                                   re.DOTALL)
                 for code in url2:
                     if item[1].replace('+ ', '') in code[0]:
                         url3 = re.findall('id=(.+?)>(.+?)</span><span',
                                           stream, re.DOTALL)
                         for code2 in url3:
                             if item[2] in code2[0]:
                                 link = item[0].replace(',', '').replace(
                                     '"', '').replace('\\', '').replace(
                                         '+', '') + code[1].replace(
                                             ',', '').replace('"',
                                                              '') + code2[1]
                                 link = '%s|User-Agent=%s' % (
                                     link, client.agent())
                                 control.execute('PlayMedia(%s)' % link)
     except:
         return
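Several of these examples hand Kodi a playback URL with request headers appended after a pipe, in the form url|Header1=value1&Header2=value2. A minimal sketch of building such a link (the helper name with_headers is illustrative, not from the examples):

import urllib

def with_headers(url, headers):
    # Kodi's player reads 'url|Key=value&Key2=value2'; urlencode the values
    # so characters inside the User-Agent or Referer do not break parsing.
    return '%s|%s' % (url, urllib.urlencode(headers))

# usage, assuming client.agent() returns a User-Agent string as above:
# link = with_headers(stream_url, {'User-Agent': client.agent(), 'Referer': page_url})
# control.execute('PlayMedia(%s)' % link)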
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cfscrape.get(search_url, headers={'User-Agent': client.agent()}).content
                r = client.parseDOM(r, 'div', {'class': 'item'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = cfscrape.get(url, headers={'User-Agent': client.agent()}).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
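The url argument above is itself a query string that was packed elsewhere with urllib.urlencode; a minimal standalone sketch of that roundtrip (Python 2 stdlib only):

import urllib
import urlparse

packed = urllib.urlencode({'tvshowtitle': 'Some Show', 'year': '2018'})
qs = urlparse.parse_qs(packed)
# parse_qs maps each key to a list of values; the dict comprehension in
# episode() flattens that back to single values, defaulting to ''.
data = dict([(i, qs[i][0]) if qs[i] else (i, '') for i in qs])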
Example #3
def resolve(url):
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]

        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)

        js = re.compile('src\s*=\s*[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[-1]
        js = client.request(js)

        try:
            token = re.findall('[\'|\"](.+?\.php)[\'|\"]',js)[-1]
            token = urlparse.urljoin('http://p2pcast.tv', token)
            token = client.request(token, referer=page, headers={'User-Agent': client.agent(), 'X-Requested-With': 'XMLHttpRequest'})
            token = re.compile('[\'|\"]token[\'|\"]\s*:\s*[\'|\"](.+?)[\'|\"]').findall(token)[0]
        except:
            token = ''


        try:
            swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(js)[-1]
        except:
            swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'


        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url) + token
        url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': swf})

        return url
    except:
        return
Example #4
def gamato_links(url, name, poster):  #12
    try:
        data = client.request(url)
        desc = client.parseDOM(data, 'div', attrs={'itemprop':
                                                   'description'})[0]
        desc = re.sub('<.+?>', '', desc)
        desc = desc.encode('utf-8', 'ignore')
        try:
            match = re.findall(
                '''file\s*:\s*['"](.+?)['"],poster\s*:\s*['"](.+?)['"]\}''',
                data, re.DOTALL)[0]
            link, _poster = match[0], match[1]
        except IndexError:
            frame = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})[0]
            frame = client.parseDOM(frame, 'iframe', ret='src')[0]
            if 'cloud' in frame:
                #sources: ["http://cloudb.me/4fogdt6l4qprgjzd2j6hymoifdsky3tfskthk76ewqbtgq4aml3ior7bdjda/v.mp4"],
                match = client.request(frame)
                try:
                    from resources.lib.modules import jsunpack
                    if jsunpack.detect(match):
                        match = jsunpack.unpack(match)
                    match = re.findall('sources:\s*\[[\'"](.+?)[\'"]\]', match,
                                       re.DOTALL)[0]
                    match += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                        client.agent()), frame)
                except IndexError:
                    from resources.lib.modules import jsunpack as jsun
                    if jsun.detect(match):
                        match = jsun.unpack(match)
                        match = re.findall('sources:\s*\[[\'"](.+?)[\'"]\]',
                                           match, re.DOTALL)[0]
                        match += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                            client.agent()), frame)

            else:
                match = frame
            link, _poster = match, poster

        try:
            fanart = client.parseDOM(data, 'div', attrs={'class': 'g-item'})[0]
            fanart = client.parseDOM(fanart, 'a', ret='href')[0]
        except IndexError:
            fanart = FANART
        try:
            trailer = client.parseDOM(data, 'iframe', ret='src')
            trailer = [i for i in trailer if 'youtube' in i][0]
            addDir('[B][COLOR lime]Trailer[/COLOR][/B]', trailer, 100,
                   iconimage, fanart, str(desc))
        except BaseException:
            pass

        addDir(name, link, 100, poster, fanart, str(desc))
    except BaseException:
        return
    views.selectView('movies', 'movie-view')
Example #5
 def sources(self, url, hostDict, hostprDict):
     try:
         self._sources = []
         if url is None:
             return self._sources
         if debrid.status() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         self.title = data[
             'tvshowtitle'] if 'tvshowtitle' in data else data['title']
         self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else data['year']
         self.hostDict = hostprDict + hostDict
         items = []
         urls = []
         posts = []
         links = []
         if 'tvshowtitle' not in data:
             url = urlparse.urljoin(self.base_link,
                                    self.search_link % data['imdb'])
             r = client.request(url, headers={'User-Agent': client.agent()})
             posts = client.parseDOM(r, 'item')
         else:
             url = urlparse.urljoin(
                 self.base_link, self.search_link %
                 (cleantitle.geturl(self.title).replace('-', '+') + '+' +
                  self.hdlr))
             r = client.request(url, headers={'User-Agent': client.agent()})
             posts = client.parseDOM(r, 'item')
         if not posts:
             return self._sources
         for post in posts:
             try:
                 t = client.parseDOM(post, 'title')[0]
                 u = client.parseDOM(post, 'link')[0]
                 s = re.search(
                     '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)
                 s = s.groups()[0] if s else '0'
                 items += [(t, u, s)]
             except:
                 pass
         items = set(items)
         threads = []
         for i in items:
             threads.append(workers.Thread(self._get_sources, i))
         [i.start() for i in threads]
         [i.join() for i in threads]
         return self._sources
     except:
         return self._sources
Example #6
def more_vidlink(link, hostDict):
    if "vidlink.org" in link:
        sources = []  # By Shellc0de
        try:
            ua = {'User-Agent': client.agent()}
            postID = link.split('/embed/')[1]
            post_link = 'https://vidlink.org/embed/update_views'
            payload = {'postID': postID}
            headers = ua
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = link
            ihtml = client.request(post_link, post=payload, headers=headers)
            linkcode = jsunpack.unpack(ihtml).replace('\\', '')
            try:
                extra_link = re.findall(r'var oploadID="(.+?)"', linkcode)[0]
                oload = 'https://openload.co/embed/' + extra_link
                sources.append({
                    'source': 'openload.co',
                    'quality': '1080p',
                    'language': 'en',
                    'url': oload,
                    'direct': False,
                    'debridonly': False
                })
            except Exception:
                pass
            links = re.findall(r'var file1="(.+?)"', linkcode)[0]
            stream_link = links.split('/pl/')[0]
            headers = {
                'Referer': 'https://vidlink.org/',
                'User-Agent': client.agent()
            }
            response = client.request(links, headers=headers)
            urls = re.findall(
                r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', response)
            if urls:
                for qual, url in urls:
                    url = stream_link + '/' + url + '.m3u8'
                    quality, info = source_utils.get_release_quality(qual, url)
                    sources.append({
                        'source': 'GVIDEO',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return []
    return []
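The regex applied to response above targets an HLS master playlist: the [A-Z]{10} run matches the literal RESOLUTION attribute of an #EXT-X-STREAM-INF line. A sample of the input shape it expects (illustrative, not from the source):

# #EXT-X-STREAM-INF:BANDWIDTH=2000000,RESOLUTION=1280x720,NAME="720"
# /hls/720.m3u8
# The first capture group takes the vertical resolution ('720'), the second
# the variant's path stem, which is re-joined with stream_link in the loop above.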
Example #7
 def resolve(self, url):
     try:
         urldata = urlparse.parse_qs(url)
         urldata = dict((i, urldata[i][0]) for i in urldata)
         post = {
             'ipplugins': 1,
             'ip_film': urldata['data-film'],
             'ip_server': urldata['data-server'],
             'ip_name': urldata['data-name'],
             'fix': "0"
         }
         cfscrape.headers.update({
             'Referer': urldata['url'],
             'X-Requested-With': 'XMLHttpRequest',
             'User-Agent': client.agent()
         })
         p1 = cfscrape.post(
             'http://www6.123123movies.net/ip.file/swf/plugins/ipplugins.php',
             data=post,
             headers={
                 'User-Agent': client.agent()
             }).content
         p1 = json.loads(p1)
         p2 = cfscrape.get(
             'http://www6.123123movies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0'
             % (p1['s'], urldata['data-server']),
             headers={
                 'User-Agent': client.agent()
             }).content
         p2 = json.loads(p2)
         p3 = cfscrape.get(
             'http://www6.123123movies.net/ip.file/swf/ipplayer/api.php?hash=%s'
             % (p2['hash']),
             headers={
                 'User-Agent': client.agent()
             }).content
         p3 = json.loads(p3)
         n = p3['status']
         if n is False:
             p2 = cfscrape.get(
                 'http://www6.123123movies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1'
                 % (p1['s'], urldata['data-server']),
                 headers={
                     'User-Agent': client.agent()
                 }).content
             p2 = json.loads(p2)
         url = p2["data"].replace("\/", "/")
         if not url.startswith('http'):
             url = "https:" + url
         return url
     except:
         return
Example #8
    def _get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'id': 'torrent_latest_torrents'})

            for post in posts:
                data = client.parseDOM(post, 'a', attrs={'title': 'Torrent magnet link'}, ret='href')[0]
                link = urllib.unquote(data).decode('utf8').replace('https://mylink.me.uk/?url=', '')
                name = urllib.unquote_plus(re.search('dn=([^&]+)', link).groups()[0])
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'

                self.items.append((name, link, size))

            return self.items
        except BaseException:
            return self.items
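The size block above (repeated in several _get_items variants below) normalizes a matched size string such as '1,4 GB' or '700 MB' to gigabytes. The same logic as a standalone helper, shown only as a sketch (the name normalize_size is not from the source):

import re

def normalize_size(size):
    # '1,4 GB' -> '1.40 GB'; '700 MB' -> '0.68 GB'
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
    return '%.2f GB' % value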
Example #9
	def links(self,url, img=' '):
		out=[]
		ref=url
		orig_title = xbmc.getInfoLabel('ListItem.Title')
		html = client.request(url)
		link = re.findall("src='(https://videoapi.my.mail.ru/.+?)'",html)[0]
		link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
		link = link.replace('html','json')
		cookieJar = cookielib.CookieJar()
		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
		conn = urllib2.Request(link)
		connection = opener.open(conn)
		f = connection.read()
		connection.close()
		js = json.loads(f)
		for cookie in cookieJar:
			token = cookie.value
		js = js['videos']
		for x in js:
			url = x['url'] 
			host  = urlparse.urlparse(url).netloc
			url = url + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref, 'Host':host, 'X-Requested-With':constants.get_shockwave()} ))
			title = orig_title + ' ' + x['key']
			out.append((title,url,img))
		return out
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)

            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = dom_parser2.parse_dom(r, 'li', {'class': re.compile('.+?'), 'id': re.compile('comment-.+?')})
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts: threads.append(workers.Thread(self._get_sources, i.content))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Example #11
def resolve(url):
    try:
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except:
            referer=url
        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)


        try:
            swf = re.compile('src\s*=[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[0]
            swf = client.request(swf)
            swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(swf)[0]
        except:
            swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'


        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url)
        url = '%s|User-Agent=%s&Referer=%s' % (url, urllib.quote_plus(client.agent()), urllib.quote_plus(swf))

        return url
    
    except:
        return
Example #12
def resolve(url):
	try:
		if '.mp4' in url:
			url = url.replace('https','http')
			url += '|%s' % urllib.urlencode({'User-agent':client.agent(),'X-requested-with':constants.get_shockwave()})
			return url
		if url.startswith('//'):
			url = 'http:' + url
		result = client.request(url)
		html = result
		result = json.loads(result)
		try:
			f4m=result['content']['media']['f4m']
		except:
			reg = re.compile('"src":"http://(.+?)\.f4m"')
			pom = re.findall(reg, html)[0]
			f4m = 'http://' + pom + '.f4m'

		result = client.request(f4m)
		soup = webutils.bs(result)
		try:
			base=soup.find('baseURL').getText()+'/'
		except:
			base=soup.find('baseurl').getText()+'/'

		linklist = soup.findAll('media')
		link = linklist[0]
		url = base + link['url']
		return url.replace('https','http')
	except:
		return
Example #13
 def resolve(self, url):
     if self.base_link in url:
         url = cfscrape.get(url, headers={'User-Agent': client.agent()}).content
         v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
         b64 = base64.b64decode(v)
         url = client.parseDOM(b64, 'iframe', ret='src')[0]
     return url
Example #14
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = cfscrape.get(url, headers={'User-Agent': client.agent()}).content
         try:
             v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
             b64 = base64.b64decode(v)
             url = client.parseDOM(b64, 'iframe', ret='src')[0]
             try:
                 host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
             except:
                 pass
         except:
             pass
         r = client.parseDOM(r, 'div', {'class': 'server_line'})
         r = [(client.parseDOM(i, 'a', ret='href')[0],
               client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
         if r:
             for i in r:
                 try:
                     host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     if 'other' in host: continue
                     sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'), 'direct': False, 'debridonly': False})
                 except:
                     pass
         return sources
     except Exception:
         return sources
Example #15
    def _get_items(self, url):
        items = []
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
            posts = [i for i in posts if not 'racker:' in i]
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size

                except BaseException:
                    size = '0'

                items.append((name, url, size))
            return items
        except BaseException:
            return items
Example #16
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url == None: return sources

			url = urlparse.urljoin(self.base_link, url)

			h = {'User-Agent': client.agent()}

			r = client.request(url, headers=h, output='extended')

			s = client.parseDOM(r[0], 'ul', attrs = {'class': 'episodes'})
			s = client.parseDOM(s, 'a', ret='data.+?')
			s = [client.replaceHTMLCodes(i).replace(':', '=').replace(',', '&').replace('"', '').strip('{').strip('}') for i in s]

			for u in s:
				try:
					url = '/io/1.0/stream?%s' % u
					url = urlparse.urljoin(self.base_link, url)

					r = client.request(url)
					r = json.loads(r)

					url = [i['src'] for i in r['streams']]

					for i in url:
						try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
						except: pass
				except:
					pass

			return sources
		except:
			return sources
Example #17
def resolve(url):
	initial = url
	libPath = os.path.join(control.addonPath, 'resources/lib/modules')
	serverPath = os.path.join(libPath, 'livestreamerXBMCLocalProxy.py')
	try:
		import requests
		requests.get('http://127.0.0.1:19000/version')
		proxyIsRunning = True
	except:
		# the local proxy is not answering, so start it
		proxyIsRunning = False
		xbmc.executebuiltin('RunScript(' + serverPath + ')')


	url = re.findall('[\"\']([^\"\']+)',url)[0]
	try:
		headers = re.findall('-http-headers=([^\s]+)',url)[0]
	except:
		headers = urllib.urlencode({'User-agent':client.agent()})

	url += '|' + headers

	try:
		cookies = re.findall('-http-cookie=([^\s]+)',initial)[0]
		url += '|' + cookies
	except:
		pass

	url = base64.b64encode(url)

	url = 'http://127.0.0.1:19000/livestreamer/' + url

	return url
Example #18
def resolve(name, url, iconimage, description):
    host = url
    if host.split('|')[0].endswith('.mp4') and 'clou' in host:
        stream_url = host + '|User-Agent=%s&Referer=%s' % (urllib.quote_plus(
            client.agent(), ':/'), GAMATO)
        name = name
    elif 'tenies-online' in host:
        stream_url = client.request(host)
        stream_url = client.parseDOM(stream_url,
                                     'a', {'id': 'link'},
                                     ret='href')[0]
        stream_url = evaluate(stream_url)
    else:
        stream_url = evaluate(host)
        name = name.split(' [B]|')[0]
    try:
        liz = xbmcgui.ListItem(name,
                               iconImage="DefaultVideo.png",
                               thumbnailImage=iconimage)
        liz.setInfo(type="Video",
                    infoLabels={
                        "Title": name,
                        "Plot": description
                    })
        liz.setProperty("IsPlayable", "true")
        liz.setPath(str(stream_url))
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, liz)
    except BaseException:
        control.infoDialog(Lang(32012), NAME)
Example #19
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['www.mkvhub.net', 'www.mkvhub.com']
     self.base_link = 'https://www.mkvhub.net'
     self.search_link = '/?s=%s'
     self.headers = {'User-Agent': client.agent()}
Example #20
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            name = client.replaceHTMLCodes(name)
            l = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''
            for i in l:
                s += i.content
            # search the concatenated block text, not just the last element
            urls = re.findall(r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''', s, flags=re.MULTILINE|re.DOTALL)
            urls = [i for i in urls if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except BaseException:
                    pass
                info = ' | '.join(info)
                self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
        except:
            pass
Example #21
 def __init__(self):
     self.list = []
     self.base_link = 'https://ustvgo.tv'
     self.headers = {
         'User-Agent': client.agent(),
         'Referer': self.base_link
     }
Example #22
    def _get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'

                self.items.append((name, link, size))
            return self.items
        except BaseException:
            return self.items
Example #23
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.tvsearch = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&token={0}&mode=search&search_string={1}&{2}'
     self.msearch = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&token={0}&mode=search&search_imdb={1}&{2}'
     self.token = 'https://torrentapi.org/pubapi_v2.php?app_id=Torapi&get_token=get_token'
     self.headers = {'User-Agent': client.agent()}
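torrentapi requires fetching a short-lived token before searching, which is why both search templates carry a {0} slot. A sketch of how the three URLs above would likely be combined (the method body and the extra 'format=json_extended' parameter are assumptions, not shown in the source):

import json
import urllib

def _search(self, query):
    # 1) obtain a token, 2) substitute it and the query into the template
    token = json.loads(client.request(self.token))['token']
    url = self.tvsearch.format(token, urllib.quote(query), 'format=json_extended')
    return json.loads(client.request(url, headers=self.headers))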
Example #24
    def _get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size

                except BaseException:
                    size = '0'

                self.items.append((name, link, size))
            return self.items
        except BaseException:
            return self.items
Example #25
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['torrentgalaxy.to']
     self.base_link = 'https://torrentgalaxy.to/'
     self.search_link = 'torrents.php?search=%s'
     self.headers = {'User-Agent': client.agent()}
Example #26
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['max-rls.com']
     self.base_link = 'http://max-rls.com'
     self.search_link = '/?s=%s&submit=Find'
     self.headers = {'User-Agent': client.agent()}
Example #27
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['www.link4file.com']
     self.base_link = 'http://www.link4file.com'
     self.search_link = '/download-search.php?q=%s&log=1&x=59&y=22'
     self.headers = {'User-Agent': client.agent()}
Example #28
    def get_session(self):
        session = requests.Session()
        session.headers = {
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self.base,
            'User-agent': client.agent()
        }
        cookies = self.load_cookies()
        if not cookies:
            log('Getting new cookies...')
            self.login(session)
            with open(cookieFile, 'wb') as f:
                pickle.dump(session.cookies, f)

            cookies = session.cookies

        session.cookies = cookies
        test = session.get(self.base).text
        if control.setting('vaughn_user') in test:
            log('Logged in !')
            return session
        else:
            log('Getting new cookies...')
            self.login(session)
            with open(cookieFile, 'wb') as f:
                pickle.dump(session.cookies, f)
            return session
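get_session() depends on a load_cookies() counterpart that is not shown. A plausible implementation given the pickle.dump calls above (a sketch, assuming the same cookieFile path):

import os
import pickle

def load_cookies(self):
    # counterpart to the pickle.dump(...) calls in get_session();
    # returns None when no cookie jar has been saved yet
    if not os.path.exists(cookieFile):
        return None
    with open(cookieFile, 'rb') as f:
        return pickle.load(f)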
Example #29
 def __init__(self):
     self.priority = 1
     self.language = ['en']  # Removed  seriesonline.io  series9.co  series9.io
     self.domains = ['series9.to']
     self.base_link = 'https://www7.series9.to'
     self.search_link = '/movie/search/%s'
     self.headers = {'User-Agent': client.agent()}
Example #30
 def play(self, url):
     try:
         link = client.request(url, headers=self.headers)
         link = [
             lnk for lnk in re.compile("<iframe.+iframe>").findall(link)
             [0].split("'") if lnk.find("tvguide") > 0
         ][0]
         link = client.request(link, headers=self.headers)
         code = link[link.find("encrypted"):]
         code = code[:code.find("</script>")]
         file_code = re.findall(r"file.+", code)[0]
         file_code = "var link = " + file_code[file_code.find(":") +
                                               1:file_code.find(",")]
         code = code[:code.find("var player")]
         code = code + file_code
         crypto_min = self.base_link + "/Crypto/crypto.min.js"
         additional_code = client.request(crypto_min, headers=self.headers)
         code = additional_code + code
         context = js2py.EvalJs(enable_require=True)
         link = context.eval(code)
         link = '%s|User-Agent=%s&Referer=%s' % (link, client.agent(),
                                                 self.base_link)
         control.execute('PlayMedia(%s)' % link)
     except Exception as e:
         xbmc.log(str(e), level=xbmc.LOGNOTICE)
         return
Example #31
 def __init__(self):
     self.list = []
     self.base_link = 'http://myustv.com'
     self.headers = {
         'User-Agent': client.agent(),
         'Referer': self.base_link
     }
Example #32
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['123123movies.net']
     self.base_link = 'http://www6.123123movies.net'
     self.search_link = '/watch/%s-%s-123movies.html'
     self.headers = {'User-Agent': client.agent()}
Example #33
def resolve(url):
    try:
        headers = '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': url})

        result = client.request(url, close=False)
        result = result.replace('\n','')

        url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
        url = urlparse.urljoin('http://veehd.com', url)

        result = client.request(url, close=False)

        i = client.parseDOM(result, 'iframe', ret='src')
        if len(i) > 0:
            i = urlparse.urljoin('http://veehd.com', i[0])
            client.request(i, close=False)
            result = client.request(url)

        url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
        url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
        url += re.compile('"url" *: *"(.+?)"').findall(result)
        url = urllib.unquote(url[0])
        url += headers

        return url
    except:
        return
Example #34
 def __init__(self):
     self.priority = 0
     self.language = ['en']
     self.domains = ['https://solidtorrents.net']
     self.base_link = 'https://solidtorrents.net'
     self.search_link = '/api/v1/search?q=%s&category=all&sort=size'
     self.headers = {'User-Agent': client.agent()}
Example #35
 def sources(self, url, hostDict, hostprDict):
     try:
         self._sources = []
         if url == None:
             return self._sources
         if debrid.status() is False:
             raise Exception()
         if debrid.tor_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         self.title = data[
             'tvshowtitle'] if 'tvshowtitle' in data else data['title']
         self.hdlr = 'S%02dE%02d' % (
             int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         if 'tvshowtitle' in data:
             url = self.search.format('8', urllib.quote(query))
         else:
             url = self.search.format('4', urllib.quote(query))
         self.hostDict = hostDict + hostprDict
         headers = {'User-Agent': client.agent()}
         _html = client.request(url, headers=headers)
         threads = []
         for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
             threads.append(workers.Thread(self._get_items, i))
         [i.start() for i in threads]
         [i.join() for i in threads]
         return self._sources
     except:
         return self._sources
Example #36
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)

            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = dom_parser2.parse_dom(r, 'li', {'class': re.compile('.+?'), 'id': re.compile('comment-.+?')})
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts: threads.append(workers.Thread(self._get_sources, i.content))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Example #37
 def play(self, url):
     try:
         if 'm3u8' in url:
             link = '%s|User-Agent=%s&Referer=%s' % (url, client.agent(),
                                                     url)
             control.execute('PlayMedia(%s)' % link)
         else:
             stream = cfscrape.get(url, headers=self.headers).content
             streams = re.findall('var whistler = "(.+?)"', stream)
             for item in streams:
                 link = self.base_link + item
                 link = '%s|User-Agent=%s&Referer=%s' % (
                     link, client.agent(), self.base_link)
                 control.execute('PlayMedia(%s)' % link)
     except:
         return
Example #38
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['thekat.app', 'thekat.nl', 'kickass2.cc']
     self.base_link = 'https://thekat.app/'
     self.search = 'https://thekat.app/usearch/{0}'
     self.headers = {'User-Agent': client.agent()}
Example #39
 def _get_items(self, url):
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'tr', attrs={'id': 'torrent_latest_torrents'})
         for post in posts:
             data = client.parseDOM(post, 'a', attrs={'title': 'Torrent magnet link'}, ret='href')[0]
             link = urllib.unquote(data).decode('utf8').replace('https://mylink.me.uk/?url=', '')
             name = urllib.unquote_plus(re.search('dn=([^&]+)', link).groups()[0])
             t = name.split(self.hdlr)[0]
             if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue
             try:
                 y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                 div = 1 if size.endswith('GB') else 1024
                 size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             self.items.append((name, link, size))
         return self.items
     except BaseException:
         return self.items
Example #40
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['cmovies.video', 'cmovieshd.bz']
     self.base_link = 'https://cmovies.tv'
     self.search_link = '/film/%s/watching.html?ep=0'
     self.headers = {'User-Agent': client.agent()}
Example #41
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['fmoviesto.to']
     self.base_link = 'https://www4.fmovies2.io'
     self.search_link = '/search.html?keyword=%s'
     self.headers = {'User-Agent': client.agent()}
Example #42
	def __prepare_channels(self,channels):
		new=[]
		for channel in channels:
			url = channel[0]
			img = channel[1] + '|%s' % urllib.urlencode({'Referer':self.url,'User-agent':client.agent()})
			title = channel[2].encode('utf-8')
			
			new.append((url,title,img))

		return new
Example #43
def resolve(url):
	try:
		html = client.request(url)
		url2 = client.parseDOM(html,'iframe',ret='src')[0]
		html = client.request(url2)
		video = re.findall('file:[\"\'](.+?)[\"\']',html)[0]
		video+='|%s' %urllib.urlencode({'User-agent':client.agent(),'Referer':url2})
		return video
	except:
		return []
Example #44
def parse_biggestplayer(params):
    try:
        url = params["url"]
        ref = url
        referer = params["referer"]
                
        html = client.request(url,referer=referer)
        url = re.search('file: "(.+?)"', html).group(1)
        url += '|%s' %urllib.urlencode({'User-agent':client.agent(),'Referer':ref,'X-Requested-With':constants.get_shockwave()})
        return url
    except:
        return ""
Example #45
def resolve(url):
	try:
		out = []
		referer = url
		url = url.replace('embed','videos').replace('sos','videos').replace('/video.mp4','')
		html = client.request(url)
		urls = re.findall('src=[\"\']([^\"\']+)[\"\'] type=[\"\']video/mp4[\"\'] label=[\"\']([^\"\']+)',html)
		for url in urls:
			host = urlparse.urlparse(url[0]).netloc
			ur = url[0] + '|%s' % urllib.urlencode({'Referer':referer,'User-agent':client.agent(),'Host':host}).replace('%3D','=')
			q = url[1]
			out.append((ur,q))
		return out
	except:
		return []
Example #46
def resolve(url):
    try:
        page = re.compile('//(.+?)/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
        page = 'http://%s/embed/%s' % (page[0], page[1])

        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = page

        result = client.request(page, referer=referer)
        
        unpacked = ''
        packed = result.split('\n')
        
        for i in packed: 
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = urllib.unquote_plus(result)
        
        result = re.sub('\s\s+', ' ', result)
        url = client.parseDOM(result, 'iframe', ret='src')[-1]
        url = url.replace(' ', '').split("'")[0]
        ch = re.compile('ch=""(.+?)""').findall(str(result))
        ch = ch[0].replace(' ','')
        sw = re.compile(" sw='(.+?)'").findall(str(result))
        url = url+'/'+ch+'/'+sw[0]
       
        result = client.request(url, referer=referer)
        file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
        print file
        try:
            if not file.startswith('http'): raise Exception()
            url = client.request(file, output='geturl')
            print url
            if not '.m3u8' in url: raise Exception()
            url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': file})
            return url
            
        except:
            pass

        strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
        swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
        
        url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
        return url
    except:
        return
Example #47
def getToken():
	username = control.setting('hrti_user')
	password = control.setting('hrti_pass')
	if username=='' or password=='':
		control.infoDialog('Unesite korisničko ime i lozinku za hrti.hr!')  # "Enter your hrti.hr username and password!"
		return 'x','x'
	session=requests.Session()
	headers={}
	cookies=session.cookies
	headers['cookies']=cookies
	headers['User-agent']=client.agent()
	uuid_url = 'https://hrti.hrt.hr/client_api.php/config/identify/format/json'
	resp_data = session.post(uuid_url, data = '{"application_publication_id":"all_in_one"}' , headers=headers).content
	data=json.loads(resp_data)
	uuid = data['uuid']
	put_data = '{"application_publication_id":"all_in_one","uuid":"%s","screen_height":1080,"screen_width":1920,"os":"Windows","os_version":"NT 4.0","device_model_string_id":"chrome 42.0.2311.135","application_version":"1.1"}'%uuid
	resp_data = session.put(uuid_url, data = put_data , headers=headers).text
	data=json.loads(resp_data)
	session_id = data['session_id']

	login_data = '{"username":"******","password": "******"}'%(username, password)
	login_url = 'https://hrti.hrt.hr/client_api.php/user/login/session_id/%s/format/json'%session_id
	resp = session.post(login_url, data = login_data, headers = headers)
	data = json.loads(resp.text)
	try:
		session_token = data['session_token']
	except:
		control.infoDialog('Provjerite korisničko ime i lozinku za hrti.hr!')  # "Check your hrti.hr username and password!"
		return 'x','x'
	stream_token = data['secure_streaming_token']
	user_pin = data['pin_code']
	user_id = data['id']
	user_bitrate = int(data['bitrate'])
	subscriber_id = data['subscriber_id']
	external_id = data['external_id']
	first_name = data['first_name']
	last_name = data['last_name']

	str_token = stream_token.split('/')[0]
	expire = stream_token.split('/')[-1]
	return str_token,expire
Example #48
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None:
                return self._sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', urllib.quote(query))
            else:
                url = self.search.format('4', urllib.quote(query))

            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except BaseException:
            return self._sources
Example #49
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            f = client.parseDOM(result, 'div', attrs = {'class': 'movieplay'})
            f = [re.findall('(?:\"|\')(http.+?miradetodo\..+?)(?:\"|\')', i) for i in f]
            f = [i[0] for i in f if len(i) > 0]

            links = []
            dupes = []

            for u in f:

                try:
                    id = urlparse.parse_qs(urlparse.urlparse(u).query)['id'][0]

                    if id in dupes: raise Exception()
                    dupes.append(id)

                    try:
                        if 'acd.php' in u: raise Exception()

                        headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}

                        post = urllib.urlencode({'link': id})

                        url = urlparse.urljoin(self.base_link, '/stream/plugins/gkpluginsphp.php')
                        url = client.request(url, post=post, headers=headers)
                        url = json.loads(url)['link']

                        if type(url) is list:
                            url = [{'url': i['link'], 'quality': '1080p'} for i in url if '1080' in i['label']] + [{'url': i['link'], 'quality': 'HD'} for i in url if '720' in i['label']]
                        else:
                            url = [{'url': url, 'quality': 'HD'}]

                        for i in url:
                            try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i['url'])[0]['quality'], 'url': i['url']})
                            except: pass

                        continue
                    except:
                        pass

                    try:
                        result = client.request(u, headers={'Referer': r})

                        url = re.findall('AmazonPlayer.*?file\s*:\s*"([^"]+)', result, re.DOTALL)[0]

                        class NoRedirection(urllib2.HTTPErrorProcessor):
                            def http_response(self, request, response): return response

                        o = urllib2.build_opener(NoRedirection)
                        o.addheaders = [('User-Agent', client.agent())]
                        r = o.open(url) ; url = r.headers['Location'] ; r.close()

                        if 'miradetodo.' in url: raise Exception()

                        links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                    except:
                        pass
                except:
                    pass


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MiraDeTodo', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #50
	def links(self,url, img=' '):
		if self.base not in url:
			url = self.base + url
		ref = url
		out = []
		html = client.request(url)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)

		dailys = re.findall('src=[\"\'](//(?:www.)?dailymotion.com/embed/video/[^\"\']+)[\"\']',html)
		vks = re.findall('src=[\"\'](//(?:www.)?vk.com/video_ext.php[^\"\']+)[\"\']',html)
		gvid720 = re.findall('src=[\"\'](https?://.+?google.+?/[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']720p[\"\']',html)
		gvid360 = re.findall('src=[\"\'](https?://.+?google.+?[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']360p[\"\']',html)
		mailru = re.findall('(https?://(?:www.)?videoapi.my.mail.ru/videos/[^\"\']+)[\"\']',html)
		opnld = re.findall('(https?://(?:www.)?openload.co/[^\"\']+)[\"\']',html)
		uptstrm = re.findall('(https?://(?:www(?:[\d+])?.)?uptostream.com[^\"\']+)[\"\']',html)
		veevr = re.findall('(https?://(?:www.)?veevr.com[^\"\']+)[\"\']',html)
		plywr = re.findall('(//config.playwire.com/[^\"\']+)[\"\']',html)
		speedvideo = re.findall('(https?://(?:www.)?speedvideo.net/[^\"\']+)[\"\']',html)
		videowood = re.findall('(https?://(?:www.)?videowood.tv/video/[^\"\']+)[\"\']',html)
		wstream = re.findall('(https?://(?:www.)?wstream.video/[^\"\']+)[\"\']',html)
		urls = []

		i = 0
		for v in plywr:
			i+=1
			title = 'Playwire video %s'%i
			url = v 
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in veevr:
			i+=1
			
			url = v
			from resources.lib.resolvers import veevr
			urlx = veevr.resolve(url)
			log(urlx)
			for url in urlx:
				if url[0] not in urls:
					title = 'Veevr video %s'%url[1].replace('<sup>HD</sup>','')
					out.append((title,url[0],icon_path(info().icon)))
					urls.append(url[0])

		i = 0
		for v in uptstrm:
			from resources.lib.resolvers import uptostream
			urlx =  uptostream.resolve(v)
			log(urlx)
			i+=1
			for u in urlx:
				q = u[1]
				title = 'Uptostream video n.%s %s'%(i,q)
				url = u[0] 
				if url not in urls:
					out.append((title,url,icon_path(info().icon)))
					urls.append(url)

		i = 0
		for v in dailys:
			i+=1
			title = 'Dailymotion video %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in vks:
			i+=1
			title = 'VK.com video %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in gvid720:
			i+=1
			title = 'GVIDEO link %s 720p'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in gvid360:
			i+=1
			title = 'GVIDEO link %s 360p'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in opnld:
			i+=1
			title = 'Openload link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in speedvideo:
			i+=1
			title = 'Speedvideo link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)
		i = 0
		for v in videowood:
			i+=1
			title = 'Videowood link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)
		i = 0
		for v in wstream:
			i+=1
			title = 'Wstream link %s'%i
			url = v + '?referer=' + ref
			if url not in urls:

				out.append((title,url,icon_path(info().icon)))
				urls.append(url)


		i = 0
		for v in mailru:
			link = v
			i+=1
			title = 'Mail.ru video %s'%i
			link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('html','json')
			cookieJar = cookielib.CookieJar()
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
			conn = urllib2.Request(link)
			connection = opener.open(conn)
			f = connection.read()
			connection.close()
			js = json.loads(f)
			for cookie in cookieJar:
				token = cookie.value
			js = js['videos']
			for x in js:
				url = x['url'] + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref} ))
				title = 'Mail.ru video ' + x['key']
				if url not in urls:
					out.append((title,url,icon_path(info().icon)))
					urls.append(url)
		return out
Example #51
	def resolve(self,url):
		try:
			url = self.base + url
			html = client.request(url)
			url = 'http:/' + re.findall('src=[\"\']([^\"\']+)[\"\'].+?mpeg',html)[0]
			url += '|%s' % urllib.urlencode({'X-Requested-With':constants.get_shockwave(),'User-agent':client.agent()})
			return url
		except:
			return url
Example #52
def google(url):
    try:
        if any(x in url for x in ['youtube.', 'docid=']): url = 'https://drive.google.com/file/d/%s/view' % re.compile('docid=([\w-]+)').findall(url)[0]

        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        try:
            headers['Cookie'] = result[2]['Set-Cookie']
        except:
            pass

        result = result[0]

        if netloc == 'docs' or netloc == 'drive':
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i, append_height=True) for i in result], [])


        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result], [])


        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]

            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1:
                result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i, append_height=True) for i in result], [])


        elif netloc == 'plus':
            id = (urlparse.urlparse(url).path).split('/')[-1]

            result = result.replace('\r', '').replace('\n', '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result], [])

        result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)

        url = []
        for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                url += [[i for i in result if i.get('quality') == q][0]]
            except:
                pass

        for i in url:
            i.pop('height', None)
            i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})

        if not url: return
        return url
    except:
        return
Exemplo n.º 53
0
	def resolve(self,url):
		try:
			referer,id = url.split('##')
			s = requests.Session()
			s.headers = {'Accept':'application/json, text/javascript, */*; q=0.01','Host':'www.streamgaroo.com','Referer':referer,'X-Requested-With' : 'XMLHttpRequest'}
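			# step 1: exchange the channel hash for the real stream page link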
			html = s.post('http://www.streamgaroo.com/calls/get/source',data={'h':urllib.unquote(id)}).text
			s.headers = ({'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8','Host':'www.streamgaroo.com','Referer':referer, 'Accept-Encoding':'gzip, deflate, lzma, sdch'})
			link = json.loads(html)['link']
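			# step 2: fetch the stream page with the browser-like headers set above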
			html = s.get(link).text
			
			#hls
			try:
				url = re.findall('playStream\(.+?,.((?:http|rtmp)[^\"\']+)',html)[0]
				if 'rtmp' in url:
					return url 
				else:
					return url + '|%s' %urllib.urlencode({'X-Requested-With':constants.get_shockwave(),'Referer':link,'User-agent':client.agent()})
			except:	pass

			#everything else
			import liveresolver
			return liveresolver.resolve(link,html=html)
		except:
			control.infoDialog('No stream available!')
			return ''
Exemplo n.º 54
0
def login(cookies,post_data):
    log('Making new login token.')
    headers = {'referer': 'http://www.streamlive.to/login',
               'Content-type': 'application/x-www-form-urlencoded',
               'Origin': 'http://www.streamlive.to',
               'Host': 'www.streamlive.to',
               'User-agent': client.agent()}
    cj = client.request('http://www.streamlive.to/login.php', post=post_data, headers=headers, cj=cookies, output='cj')
    return cj
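
Usage note: building post_data with urllib.urlencode avoids hand-escaping the credentials; a sketch assuming user, password and cookies are already in scope:

    post_data = urllib.urlencode({'username': user, 'password': password,
                                  'accessed_by': 'web', 'submit': 'Login'})
    cj = login(cookies, post_data)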
Exemplo n.º 55
0
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url is None: return sources

			if not str(url).startswith('http'):

				data = urlparse.parse_qs(url)
				data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

				title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

				if 'tvshowtitle' in data:
					episode = '%01d' % int(data['episode'])

					u1 = '%s/watch-%s-s%02d-%s-online-free-putlocker.html' % (self.base_link, cleantitle.geturl(title), int(data['season']), str((int(data['year']) + int(data['season'])) - 1))
					u2 = '%s/watch-%s-s%02d-%s-online-free-putlocker.html' % (self.base_link, cleantitle.geturl(title), int(data['season']), data['year'])

					r = client.request(u1, output='geturl')
					if 'error.html' in r: r = client.request(u2, output='geturl')
					if 'error.html' in r: raise Exception()
					url = r
				else:
					episode = None

					u1 = '%s/watch-%s-%s-online-free-putlocker.html' % (self.base_link, cleantitle.geturl(title), data['year'])

					r = client.request(u1, output='geturl')
					if 'error.html' in r: raise Exception()
					url = r

			else:
				try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
				except: episode = None
				# normalise the episode parsed from the url (the query-string branch has no 'data' dict)
				try: episode = '%01d' % int(episode)
				except: pass

			r = client.request(url)

			h = {'User-Agent': client.agent(), 'X-Requested-With': 'XMLHttpRequest'}

			ip = client.parseDOM(r, 'input', ret='value', attrs = {'name': 'phimid'})[0]
			ep = episode if episode is not None else '1'

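			# step 1: register film id and episode with the ipplugins endpoint to obtain stream tokens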
			p = {'ip_film': ip, 'ip_name': ep, 'ipplugins': '1', 'ip_server': '11'}
			p = urllib.urlencode(p)

			u = '/ip.file/swf/plugins/ipplugins.php'
			u = urlparse.urljoin(self.base_link, u)

			r = client.request(u, post=p, headers=h, referer=url)
			r = json.loads(r)

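			# step 2: trade the tokens for the actual player data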
			u = '/ip.file/swf/ipplayer/ipplayer.php'
			u = urlparse.urljoin(self.base_link, u)

			p = {'u': r['s'], 's': r['v'], 'w': '100%', 'h': '360', 'n':'0'}
			p = urllib.urlencode(p)

			r = client.request(u, post=p, headers=h, referer=url)
			r = json.loads(r)['data']

			u = [i['files'] for i in r if 'files' in i]

			for i in u:
				try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
				except: pass

			return sources
		except:
			return sources
Exemplo n.º 56
0
	def resolve(self,url):
		ref=url
		html = client.request(url)
		soup = webutils.bs(html)
		try:
			url = soup.find('iframe',{'width':'600'})['src']
		except:
			try:
				url = 'http:' + re.findall('(\/\/config\.playwire\.com\/[^\'\"]+)',html)[0]
			except:
				try:
					url = soup.find('iframe',{'width':'626'})['src']
				except:
					return

		if 'nba' in url:
			url = url.split("playlist=")[-1]
			url = 'http://video.nba.com/videocenter/servlets/playlist?ids=%s&format=json' % url
			result = client.request(url)
			url = re.compile('"publishPoint":"(.+?)"').findall(result)[0]
			return url
		elif 'rutube' in url:
			url = re.findall('embed/(\d+)',url)[0]
			url = 'http://rutube.ru/api/play/options/'+url+'?format=json'
			result = client.request(url)
			jsx = json.loads(result)
			link = jsx['video_balancer']['m3u8']
			return link
		elif 'youtube' in url:
			import liveresolver
			return liveresolver.resolve(url)
		elif 'playwire' in url:
			try:
				result = client.request(url)
				html = result
				result = json.loads(result)
				try:
					f4m=result['content']['media']['f4m']
				except:
					pom = re.findall('"src":"http://(.+?)\.f4m"', html)[0]
					f4m = 'http://' + pom + '.f4m'

				result = client.request(f4m)
				soup = webutils.bs(result)
				try:
					base=soup.find('baseURL').getText()+'/'
				except:
					base=soup.find('baseurl').getText()+'/'

				linklist = soup.findAll('media')
				choices,links=[],[]
				for link in linklist:
					url = base + link['url']
					bitrate = link['bitrate']
					choices.append(bitrate)
					links.append(url)
				# one stream: play it directly; several: let the user pick a bitrate
				if len(links)==1:
					return links[0]
				if len(links)>1:
					import xbmcgui
					dialog = xbmcgui.Dialog()
					index = dialog.select('Select bitrate', choices)
					if index>-1:
						return links[index]
				return
			except:
				return

		elif 'mail.ru' in url:
			link=url

			link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('http://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('html','json')
			s = requests.Session()
			f = s.get(link).text

			js = json.loads(f)
			token = s.cookies.get_dict()['video_key']
			url = js['videos'][-1]['url'] + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref} ))
			return url
		else:
			import urlresolver
			url = urlresolver.resolve(url)
			return url
Exemplo n.º 57
0
	def get_session(self):
		session = requests.Session()
		session.headers = {'X-Requested-With':'XMLHttpRequest', 'Referer':self.base, 'User-agent':client.agent()}
		cookies = self.load_cookies()
		if not cookies:
			log('Getting new cookies...')
			self.login(session)
			with open(cookieFile, 'wb') as f:
				pickle.dump(session.cookies, f)

			cookies = session.cookies

		session.cookies = cookies
		test = session.get(self.base).text
		if control.setting('vaughn_user') in test:
			log('Logged in !')
			return session
		else:
			log('Getting new cookies...')
			self.login(session)
			with open(cookieFile, 'wb') as f:
				pickle.dump(session.cookies, f)
			return session
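
Note: load_cookies() is not shown in this excerpt; a minimal counterpart to the pickle.dump calls above might look like this (assumes the same cookieFile path):

	def load_cookies(self):
		# return the pickled cookie jar, or None when no usable cookie file exists yet
		try:
			with open(cookieFile, 'rb') as f:
				return pickle.load(f)
		except (IOError, EOFError):
			return None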
Exemplo n.º 58
0
def google(url):
    try:
        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]



        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

            result = client.request(url, headers={'User-Agent': client.agent()})

            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]

            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i) for i in result], [])



        elif netloc == 'photos':
            result = client.request(url, headers={'User-Agent': client.agent()})

            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]



        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]

            result = client.request(url, headers={'User-Agent': client.agent()})

            result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
            elif len(result) == 1: result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i) for i in result], [])



        elif netloc == 'plus':
            result = client.request(url, headers={'User-Agent': client.agent()})

            id = (urlparse.urlparse(url).path).split('/')[-1]
            result = result.replace('\r','').replace('\n','').replace('\t','')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d','=').replace('\\u0026','&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = [googletag(i)[0] for i in result]



        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass

        if url == []: return
        return url
    except:
        return
Exemplo n.º 59
0
def resolve(url):
    
    try:
        page = url

        user, password = get_account()
        

        try: 
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
            url = url.replace(referer,'').replace('?referer=','').replace('&referer=','')
        except:
            referer = url


        post_data = 'username=%s&password=%s&accessed_by=web&submit=Login'%(user,password)

        cj = get_cj()
        result = client.request(url,cj=cj,headers={'referer':'http://www.streamlive.to', 'Content-type':'application/x-www-form-urlencoded', 'Origin': 'http://www.streamlive.to', 'Host':'www.streamlive.to', 'User-agent':client.agent()})
        if 'this channel is a premium channel.' in result.lower():
            control.infoDialog('Premium channel. Upgrade your account to watch it!', heading='Streamlive.to')
            return

        if 'not logged in yet' in result.lower():
            #Cookie expired or not valid, request new cookie
            cj = login(cj,post_data)
            cj.save(cookieFile, ignore_discard=True)
            result = client.request(url,cj=cj)

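        # the page embeds a getJSON(...) call whose endpoint returns the rtmp session token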
        token_url = re.compile('getJSON\("(.+?)"').findall(result)[0]
        r2 = client.request(token_url,referer=referer)
        token = json.loads(r2)["token"]

        file = re.compile('(?:[\"\'])?file(?:[\"\'])?\s*:\s*(?:\'|\")(.+?)(?:\'|\")').findall(result)[0].replace('.flv','')
        rtmp = re.compile('streamer\s*:\s*(?:\'|\")(.+?)(?:\'|\")').findall(result)[0].replace(r'\\','\\').replace(r'\/','/')
        app = re.compile('rtmp://[\.\w:]*/([^\s]+)').findall(rtmp)[0]
        url = rtmp + ' app=' + app + ' playpath=' + file + ' swfUrl=http://www.streamlive.to/ads/streamlive.swf flashver=' + constants.flash_ver() + ' live=1 timeout=15 token=' + token + ' swfVfy=1 pageUrl=' + page

        return url
    except:
        return