Example #1
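# Resolve a Yandex Disk (yadi.sk) share page to a direct file URL: scrape the page for
# the 'sk' token and resource id, then POST them to the do-get-resource-url model endpoint.
# (Python 2 Kodi add-on code; assumes the add-on's `client` helper plus re, json, os, binascii, urllib.)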
def yandex(url):
    try:
        cookie = client.request(url, output='cookie')

        r = client.request(url, cookie=cookie)
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

        sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]

        idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]

        idclient = binascii.b2a_hex(os.urandom(16))

        post = {
            'idClient': idclient,
            'version': '3.9.2',
            'sk': sk,
            '_model.0': 'do-get-resource-url',
            'id.0': idstring
        }
        post = urllib.urlencode(post)

        r = client.request('https://yadi.sk/models/?_m=do-get-resource-url',
                           post=post,
                           cookie=cookie)
        r = json.loads(r)

        url = r['models'][0]['data']['file']

        return url
    except:
        return
Example #2
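    # Collect Google Video streams from Moviezone movie pages by following two levels of
    # iframes and reading the <source src> entries of the final player page.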
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            for movielink in self.elysium_url:
                referer = movielink
                link = client.request(movielink)

                r = client.parseDOM(link, 'iframe', ret='src', attrs={'class': 'movieframe'})
                for item in r:
                    try:
                        iframe = item.encode('utf-8')
                        # print('MOVIEZONE IFRAMES',iframe)
                        redirect = client.request(iframe, timeout='10')
                        frame2 = client.parseDOM(redirect, 'iframe', ret='src')[0]
                        frame2 = frame2.encode('utf-8')
                        # print('MOVIEZONE IFRAMES2',frame2)
                        finalurl = client.request(frame2, timeout='5')
                        gv_frame = client.parseDOM(finalurl, 'source', ret='src')
                        for items in gv_frame:
                            url = items.encode('utf-8')
                            url = client.replaceHTMLCodes(url)
                            # print ('MOVIEZONE players', url)
                            quality = directstream.googletag(url)[0]['quality']
                            # print ('MOVIEZONE', quality, url)

                            sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Moviezone', 'url': url, 'direct': True, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
Example #3
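# Return the remote file size reported for the URL (via output='file_size'), converted
# by convert_size, or False if it cannot be determined.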
def get_size(url):
    try:
        size = client.request(url, output='file_size')
        if size == '0': size = False
        size = convert_size(size)
        return size
    except: return False
Example #4
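    # Search the site's keyword endpoint (XHR POST) and return the first result link whose
    # 'ss-title' text matches one of the supplied title aliases.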
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            u = urlparse.urljoin(self.base_link, self.search_link)
            p = urllib.urlencode({'keyword': title})
            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
            url = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example #5
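# Map a TVDb season/episode to its scene numbering via the URL_PATTERN API,
# falling back to the original values on any failure.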
def get_scene_episode_number(tvdbid, season, episode):

    try:
        url = URL_PATTERN % (tvdbid, season, episode)
        r = client.request(url)
        r = json.loads(r)
        if r['result'] == 'success':
            data = r['data']['scene']
            return data['season'], data['episode']            
    except:
        pass

    return season, episode    
Example #6
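    # Like searchMovie, but queries '<title> - Season <n>' and only accepts results whose
    # parsed title matches an alias and whose season number matches.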
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            u = urlparse.urljoin(self.base_link, self.search_link)
            p = urllib.urlencode({'keyword': ('%s - Season %s' % (title, season))})
            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
            return url
        except:
            return
Example #7
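    # Probe /tv-show/<title>/season/<s>/episode/<e> for each alias and return the first URL
    # that resolves (client.request with output='geturl').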
    def searchShow(self, title, season, episode, aliases, headers):
        try:
            for alias in aliases:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link, cleantitle.geturl(
                        alias['title']), int(season), int(episode))
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if not url == None: break
            return url
        except:
            return
Example #8
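    # Probe /movie/<title> for each alias, then retry with /movie/<title>-<year>,
    # returning the first URL that resolves.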
    def searchMovie(self, title, year, aliases, headers):
        try:
            for alias in aliases:
                url = '%s/movie/%s' % (self.base_link,
                                       cleantitle.geturl(alias['title']))
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if not url == None: break
            if url == None:
                for alias in aliases:
                    url = '%s/movie/%s-%s' % (self.base_link,
                                              cleantitle.geturl(
                                                  alias['title']), year)
                    url = client.request(url,
                                         headers=headers,
                                         output='geturl',
                                         timeout='10')
                    if not url == None: break

            return url
        except:
            return
Example #9
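# Resolve a VK video page to quality-tagged stream URLs by requesting the al_video.php
# inline player and picking the best available of the 720/540/480/360/240 renditions.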
def vk(url):
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)

        try:
            oid, video_id = query['oid'][0], query['id'][0]
        except:
            oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]

        sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (
            oid, video_id)
        html = client.request(sources_url)
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)

        if not sources:
            sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)

        sources = [(i[0], i[1].replace('\\', '')) for i in sources]
        sources = dict(sources)

        url = []
        try:
            url += [{'quality': 'HD', 'url': sources['720']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['540']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['480']}]
        except:
            pass
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['360']}]
        except:
            pass
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['240']}]
        except:
            pass
        if not url == []: return url
    except:
        return
Example #10
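# Follow a Google Video link (optionally suffixed with '|<urlencoded headers>') to its final
# location, switch the scheme according to 'requiressl', and re-append the headers.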
def googlepass(url):
    try:
        try:
            headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            headers = None
        url = url.split('|')[0].replace('\\', '')
        url = client.request(url, headers=headers, output='geturl')
        if 'requiressl=yes' in url:
            url = url.replace('http://', 'https://')
        else:
            url = url.replace('https://', 'http://')
        if headers: url += '|%s' % urllib.urlencode(headers)
        return url
    except:
        return
Example #11
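# Resolve a Cloud Mail.ru public link by extracting the download token and the
# weblink_get base URL from the page source.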
def cldmailru(url):
    try:
        v = url.split('public')[-1]

        r = client.request(url)
        r = re.sub(r'[^\x00-\x7F]+', ' ', r)

        tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]

        url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]

        url = '%s%s?key=%s' % (url, v, tok)

        return url
    except:
        return
Example #12
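    # Resolve an embed link via its JSON 'embed_url' endpoint; otherwise normalise the
    # scheme and retry directstream.googlepass up to three times.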
    def resolve(self, url):
        try:
            if self.embed_link in url:
                result = client.request(url, XHR=True)
                url = json.loads(result)['embed_url']
                return url

            try:
                if not url.startswith('http'):
                    url = 'http:' + url

                for i in range(3):
                    u = directstream.googlepass(url)
                    if not u == None: break

                return u
            except:
                return
        except:
            return
Example #13
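# Query ok.ru's videoPlayerMetadata for the media id and return quality-tagged URLs:
# all HD-and-above renditions plus at most one SD fallback.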
def odnoklassniki(url):
    try:
        media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]

        result = client.request('http://ok.ru/dk',
                                post={
                                    'cmd': 'videoPlayerMetadata',
                                    'mid': media_id
                                })
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        result = json.loads(result).get('videos', [])

        hd = []
        for name, quali in {
                'ultra': '4K',
                'quad': '1440p',
                'full': '1080p',
                'hd': 'HD'
        }.items():
            hd += [{
                'quality': quali,
                'url': i.get('url')
            } for i in result if i.get('name').lower() == name]

        sd = []
        for name, quali in {
                'sd': 'SD',
                'low': 'SD',
                'lowest': 'SD',
                'mobile': 'SD'
        }.items():
            sd += [{
                'quality': quali,
                'url': i.get('url')
            } for i in result if i.get('name').lower() == name]

        url = hd + sd[:1]
        if not url == []: return url
    except:
        return
Example #14
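# Real-Debrid OAuth device-code flow: show the verification URL and user code, poll for the
# client credentials, exchange the device code for tokens and store them in the add-on settings.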
def rdAuthorize():
    try:
        CLIENT_ID = 'X245A4XAIBGVM'
        USER_AGENT = 'Kodi Stream All The Sources/3.0'

        if not '' in credentials()['realdebrid'].values():
            if control.yesnoDialog(
                    control.lang(32531).encode('utf-8'),
                    control.lang(32532).encode('utf-8'), '', 'RealDebrid'):
                control.setSetting(id='realdebrid.id', value='')
                control.setSetting(id='realdebrid.secret', value='')
                control.setSetting(id='realdebrid.token', value='')
                control.setSetting(id='realdebrid.refresh', value='')
                control.setSetting(id='realdebrid.auth', value='')
            raise Exception()

        headers = {'User-Agent': USER_AGENT}
        url = 'https://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes' % (
            CLIENT_ID)
        result = client.request(url, headers=headers)
        result = json.loads(result)
        verification_url = (control.lang(32533) %
                            result['verification_url']).encode('utf-8')
        user_code = (control.lang(32534) % result['user_code']).encode('utf-8')
        device_code = result['device_code']
        interval = result['interval']

        progressDialog = control.progressDialog
        progressDialog.create('RealDebrid', verification_url, user_code)

        for i in range(0, 3600):
            try:
                if progressDialog.iscanceled(): break
                time.sleep(1)
                if not float(i) % interval == 0: raise Exception()
                url = 'https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s' % (
                    CLIENT_ID, device_code)
                result = client.request(url, headers=headers, error=True)
                result = json.loads(result)
                if 'client_secret' in result: break
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        id, secret = result['client_id'], result['client_secret']

        url = 'https://api.real-debrid.com/oauth/v2/token'
        post = urllib.urlencode({
            'client_id':
            id,
            'client_secret':
            secret,
            'code':
            device_code,
            'grant_type':
            'http://oauth.net/grant_type/device/1.0'
        })

        result = client.request(url, post=post, headers=headers)
        result = json.loads(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='realdebrid.id', value=id)
        control.setSetting(id='realdebrid.secret', value=secret)
        control.setSetting(id='realdebrid.token', value=token)
        control.setSetting(id='realdebrid.refresh', value=refresh)
        control.setSetting(id='realdebrid.auth', value='*************')
        raise Exception()
    except:
        control.openSettings('3.16')
Example #15
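# Unrestrict a hoster link through the configured debrid service, trying Real-Debrid
# (with a token refresh on 'bad_token'), Premiumize, AllDebrid and RPnet in turn.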
def resolver(url, debrid):
    u = url
    u = u.replace('filefactory.com/stream/', 'filefactory.com/file/')

    try:
        if not debrid == 'realdebrid' and not debrid == True: raise Exception()

        if '' in credentials()['realdebrid'].values(): raise Exception()
        id, secret, token, refresh = credentials()['realdebrid'][
            'id'], credentials()['realdebrid']['secret'], credentials(
            )['realdebrid']['token'], credentials()['realdebrid']['refresh']

        USER_AGENT = 'Kodi Stream All The Sources/3.0'

        post = urllib.urlencode({'link': u})
        headers = {
            'Authorization': 'Bearer %s' % token,
            'User-Agent': USER_AGENT
        }
        url = 'https://api.real-debrid.com/rest/1.0/unrestrict/link'

        result = client.request(url, post=post, headers=headers, error=True)
        result = json.loads(result)

        if 'error' in result and result['error'] == 'bad_token':
            result = client.request(
                'https://api.real-debrid.com/oauth/v2/token',
                post=urllib.urlencode({
                    'client_id':
                    id,
                    'client_secret':
                    secret,
                    'code':
                    refresh,
                    'grant_type':
                    'http://oauth.net/grant_type/device/1.0'
                }),
                headers={'User-Agent': USER_AGENT},
                error=True)
            result = json.loads(result)
            if 'error' in result: return

            headers['Authorization'] = 'Bearer %s' % result['access_token']
            result = client.request(url, post=post, headers=headers)
            result = json.loads(result)

        url = result['download']
        return url
    except:
        pass

    try:
        if not debrid == 'premiumize' and not debrid == True: raise Exception()

        if '' in credentials()['premiumize'].values(): raise Exception()
        user, password = credentials()['premiumize']['user'], credentials(
        )['premiumize']['pass']

        url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (
            user, password, urllib.quote_plus(u))
        result = client.request(url, close=False)
        url = json.loads(result)['result']['location']
        return url
    except:
        pass

    try:
        if not debrid == 'alldebrid' and not debrid == True: raise Exception()

        if '' in credentials()['alldebrid'].values(): raise Exception()
        user, password = credentials()['alldebrid']['user'], credentials(
        )['alldebrid']['pass']

        login_data = urllib.urlencode({
            'action': 'login',
            'login_login': user,
            'login_password': password
        })
        login_link = 'http://alldebrid.com/register/?%s' % login_data
        cookie = client.request(login_link, output='cookie', close=False)

        url = 'http://www.alldebrid.com/service.php?link=%s' % urllib.quote_plus(
            u)
        result = client.request(url, cookie=cookie, close=False)
        url = client.parseDOM(result,
                              'a',
                              ret='href',
                              attrs={'class': 'link_dl'})[0]
        url = client.replaceHTMLCodes(url)
        url = '%s|Cookie=%s' % (url, urllib.quote_plus(cookie))
        return url
    except:
        pass

    try:
        if not debrid == 'rpnet' and not debrid == True: raise Exception()

        if '' in credentials()['rpnet'].values(): raise Exception()
        user, password = credentials()['rpnet']['user'], credentials(
        )['rpnet']['pass']

        login_data = urllib.urlencode({
            'username': user,
            'password': password,
            'action': 'generate',
            'links': u
        })
        login_link = 'http://premium.rpnet.biz/client_api.php?%s' % login_data
        result = client.request(login_link, close=False)
        result = json.loads(result)
        url = result['links'][0]['generated']
        return url
    except:
        return
Example #16
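    # Locate the movie/episode page, list its hosted servers via the server_link XHR, decode
    # each server's token script into x/y parameters and collect the Google Video playlist entries.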
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)
                r = client.parseDOM(r['html'], 'div', attrs = {'class': 'les-content'})
                ids = client.parseDOM(r, 'a', ret='data-id')
                servers = client.parseDOM(r, 'a', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+):.*?',eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == int(episode)):
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                            script = client.request(url)

                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)

                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                            r = client.request(u)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]
                            for s in url:
                                sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                'url': s['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #17
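# Resolve Google-hosted video (docs/drive, photos, picasaweb, plus) into stream URLs,
# sorted by quality, with the request headers appended after a '|' separator.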
def google(url):
    try:
        if any(x in url for x in ['youtube.', 'docid=']):
            url = 'https://drive.google.com/file/d/%s/view' % re.compile(
                'docid=([\w-]+)').findall(url)[0]

        netloc = urlparse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        try:
            headers['Cookie'] = result[2]['Set-Cookie']
        except:
            pass

        result = result[0]

        if netloc == 'docs' or netloc == 'drive':
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]

            result = re.search('feedPreload:\s*(.*}]}})},', result,
                               re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1:
                result = [
                    i for i in result if str(id) in i['link'][0]['href']
                ][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'plus':
            id = (urlparse.urlparse(url).path).split('/')[-1]

            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)

        url = []
        for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                url += [[i for i in result if i.get('quality') == q][0]]
            except:
                pass

        for i in url:
            i.pop('height', None)
            i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})

        if not url: return
        return url
    except:
        return
Example #18
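    # Requires an enabled debrid service: search the site's RSS feed, keep posts matching the
    # title and year/SxxEyy tag, derive quality and size info, and return the hoster links.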
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')[0]

                    u = client.parseDOM(c, 'p')
                    u = [client.parseDOM(i, 'a', ret='href') for i in u]
                    u = [i[0] for i in u if len(i) == 1]
                    if not u: raise Exception()

                    if 'tvshowtitle' in data:
                         u = [(re.sub('(720p|1080p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i) for i in u]
                    else:
                         u = [(t, i) for i in u]

                    items += u
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]

                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                    info = []

                    if '3d' in fmt: info.append('3D')

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #19
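    # Find the title page, harvest any googlevideo redirector links from it, then POST token/elid
    # to /ajax/jne.php (using the __utmx cookie as a bearer token) for additional gvideo streams.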
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            result = client.request(url, headers=headers, timeout='10')
            result = client.parseDOM(result, 'title')[0]

            if '%TITLE%' in result: raise Exception()

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            if not imdb in r[0]: raise Exception()

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source':
                            'gvideo',
                            'quality':
                            directstream.googletag(i)[0]['quality'],
                            'language':
                            'en',
                            'url':
                            i,
                            'direct':
                            True,
                            'debridonly':
                            False
                        })
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers[
                'Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers[
                'Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url

            u = '/ajax/jne.php'
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            post = urllib.urlencode(post)

            c = client.request(u,
                               post=post,
                               headers=headers,
                               XHR=True,
                               output='cookie',
                               error=True)

            headers['Cookie'] = cookie + '; ' + c

            r = client.request(u, post=post, headers=headers, XHR=True)
            r = str(json.loads(r))
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    sources.append({
                        'source':
                        'gvideo',
                        'quality':
                        directstream.googletag(i)[0]['quality'],
                        'language':
                        'en',
                        'url':
                        i,
                        'direct':
                        True,
                        'debridonly':
                        False
                    })
                except:
                    pass

            return sources
        except:
            return sources