Example No. 1
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            links = re.compile('file\s*:\s*"(.+?)"').findall(result)
            links = [i for i in links if 'google' in i]

            for link in links:
                try:
                    i = googleplus.tag(link)[0]
                    sources.append({
                        'source': 'GVideo',
                        'quality': i['quality'],
                        'provider': 'Dizilab',
                        'url': i['url']
                    })
                except:
                    pass

            return sources
        except:
            return sources
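A note on the pattern above: client.source fetches the page HTML, the regex pulls every file : "..." URL out of the embedded player setup, and googleplus.tag is assumed to return a list of dicts carrying 'quality' and 'url' keys for a Google Video link. A minimal, self-contained sketch of just the extraction step, run against an invented player snippet:

import re

# invented player markup, for illustration only
sample = 'jwplayer("player").setup({file : "https://redirector.googlevideo.com/videoplayback?id=abc"});'

links = re.compile(r'file\s*:\s*"(.+?)"').findall(sample)
links = [i for i in links if 'google' in i]
print(links)  # ['https://redirector.googlevideo.com/videoplayback?id=abc']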
Example No. 2
def PLAYLINKAFDAH(name,url):
        moviename = name

        link = open_url(url)
        base_link_1 = 'https://afdah.org'
        
        search_link = '/results?q=%s'
        info_link = '/video_info'
        base_link = random.choice([base_link_1])
        url = urlparse.urljoin(base_link, url)

        result = client.source(url)

        video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
        post = urllib.urlencode({'video_id': video_id})

        result = client.source(urlparse.urljoin(base_link, info_link), post=post)

        u = [i for i in result.split('&') if 'google' in i][0]
        u = urllib.unquote_plus(u)
        u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
        u = [googleplus.tag(i)[0] for i in u]
        u = [i for i in u if i['quality'] in ['1080p', 'HD']]
        for i in u:
            i['quality'] = re.sub('HD', '720p', i['quality'])
            addLink(moviename + " - " + i['quality'], i['url'], 101, icon, fanart)
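How the /video_info reply is unpacked above: the response is treated as a form-encoded string, the '&'-separated field containing 'google' is kept, URL-unquoted, then split on ',' and '|' to recover one URL per quality label. A rough stand-alone sketch with an invented, already-unquoted field (the real field layout is an assumption):

# invented sample of the '&'-separated field after urllib.unquote_plus
u = 'url=360|http://r1.example.com/a,720|https://redirector.googlevideo.com/videoplayback?id=x'

u = [i.split('|')[-1] for i in u.split(',')]   # keep the URL half of each 'label|url' pair
u = [i for i in u if 'google' in i]            # only Google Video links are resolved further
print(u)  # ['https://redirector.googlevideo.com/videoplayback?id=x']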
Example No. 3
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            query = urlparse.urlparse(url).query

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
            result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
            result = [i[0] for i in result if query == i[1] or query == ''][0]

            url = urlparse.urljoin(self.base_link, result)

            url = client.source(url, output='geturl')
            if not 'google' in url: raise Exception()

            url = googleplus.tag(url)
            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})

            return sources
        except:
            return sources
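The dropdown step above pairs each option value with its visible label via zip, then keeps the entry whose label equals the query (or simply the first one when the query is empty). A small stand-alone sketch with invented option data:

values = ['/watch/ep1', '/watch/ep2', '/watch/ep3']   # what parseDOM(..., 'option', ret='value') would yield
labels = ['Episode 1', 'Episode 2', 'Episode 3']      # what parseDOM(..., 'option') would yield

query = 'Episode 2'
picked = [v for v, l in zip(values, labels) if query == l or query == ''][0]
print(picked)  # /watch/ep2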
Example No. 4
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(
                result)[0]
            post = urllib.urlencode({'video_id': video_id})

            result = client.source(urlparse.urljoin(self.base_link,
                                                    self.info_link),
                                   post=post)

            u = [i for i in result.split('&') if 'google' in i][0]
            u = urllib.unquote_plus(u)
            u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
            u = [googleplus.tag(i)[0] for i in u]
            u = [i for i in u if i['quality'] in ['1080p', 'HD']]

            for i in u:
                sources.append({
                    'source': 'GVideo',
                    'quality': i['quality'],
                    'provider': 'Afdah',
                    'url': i['url']
                })

            return sources
        except:
            return sources
Example No. 5
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            self.base_link = random.choice([self.base_link_1, self.base_link_2])

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
            post = {'video_id': video_id}

            result = client.source(urlparse.urljoin(self.base_link, self.info_link), post=post)

            u = [i for i in result.split('&') if 'google' in i][0]
            u = urllib.unquote_plus(u)
            u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
            u = [googleplus.tag(i)[0] for i in u]
            u = [i for i in u if i['quality'] in ['1080p', 'HD']]

            for i in u: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Afdah', 'url': i['url']})

            return sources
        except:
            return sources
Example No. 6
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url)

            url = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'fullwindowlink'})[0]
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = client.parseDOM(result, 'body')[0]

            post = re.compile('movie_player_file *= *"(.+?)"').findall(
                result)[0]
            post = urllib.urlencode({'url': post})

            cookie = client.source(self.cookie_link,
                                   output='cookie',
                                   close=False)

            headers = {
                'Host': 'gl.hdmoviezone.net',
                'Accept': 'text/html, */*; q=0.01',
                'Content-Type':
                'application/x-www-form-urlencoded; charset=UTF-8',
                'Origin': 'http://www.hdmoviezone.net',
                'Cookie': cookie
            }

            result = client.source(self.stream_link,
                                   post=post,
                                   headers=headers)

            result = json.loads(result)
            result = result['content']

            links = [i['url'] for i in result]

            for url in links:
                try:
                    i = googleplus.tag(url)[0]
                    sources.append({
                        'source': 'GVideo',
                        'quality': i['quality'],
                        'provider': 'Moviezone',
                        'url': i['url']
                    })
                except:
                    pass

            return sources
        except:
            return sources
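The stream endpoint above is assumed to answer with JSON of the shape {"content": [{"url": ...}, ...]}; the loop then hands each Google Video URL to googleplus.tag. A minimal sketch of that response handling with an invented payload:

import json

# invented sample reply from the stream endpoint
reply = '{"content": [{"url": "https://redirector.googlevideo.com/videoplayback?id=1"}, {"url": "http://cdn.example.com/v"}]}'

result = json.loads(reply)
links = [i['url'] for i in result['content']]
print(links)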
Example No. 7
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            content = re.compile('(.+?)\?S\d*E\d*$').findall(url)

            try:
                url, season, episode = re.compile(
                    '(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
            except:
                pass

            self.base_link = random.choice(
                [self.base_link_1, self.base_link_2])

            post_id = re.compile('/.+?/(.+)').findall(url)[0].rsplit('/')[0]

            player = urlparse.urljoin(self.base_link, self.player_link)

            if len(content) == 0:
                post = self.player_post_1 % post_id
            else:
                post = client.source(player,
                                     post=self.player_post_2 % post_id,
                                     headers=self.headers)
                post = client.parseDOM(post,
                                       'ul',
                                       attrs={'class': 'movie-parts'})[0]
                post = client.parseDOM(post, 'li')
                post = [(client.parseDOM(i, 'a',
                                         ret='href'), client.parseDOM(i, 'a'))
                        for i in post]
                post = [(i[0][0], i[1][0]) for i in post
                        if len(i[0]) > 0 and len(i[1]) > 0]
                post = [i[0] for i in post if '%01d' % int(episode) == i[1]][0]
                post = urlparse.parse_qs(
                    urlparse.urlparse(post).query)['part_id'][0]
                post = self.player_post_3 % (post_id, post)

            url = client.source(player, post=post, headers=self.headers)
            url = re.compile('<source\s+src="([^"]+)').findall(url)[0]
            url = client.replaceHTMLCodes(url)

            if 'google' in url: quality = googleplus.tag(url)[0]['quality']
            else: quality = 'HD'

            sources.append({
                'source': 'GVideo',
                'quality': quality,
                'provider': 'Xmovies',
                'url': url
            })

            return sources
        except:
            return sources
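Episode URLs in this scraper carry a '?S<season>E<episode>' suffix; the first regex only detects the suffix (so content stays empty for movie URLs) and the second one splits it into base URL, season and episode. A self-contained sketch of that parsing with an invented URL:

import re

url = '/tv/some-show/?S02E05'   # invented episode URL

content = re.compile(r'(.+?)\?S\d*E\d*$').findall(url)   # empty list for movie URLs
if content:
    url, season, episode = re.compile(r'(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
    print('%s -> season %s, episode %s' % (url, season, episode))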
Example No. 8
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            id = re.compile('(\d*)').findall(url)[0]
            query = urlparse.urljoin(self.base_link, self.content_link % (id))

            result = self.__request(query)
            result = json.loads(result)
            result = result['listvideos']

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                links = [i['film_id'] for i in result]
            else:
                ep = re.compile('.+?\s(S\d*E\d*)$').findall(url)[0]
                links = [
                    i['film_id'] for i in result
                    if ep in i['film_name'].encode('utf-8').upper()
                ]

            for l in links[:3]:
                try:
                    url = urlparse.urljoin(self.base_link,
                                           self.source_link % (l, id))

                    result = self.__request(url)
                    result = json.loads(result)

                    url = result['videos']
                    url = [
                        self.__decrypt(self.film_key,
                                       base64.b64decode(i['film_link']))
                        for i in url
                    ]

                    url = '#'.join(url)
                    url = url.split('#')
                    url = [i for i in url if 'http' in i and 'google' in i]
                    url = [googleplus.tag(i)[0] for i in url]

                    for i in url:
                        sources.append({
                            'source': 'GVideo',
                            'quality': i['quality'],
                            'provider': 'MegaBox',
                            'url': i['url']
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example No. 9
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            query = re.compile('(\d*)').findall(url)[0]
            query = urlparse.urljoin(self.base_link, self.content_link % query)
            query += self.__extra()

            time.sleep(1.5)
            result = client.source(query, headers=self.headers)
            result = json.loads(result)
            result = self.__decrypt(self.data_key, result['data'])
            result = json.loads(result)
            result = result['listvideos']

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                links = [i['film_id'] for i in result]
            else:
                ep = re.compile('.+?\s(S\d*E\d*)$').findall(url)[0]
                links = [i['film_id'] for i in result if ep in i['film_name'].encode('utf-8').upper()]

            for l in links[:3]:
                try:
                    url = urlparse.urljoin(self.base_link, self.source_link % l)
                    url += self.__extra()

                    time.sleep(1.5)
                    url = client.source(url, headers=self.headers)
                    url = json.loads(url)

                    url = self.__decrypt(self.data_key, url['data'])
                    url = json.loads(url)['videos']
                    url = [self.__decrypt(self.film_key, i['film_link']) for i in url]

                    url = '#'.join(url)
                    url = url.split('#')
                    url = [i for i in url if 'http' in i and 'google' in i]
                    url = [googleplus.tag(i)[0] for i in url]

                    for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'GVcenter', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
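The join-then-split step above ('#'.join followed by split('#')) flattens the list in one pass, presumably because a single decrypted film_link can itself hold several '#'-separated URLs. For example:

decrypted = ['http://cdn.example.com/v#https://redirector.googlevideo.com/x',
             'https://redirector.googlevideo.com/y']
flat = '#'.join(decrypted).split('#')
flat = [i for i in flat if 'http' in i and 'google' in i]
print(flat)  # only the two googlevideo URLs survive the filter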
Example No. 10
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None:
                return sources

            content = re.compile("(.+?)\?S\d*E\d*$").findall(url)

            try:
                url, season, episode = re.compile("(.+?)\?S(\d*)E(\d*)$").findall(url)[0]
            except:
                pass

            self.base_link = random.choice([self.base_link_1, self.base_link_2])

            post_id = re.compile("/.+?/(.+)").findall(url)[0].rsplit("/")[0]

            player = urlparse.urljoin(self.base_link, self.player_link)

            if len(content) == 0:
                post = self.player_post_1 % post_id
            else:
                post = cloudflare.source(player, post=self.player_post_2 % post_id, headers=self.headers)
                post = client.parseDOM(post, "ul", attrs={"class": "movie-parts"})[0]
                post = client.parseDOM(post, "li")
                post = [(client.parseDOM(i, "a", ret="href"), client.parseDOM(i, "a")) for i in post]
                post = [(i[0][0], i[1][0]) for i in post if len(i[0]) > 0 and len(i[1]) > 0]
                post = [i[0] for i in post if "%01d" % int(episode) == i[1]][0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)["part_id"][0]
                post = self.player_post_3 % (post_id, post)

            url = cloudflare.source(player, post=post, headers=self.headers)
            url = re.compile('<source\s+src="([^"]+)').findall(url)[0]
            url = client.replaceHTMLCodes(url)

            if "google" in url:
                quality = googleplus.tag(url)[0]["quality"]
            else:
                quality = "HD"

            sources.append({"source": "GVideo", "quality": quality, "provider": "Xmovies", "url": url})

            return sources
        except:
            return sources
Example No. 11
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link , url)
            result = client.source(url)

            url = client.parseDOM(result, 'div', attrs = {'class': 'fullwindowlink'})[0]
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = urlparse.urljoin(self.base_link , url)

            result = client.source(url)
            result = client.parseDOM(result, 'body')[0]

            post = re.compile('movie_player_file *= *"(.+?)"').findall(result)[0]
            post = urllib.urlencode({'url': post})

            url = client.parseDOM(result, 'script', ret='src', attrs = {'type': '.+?'})[0]
            url = client.source(url)
            url = url.replace('\n','')
            url = re.compile('getServerHost.+?return\s+"(.+?)"').findall(url)[0]

            headers = {'Host': 'hdmoviezone.net',
            'Connection': 'keep-alive',
            'Accept': 'text/html, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Origin': self.base_link }

            result = client.source(url, post=post, headers=headers)
            result = json.loads(result)
            result = result['content']

            links = [i['url'] for i in result]

            for url in links:
                try:
                    i = googleplus.tag(url)[0]
                    sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Moviezone', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
Example No. 12
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            id = re.compile('(\d*)').findall(url)[0]
            sid = hashlib.md5('content%scthd' % id).hexdigest()
            query = urlparse.urljoin(self.base_link, self.content_link % (id, sid))

            result = self.__request(query)
            result = json.loads(result)
            result = result['listvideos']

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                links = [i['film_id'] for i in result]
            else:
                ep = re.compile('.+?\s(S\d*E\d*)$').findall(url)[0]
                links = [i['film_id'] for i in result if ep in i['film_name'].encode('utf-8').upper()]

            for l in links[:3]:
                try:
                    sid = hashlib.md5('%s%scthd' % (l, id)).hexdigest()

                    url = urlparse.urljoin(self.base_link, self.source_link % (l, id, sid))

                    result = self.__request(url)
                    result = json.loads(result)

                    url = result['videos']
                    url = [self.__decrypt(self.film_key, base64.b64decode(i['film_link'])) for i in url]

                    url = '#'.join(url)
                    url = url.split('#')
                    url = [i for i in url if 'http' in i and 'google' in i]
                    url = [googleplus.tag(i)[0] for i in url]

                    for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'GVcenter', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
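The sid values above are plain MD5 signatures of the content id wrapped in fixed strings. A tiny sketch of that computation (the id is invented; the .encode call is added so the line also runs under Python 3):

import hashlib

film_id = '12345'   # invented content id
sid = hashlib.md5(('content%scthd' % film_id).encode('utf-8')).hexdigest()
print(sid)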
Example No. 13
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link , url)
            result = client.source(url)

            url = client.parseDOM(result, 'div', attrs = {'class': 'fullwindowlink'})[0]
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = urlparse.urljoin(self.base_link , url)

            result = client.source(url)
            result = client.parseDOM(result, 'body')[0]

            post = re.compile('movie_player_file *= *"(.+?)"').findall(result)[0]
            post = urllib.urlencode({'url': post})

            cookie = client.source(self.cookie_link, output='cookie', close=False)

            headers = {'Host': 'gl.hdmoviezone.net',
            'Accept': 'text/html, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Origin': 'http://www.hdmoviezone.net',
            'Cookie': cookie}

            result = client.source(self.stream_link, post=post, headers=headers)

            result = json.loads(result)
            result = result['content']

            links = [i['url'] for i in result]

            for url in links:
                try:
                    i = googleplus.tag(url)[0]
                    sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Moviezone', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
Example No. 14
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            content = re.compile('(.+?)\?S\d*E\d*$').findall(url)

            try: url, season, episode = re.compile('(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
            except: pass

            self.base_link = random.choice([self.base_link_1, self.base_link_2])

            post_id = re.compile('/.+?/(.+)').findall(url)[0].rsplit('/')[0]

            player = urlparse.urljoin(self.base_link, self.player_link)


            if len(content) == 0:
                post = self.player_post_1 % post_id
            else:
                post = client.source(player, post=self.player_post_2 % post_id, headers=self.headers)
                post = client.parseDOM(post, 'ul', attrs = {'class': 'movie-parts'})[0]
                post = client.parseDOM(post, 'li')
                post = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in post]
                post = [(i[0][0], i[1][0]) for i in post if len(i[0]) > 0 and len(i[1]) > 0]
                post = [i[0] for i in post if '%01d' % int(episode) == i[1]][0]
                post = urlparse.parse_qs(urlparse.urlparse(post).query)['part_id'][0]
                post = self.player_post_3 % (post_id, post)


            url = client.source(player, post=post, headers=self.headers)
            url = re.compile('<source\s+src="([^"]+)').findall(url)[0]
            url = client.replaceHTMLCodes(url)

            if 'google' in url: quality = googleplus.tag(url)[0]['quality']
            else: quality = 'HD'

            sources.append({'source': 'GVideo', 'quality': quality, 'provider': 'Xmovies', 'url': url})

            return sources
        except:
            return sources
Example No. 15
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            u = client.parseDOM(result, 'meta', ret='content', attrs = {'property': 'og:url'})[0]
            links = re.compile('<a href="([?]link_id=.+?)".+?>(.+?)</a>').findall(result)
            links = [u + i[0]  for i in links if 'server' in i[1].lower()]

            for u in links[:3]:
                try:
                    result = client.source(u)

                    url = client.parseDOM(result, 'source', ret='src', attrs = {'type': 'video/.+?'})
                    if len(url) > 0:
                        i = googleplus.tag(url[0])[0]
                        sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url']})

                    url = re.compile('proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]

                    key = base64.b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00=')
                    decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationECB(key + (24 - len(key)) * '\0'))
                    url = url.decode('hex')
                    url = decrypter.feed(url) + decrypter.feed()

                    if 'google' in url: source = 'GVideo'
                    elif 'vk.com' in url: source = 'VK'
                    else: raise Exception()

                    url = resolvers.request(url)
                    for i in url: sources.append({'source': source, 'quality': i['quality'], 'provider': 'Clickplay', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
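In the Clickplay branch above, the proxied link is hex-encoded AES-ECB ciphertext: the hard-coded base64 string decodes to a 20-byte key, which is zero-padded to 24 bytes (an AES-192 key) before pyaes decrypts it. A sketch of just the key preparation:

import base64

key = base64.b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00=')   # decodes to 20 bytes
key = key + (24 - len(key)) * b'\0'                       # zero-pad to a 24-byte AES-192 key
print(len(key))  # 24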
Example No. 16
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            links = re.compile('file\s*:\s*"(.+?)"').findall(result)
            links = [i for i in links if 'google' in i]

            for link in links:
                try:
                    i = googleplus.tag(link)[0]
                    sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Dizilab', 'url': i['url']})
                except:
                    pass

            return sources
        except:
            return sources
Example No. 17
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            query = urlparse.urlparse(url).query

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            result = client.parseDOM(result,
                                     'select',
                                     attrs={'id': 'myDropdown'})[0]
            result = zip(client.parseDOM(result, 'option', ret='value'),
                         client.parseDOM(result, 'option'))
            result = [i[0] for i in result if query == i[1] or query == ''][0]

            url = urlparse.urljoin(self.base_link, result)

            url = client.source(url, output='geturl')
            if not 'google' in url: raise Exception()

            url = googleplus.tag(url)
            for i in url:
                sources.append({
                    'source': 'GVideo',
                    'quality': i['quality'],
                    'provider': 'MVsnap',
                    'url': i['url']
                })

            return sources
        except:
            return sources
Example No. 18
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            query = re.compile('(\d*)').findall(url)[0]
            query = urlparse.urljoin(self.base_link, self.content_link % query)
            query += self.__extra()

            time.sleep(1.5)
            result = client.source(query, headers=self.headers)
            result = json.loads(result)
            result = self.__decrypt(self.data_key, result['data'])
            result = json.loads(result)
            result = result['listvideos']

            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)

            if len(content) == 0:
                links = [i['film_id'] for i in result]
            else:
                ep = re.compile('.+?\s(S\d*E\d*)$').findall(url)[0]
                links = [
                    i['film_id'] for i in result
                    if ep in i['film_name'].encode('utf-8').upper()
                ]

            for l in links[:3]:
                try:
                    url = urlparse.urljoin(self.base_link,
                                           self.source_link % l)
                    url += self.__extra()

                    time.sleep(1.5)
                    url = client.source(url, headers=self.headers)
                    url = json.loads(url)

                    url = self.__decrypt(self.data_key, url['data'])
                    url = json.loads(url)['videos']
                    url = [
                        self.__decrypt(self.film_key, i['film_link'])
                        for i in url
                    ]

                    url = '#'.join(url)
                    url = url.split('#')
                    url = [i for i in url if 'http' in i and 'google' in i]
                    url = [googleplus.tag(i)[0] for i in url]

                    for i in url:
                        sources.append({
                            'source': 'GVideo',
                            'quality': i['quality'],
                            'provider': 'GVcenter',
                            'url': i['url']
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example No. 19
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            u = client.parseDOM(result,
                                'meta',
                                ret='content',
                                attrs={'property': 'og:url'})[0]
            links = re.compile(
                '<a href="([?]link_id=.+?)".+?>(.+?)</a>').findall(result)
            links = [u + i[0] for i in links if 'server' in i[1].lower()]

            for u in links[:3]:
                try:
                    result = client.source(u)

                    url = client.parseDOM(result,
                                          'source',
                                          ret='src',
                                          attrs={'type': 'video/.+?'})
                    if len(url) > 0:
                        i = googleplus.tag(url[0])[0]
                        sources.append({
                            'source': 'GVideo',
                            'quality': i['quality'],
                            'provider': 'Clickplay',
                            'url': i['url']
                        })

                    url = re.compile(
                        'proxy[.]link=clickplay[*](.+?)"').findall(result)[-1]

                    key = base64.b64decode('bW5pcUpUcUJVOFozS1FVZWpTb00=')
                    decrypter = pyaes.Decrypter(
                        pyaes.AESModeOfOperationECB(key +
                                                    (24 - len(key)) * '\0'))
                    url = url.decode('hex')
                    url = decrypter.feed(url) + decrypter.feed()

                    if 'google' in url: source = 'GVideo'
                    elif 'vk.com' in url: source = 'VK'
                    else: raise Exception()

                    url = resolvers.request(url)
                    for i in url:
                        sources.append({
                            'source': source,
                            'quality': i['quality'],
                            'provider': 'Clickplay',
                            'url': i['url']
                        })
                except:
                    pass

            return sources
        except:
            return sources