Example #1
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            hostDict = hostprDict + hostDict
            if url is None: return sources

            r = cfScraper.get(url).content
            quality = re.findall(r'>(\w+)</p', r)
            # an empty match falls back to SD instead of raising IndexError
            quality = '720p' if quality and quality[0] == 'HD' else 'SD'
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except Exception:
            return sources
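
Nothing in Example #1 shows the consuming side of the urlencoded payload it builds, but the round trip is plain urllib; a minimal sketch with made-up values (shown with the Python 3 urllib.parse names, while the snippet itself is Python 2 style):

from urllib.parse import urlencode, parse_qs

# hypothetical attribute values mirroring the keys packed in the loop above
packed = urlencode({'url': '/watch/1', 'data-film': '42',
                    'data-server': '3', 'data-name': 'Server 3'})
unpacked = parse_qs(packed)
unpacked = dict((k, v[0]) if v else (k, '') for k, v in unpacked.items())
print(unpacked['data-film'])  # -> '42'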
Example #2
    def __search(self, titles, year, content):
        try:

            query = [self.search_link % (urllib.quote_plus(cleantitle.getsearch(i))) for i in titles]

            query = [urlparse.urljoin(self.base_link, i) for i in query]

            t = [cleantitle.get(i) for i in set(titles) if i]

            for u in query:
                try:
                    r = client.request(u)

                    r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

                    if content == 'movies':
                        r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
                    else:
                        r = client.parseDOM(r, 'div', attrs={'id': 'series'})

                    r = [dom_parser2.parse_dom(i, 'figcaption') for i in r]
                    data = [(i[0].attrs['title'], dom_parser2.parse_dom(i[0].content, 'a', req='href')) for i in r if i]
                    data = [i[1][0].attrs['href'] for i in data if cleantitle.get(i[0]) in t]
                    if data: return source_utils.strip_domain(data[0])
                    else:
                        url = [dom_parser2.parse_dom(i[0].content, 'a', req='href') for i in r]
                        # dom_parser2 results expose attributes via .attrs,
                        # as used a few lines above
                        data = client.request(url[0][0].attrs['href'])
                        data = re.findall(r'<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                        if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0].attrs['href'])
                except Exception:
                    pass

            return
        except:
            return
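
Example #2's first two steps (percent-encode each candidate title, then resolve it against the site root) work standalone; a sketch with hypothetical base_link and search_link values:

from urllib.parse import quote_plus, urljoin

base_link = 'https://example-host.tld'   # hypothetical
search_link = '/search/%s'               # hypothetical pattern
titles = ['The Movie Title', 'Movie Title']
# .lower() is only a rough stand-in for cleantitle.getsearch()
query = [search_link % quote_plus(t.lower()) for t in titles]
query = [urljoin(base_link, q) for q in query]
print(query[0])  # -> https://example-host.tld/search/the+movie+title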
Example #3
    def sources(self, url, hostDict, hostprDict):

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            # poll until every worker thread has finished
            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
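
The 'S%02dE%02d' tag built above (hdlr2) is what filters the release rows; the matching step in isolation, with hypothetical inputs:

season, episode = 3, 7
hdlr2 = 'S%02dE%02d' % (season, episode)      # -> 'S03E07'
release = 'Some.Show.S03E07.720p.WEB.x264'    # made-up release name
print(hdlr2.lower() in release.lower())       # -> True, so the row is kept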
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            # join() blocks until every thread finishes, so the extra
            # is_alive() polling loop used in the variant above is redundant
            [i.join() for i in threads]
            return self._sources
        except Exception:
            return self._sources
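
Both variants above fan the per-post work out with workers.Thread and then wait for completion; a standard-library alternative (a sketch, not what the addon actually uses) is a thread pool, which waits implicitly on exit:

from concurrent.futures import ThreadPoolExecutor

def _get_sources(post):
    pass  # stand-in for the scraper's worker method

posts = [('href1', 'name1'), ('href2', 'name2')]  # hypothetical rows
with ThreadPoolExecutor(max_workers=8) as pool:
    pool.map(_get_sources, posts)  # shutdown on exit blocks until done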
Example #5
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
            url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
            r = cfScraper.get(url).content
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    url = i.attrs['href']
            return url
        except Exception:
            return
Example #6
    def _get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]
                # strip literal parentheses before comparing titles; the
                # original pattern '(|)' only matched the empty string
                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(
                        self.title):
                    continue
                try:
                    # '|' inside a character class is literal, so the
                    # separators in the original classes are dropped here
                    y = re.findall(
                        r'[\.\(\[\s_\-](S\d+E\d+|S\d+)[\.\)\]\s_\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        r'[\.\(\[\s_\-](\d{4})[\.\)\]\s_\-]', name,
                        re.I)[-1].upper()
                if not y == self.hdlr:
                    continue
                try:
                    size = re.findall(
                        r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = utils._size(size)
                except BaseException:
                    dsize, isize = 0, ''
                self.items.append((name, link, isize, dsize))
            return self.items
        except BaseException:
            return self.items
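
Example #6 defers size normalisation to utils._size, which is not shown; a minimal sketch of what such a helper plausibly does, assuming it returns a (float GB, display string) pair as the call sites suggest:

import re

def _size(size_str):
    """'700 MB' -> (0.68..., '0.68 GB'); an assumed re-implementation."""
    num = float(re.sub(r'[^0-9.]', '', size_str.replace(',', '')))
    gb = num / (1 if size_str.endswith(('GB', 'GiB')) else 1024)
    return gb, '%.2f GB' % gb

print(_size('1.4 GB'))   # -> (1.4, '1.40 GB')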
Example #7
    def __search(self, titles, year):
        try:
            tit = [i.split(':')[0] for i in titles]
            query = [
                self.search_link %
                (urllib.quote_plus(cleantitle.getsearch(i + ' ' + year)))
                for i in tit
            ]
            query = [urlparse.urljoin(self.base_link, i) for i in query]
            t = [cleantitle.get(i) for i in set(titles) if i]
            for u in query:
                try:
                    r = client.request(u)
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'card-content'})
                    r = dom_parser2.parse_dom(r, 'a')
                    r = [(i.attrs['href'], i.content) for i in r if i]
                    # [:1] keeps rows whose first year matches and skips
                    # rows without any year instead of raising IndexError
                    r = [i for i in r
                         if re.findall(r'(\d{4})', i[1])[:1] == [year]]
                    if len(r) == 1: return source_utils.strip_domain(r[0][0])
                    else:
                        r = [i[0] for i in r if cleantitle.get(i[1]) in t]
                        return source_utils.strip_domain(r[0])

                except:
                    pass

            return
        except:
            return
Example #8
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            # search the accumulated text of every block, not just the
            # last loop variable
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except Exception:
            log_utils.log('RMZ - Exception', 1)
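
The harvesting step in Example #8, isolated: pull http/https/ftp links out of a text blob and drop archives and subtitles (the sample input is made up):

import re

text = '''mirror: https://host-a.example/f/abc.mkv
also https://host-b.example/f/abc.rar'''
urls = re.findall(
    r'(?:https?|ftp)://[\w_-]+(?:\.[\w_-]+)+[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-]',
    text)
urls = [u for u in urls if not u.endswith(('.rar', '.zip', '.iso',
                                           '.idx', '.sub', '.srt'))]
print(urls)  # -> ['https://host-a.example/f/abc.mkv']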
Example #9
    def search(self, title, year):
        try:
            url = urljoin(self.base_link,
                          self.search_link % (quote_plus(title)))
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
            r = dom_parser2.parse_dom(r.content, 'li')
            r = [dom_parser2.parse_dom(i, 'a', {'class': 'title'})
                 for i in r]
            r = [(i[0].attrs['href'], i[0].content) for i in r]
            r = [urljoin(self.base_link, i[0]) for i in r
                 if cleantitle.get(title) in cleantitle.get(i[1])
                 and year in i[1]]
            if r: return r[0]
            else: return
        except Exception:
            log_utils.log('RMZ - Exception', 1)
            return
Example #10
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall(
                    r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                # keep digits and the decimal point only; the original class
                # '[^0-9|/.|/,]' also kept '|' and '/' and choked on commas
                size = float(re.sub(r'[^0-9.]', '', size.replace(',', ''))) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                # skip bad links with continue; raising here would land in
                # the outer except and abort every remaining link
                if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) \
                        or url.lower().endswith(('.rar', '.zip', '.iso')):
                    continue
                if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']):
                    continue
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            pass
Example #11
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                # strip literal parentheses; the original pattern '(|)'
                # matched only the empty string and so changed nothing
                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(
                        self.title):
                    continue

                try:
                    y = re.findall(
                        r'[\.\(\[\s_\-](S\d+E\d+|S\d+)[\.\)\]\s_\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        r'[\.\(\[\s_\-](\d{4})[\.\)\]\s_\-]', name,
                        re.I)[-1].upper()
                if not y == self.hdlr:
                    continue

                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except:
            log_utils.log('1337x_exc0', 1)
            return self.items
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']

            hdlr = data['year']

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post,
                                                 'a',
                                                 req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    if not y == hdlr:
                        raise Exception()

                    items += [(link, qual)]

                except Exception:
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith(
                        'http') else client.request(
                            urlparse.urljoin(self.base_link, item[0]))

                    qual = client.parseDOM(r, 'h1')[0]
                    # quality = source_utils.get_release_quality(item[1], qual)[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''',
                                     r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin(
                        'https://', url)

                    ua = {
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }

                    postID = url.split('/embed/')[1]
                    post_link = 'https://vidlink.org/embed/update_views'
                    payload = {'postID': postID}
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url

                    ihtml = client.request(post_link,
                                           post=payload,
                                           headers=headers)
                    linkcode = jsunpack.unpack(ihtml).replace('\\', '')
                    try:
                        extra_link = re.findall(r'var oploadID="(.+?)"',
                                                linkcode)[0]
                        oload = 'https://openload.co/embed/' + extra_link
                        sources.append({
                            'source': 'openload.co',
                            'quality': '1080p',
                            'language': 'en',
                            'url': oload,
                            'direct': False,
                            'debridonly': False
                        })

                    except Exception:
                        pass

                    give_me = re.findall(r'var file1="(.+?)"', linkcode)[0]
                    stream_link = give_me.split('/pl/')[0]
                    headers = {
                        'Referer':
                        'https://vidlink.org/',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }
                    r = client.request(give_me, headers=headers)
                    my_links = re.findall(
                        r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', r)
                    for height, link in my_links:
                        # 480, 360 and anything else all map to SD
                        if '1080' in height:
                            quality = '1080p'
                        elif '720' in height:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        final = stream_link + '/' + link + '.m3u8'
                        sources.append({
                            'source': 'GVIDEO',
                            'quality': quality,
                            'language': 'en',
                            'url': final,
                            'direct': True,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources
        except Exception:
            return sources
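
The vidlink call in Example #12 is an ordinary XHR POST; the same request expressed with the requests library (endpoint, payload key and headers are read off the snippet, the postID value is hypothetical):

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) '
                  'Gecko/20100101 Firefox/65.0',
    'X-Requested-With': 'XMLHttpRequest',
    'Referer': 'https://vidlink.org/embed/123abc',  # hypothetical postID
}
resp = requests.post('https://vidlink.org/embed/update_views',
                     data={'postID': '123abc'}, headers=headers)
packed_js = resp.text  # the snippet feeds this to jsunpack.unpack()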
Example #13
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r1 = client.parseDOM(r, 'div', attrs={'id': 'playeroptions'})[0]
            links = dom.parse_dom(r1, 'li', req=['data-post', 'data-nume'])
            links = [(i.attrs['data-post'], i.attrs['data-nume'],
                      client.parseDOM(i.content,
                                      'span',
                                      attrs={'class': 'title'})[0])
                     for i in links]
            links = [(i[0], i[1], i[2]) for i in links
                     if 'trailer' not in i[1]]
            extra = []  # keeps the loop near the end safe if this parse fails
            try:
                extra = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'links_table'})[0]
                extra = dom.parse_dom(extra, 'td')
                extra = [
                    dom.parse_dom(i.content, 'img', req='src') for i in extra
                    if i
                ]
                extra = [(i[0].attrs['src'],
                          dom.parse_dom(i[0].content, 'a', req='href'))
                         for i in extra if i]
                extra = [(re.findall('domain=(.+?)$',
                                     i[0])[0], i[1][0].attrs['href'])
                         for i in extra if i]
            except BaseException:
                pass
            info = []
            sub = ''  # some branches below overwrite this
            ptype = 'tv' if '/tvshows/' in query else 'movie'
            for item in links:

                plink = 'https://onlinemovie.gr/wp-admin/admin-ajax.php'
                pdata = {
                    'action': 'doo_player_ajax',
                    'post': item[0],
                    'nume': item[1],
                    'type': ptype
                }
                pdata = urllib.urlencode(pdata)
                link = client.request(plink, post=pdata)
                link = client.parseDOM(link, 'iframe', ret='src')[0]
                lang = 'gr'
                quality, info = source_utils.get_release_quality(
                    item[2], item[2])
                info.append('SUB')
                info = ' | '.join(info)
                if 'jwplayer' in link:
                    sub = re.findall('&sub=(.+?)&id', link)[0]
                    sub = urllib.unquote(sub)
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub
                    url = re.findall('source=(.+?)&sub', link)[0]
                    url = urllib.unquote(url)
                    url = urlparse.urljoin(self.base_link,
                                           url) if url.startswith('/') else url

                    if 'cdn' in url or 'nd' in url or url.endswith(
                            '.mp4') or url.endswith('.m3u8'):
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'api.myhls' in link:
                    quality2, info = source_utils.get_release_quality(
                        item[2], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    data = client.request(link, referer=self.base_link)
                    if not unjuice.test(data):
                        continue  # skip this link; raising aborts all the rest
                    r = unjuice.run(data)
                    urls = re.findall(
                        '''file['"]:['"]([^'"]+).+?label":['"]([^'"]+)''', r,
                        re.DOTALL)
                    sub = [i[0] for i in urls if 'srt' in i[0]][0]
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub

                    urls = [(i[0], i[1]) for i in urls if not '.srt' in i[0]]
                    for i in urls:
                        host = 'GVIDEO'
                        quality, url = i[1].lower(), i[0]

                        url = '%s|User-Agent=%s&Referer=%s' % (
                            url, urllib.quote(client.agent()), link)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'myhls.stream' in link:
                    vid = link.split('/')[-1]
                    plink = 'https://myhls.stream/api/source/%s' % vid
                    data = client.request(plink,
                                          post='r=',
                                          referer=link,
                                          XHR=True)
                    data = json.loads(data)

                    urls = data['data']

                    sub = data['captions'][0]['path']
                    sub = 'https://myhls.stream/asset' + sub if sub.startswith(
                        '/') else sub

                    for i in urls:
                        url = i['file'] if not i['file'].startswith(
                            '/') else 'https://myhls.stream/%s' % i['file']
                        quality = i['label']
                        host = 'CDN-HLS'

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'drive' in link:
                    quality, info = source_utils.get_release_quality(
                        item[1], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    try:
                        links = directstream.google(item[0])
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': x['quality'],
                                'language': lang,
                                'url': x['url'],
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })
                    except BaseException:
                        pass

                    try:
                        r = client.request(item[0])
                        links = re.findall('''\{file:\s*['"]([^'"]+)''', r,
                                           re.DOTALL)
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': quality,
                                'language': lang,
                                'url': x,
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })

                    except BaseException:
                        pass

                else:
                    continue

            for item in extra:
                url = item[1]
                if 'movsnely' in url:
                    url = client.request(url, output='geturl', redirect=True)
                quality = 'SD'
                lang, info = 'gr', 'SUB'
                valid, host = source_utils.is_host_valid(item[0], hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'sub': sub
                })

            return sources
        except BaseException:
            return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                try:
                    name = client.parseDOM(post, 'title')[0]
                    name = client.replaceHTMLCodes(name)

                    # flags must go by keyword: a fourth positional argument
                    # to re.sub is the count, not re.I
                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, flags=re.I)

                    if not re.findall(
                            '\w+',
                            cleantitle.get(t))[0] == cleantitle.get(title):
                        raise Exception()

                    y = re.findall('(\d{4}|S\d+E\d+|S\d+)', name, re.I)[0]
                    year = data['year']
                    if not y == year: raise Exception()
                    if not 'tvshowtitle' in data:
                        links = client.parseDOM(post, 'a', ret='href')
                    else:
                        ep = '%02d' % int(data['episode'])
                        pattern = '>Season[\s|\:]%d<(.+?)(?:<b>Season|</content)' % int(
                            data['season'])
                        data = re.findall(pattern, post, re.S | re.I)
                        data = dom_parser2.parse_dom(data, 'a', req='href')
                        links = [(i.attrs['href'], i.content.lower())
                                 for i in data]
                        links = [
                            i[0] for i in links
                            if (hdlr in i[0] or hdlr in i[1] or ep == i[1])
                        ]

                    for url in links:
                        if any(x in url for x in [
                                '.online', 'xrysoi.se', 'filmer', '.bp',
                                '.blogger'
                        ]):
                            continue

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if 'hdvid' in host: valid = True
                        if not valid: continue
                        quality = 'SD'
                        info = 'SUB'

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'gr',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except:
            return sources
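
Every sources() variant above appends dicts with the same shape; a small sanity check built from the keys the snippets actually use (the helper itself is not part of the addon):

REQUIRED = ('source', 'quality', 'language', 'url', 'direct', 'debridonly')

def valid_source(item):
    """True when a scraped source dict carries every required key."""
    return isinstance(item, dict) and all(k in item for k in REQUIRED)

print(valid_source({'source': 'CDN', 'quality': 'SD', 'language': 'gr',
                    'url': 'https://example.tld/v.m3u8',
                    'direct': True, 'debridonly': False}))  # -> True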