Example #1
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue
                    try:
                        y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if not y == self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
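Every `sources()` method in these examples starts by flattening the `parse_qs` result, which maps each key to a list of values, into a plain key-to-string dict before building the search query. A minimal, self-contained sketch of that idiom using only the standard library (the sample query string is made up):

from urllib.parse import parse_qs

raw = 'tvshowtitle=Some+Show&season=1&episode=2&year=2020'
data = parse_qs(raw)  # {'tvshowtitle': ['Some Show'], 'season': ['1'], ...}
# keep the first value per key, or '' when the value list is empty
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
    if 'tvshowtitle' in data else data['year']
print(data['tvshowtitle'], hdlr)  # Some Show S01E02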
Example #2
 def searchShow(self, title, season, aliases):
     try:
         #title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = cfScraper.get(url).content
         r = ensure_text(r, errors='ignore')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         log_utils.log('123movies1 exception', 1)
         return
Example #3
 def search(self, title, year):
     try:
         url = urljoin(self.base_link,
                       self.search_link % (quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = cfScraper.get(url, headers=headers).content
         r = ensure_text(r, errors='replace')
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except:
         log_utils.log('RMZ - Exception', 1)
         return
Example #4
    def sources(self, url, hostDict, hostprDict):

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            search_id = title.lower()
            url = urljoin(self.base_link, self.search_link % (search_id.replace(' ', '+')))
            headers = {
                'User-Agent': client.agent(),
                'Accept': '*/*',
                'Accept-Encoding': 'identity;q=1, *;q=0',
                'Accept-Language': 'en-US,en;q=0.5',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'DNT': '1'
            }

            response = requests.Session()
            r = response.get(url, headers=headers, timeout=5).text
            r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
            items = client.parseDOM(r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
            for item in items:
                movie_url = client.parseDOM(item, 'a', ret='href')[0]
                movie_title = re.compile('div class="post-title">(.+?)<', re.DOTALL).findall(item)[0]
                if cleantitle.get(title).lower() == cleantitle.get(movie_title).lower():

                    r = response.get(movie_url, headers=headers, timeout=5).text
                    year_data = re.findall('<h2 style="margin-bottom: 0">(.+?)</h2>', r, re.IGNORECASE)[0]
                    if year == year_data:
                        links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)

                        for link, quality in links:

                            if not link.startswith('https:'):
                                link = 'https:' + link.replace('http:', '')
                            link = link + '|Referer=https://iwaatch.com/movie/' + title

                            quality, info = source_utils.get_release_quality(quality, link)

                            sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
            return sources
        except:
            log_utils.log('iWAATCH - Exception', 1)
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s-s%02de%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            try:
                data = re.compile(
                    '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
                ).findall(r)
                for url in data:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            except:
                log_utils.log('projectfree2 - Exception', 1)
            return sources
        except:
            log_utils.log('projectfree3 - Exception', 1)
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]  # join() blocks until every worker finishes, so no polling loop is needed

            return self._sources
        except Exception:
            return self._sources
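Examples #4 and #7 fan the candidate links out to worker threads that append results to a shared list. The `workers.Thread` wrapper is not shown here; assuming it mirrors `threading.Thread`, the pattern reduces to this stdlib sketch, which also shows why no polling loop is needed once every thread has been joined:

import threading

results = []
lock = threading.Lock()

def _get_sources(item):
    # stand-in for the scraper's per-link resolver
    with lock:
        results.append(item)

items = [('title-1', 'http://a.example'), ('title-2', 'http://b.example')]
threads = [threading.Thread(target=_get_sources, args=(i,)) for i in items]
[t.start() for t in threads]
[t.join() for t in threads]  # join() blocks until each worker finishes
print(len(results))  # 2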
Example #8
    def searchMovie(self, title, year, aliases):
        try:
            #title = cleantitle.normalize(title)
            url = urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

            url = urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            log_utils.log('123movies2 exception', 1)
            return
Example #9
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)

            self.hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            self.hdlr = self.hdlr.lower()
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            try:
                r = client.request(url)
                links = re.findall('<a href="(/torrent/.+?)"', r, re.DOTALL)[:20]

                threads = []
                for link in links:
                    threads.append(workers.Thread(self.get_sources, link))
                [i.start() for i in threads]
                [i.join() for i in threads]
                return self.sources
            except:
                log_utils.log('YourBT3 - Exception', 1)
                return self.sources

        except:
            log_utils.log('YourBT3 - Exception', 1)
            return self.sources
Example #10
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(
                        self.title):
                    continue

                try:
                    y = re.findall(
                        '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                        re.I)[-1].upper()
                if not y == self.hdlr:
                    continue

                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except:
            log_utils.log('1337x_exc0', 1)
            return self.items
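The size regex used throughout these scrapers captures strings such as '1.4 GB' or '700 MiB', and `source_utils._size` (not shown) turns them into a float gigabyte count plus a display string. A rough, assumed equivalent:

import re

def _size(size_str):
    # assumed behaviour: return (size in GB as float, printable size string)
    value, unit = re.findall(r'([\d.,]+)\s*(GiB|MiB|GB|MB)', size_str, re.I)[0]
    value = float(value.replace(',', ''))
    if unit.upper() in ('MIB', 'MB'):
        value /= 1024.0
    return value, '%.2f GB' % value

post = '<td>Some.Show.S01E02.720p 1.4 GB</td>'
size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
print(_size(size))  # (1.4, '1.40 GB')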
Example #11
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % quote_plus(query))

            r = client.request(url)
            #r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').strip()
            posts = client.parseDOM(r,
                                    'table',
                                    attrs={
                                        'class': 'table2',
                                        'cellspacing': '0'
                                    })[1]
            posts = client.parseDOM(posts, 'tr')[1:]
            for post in posts:

                links = client.parseDOM(post, 'a', ret='href')[0]
                links = client.replaceHTMLCodes(links).lstrip('/')
                hash = links.split('/')[0]
                name = links.split('/')[1]
                url = 'magnet:?xt=urn:btih:{}'.format(hash)
                if query not in cleantitle.get_title(name): continue

                quality, info = source_utils.get_release_quality(name)
                try:
                    size = client.parseDOM(post,
                                           'td',
                                           attrs={'class': 'tdnormal'})[1]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('tdl3 - Exception', 1)
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            try:
                r = client.request(url)
                posts = client.parseDOM(r, 'tr')
                for post in posts:
                    link = re.findall(
                        'a title="Download Torrent Magnet" href="(magnet:.+?)"',
                        post, re.DOTALL)
                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                    except BaseException:
                        dsize, isize = 0.0, ''
                    for url in link:
                        url = unquote_plus(url).split('&tr')[0].replace(
                            '&amp;', '&').replace(' ', '.')
                        if hdlr not in url:
                            continue
                        name = url.split('&dn=')[1]
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        if any(x in url for x in [
                                'FRENCH', 'Ita', 'italian', 'TRUEFRENCH',
                                '-lat-', 'Dublado'
                        ]):
                            continue
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize,
                            'name': name
                        })
            except:
                return sources
            return sources
        except:
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            items = []

            try:
                url = self.search_link % quote_plus(query)
                url = urljoin(self.base_link, url)

                r = cfScraper.get(url).content
                r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})


                for post in posts:
                    try:
                        u = client.parseDOM(post,
                                            "div",
                                            attrs={"class": "postContent"})
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            u[0])[0]
                        u = client.parseDOM(u, "h2")
                        u = client.parseDOM(u, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                        items += u
                    except:
                        pass
            except:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): continue

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        dsize, isize = source_utils._size(item[2])
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            try:
                r = client.request(url)
                links = zip(client.parseDOM(r, 'a', attrs={'class': 'btn btn-default magnet-button stats-action banner-button'}, ret='href'), client.parseDOM(r, 'td', attrs={'class': 'size'}))

                for link in links:
                    url = link[0].replace('&amp;', '&')
                    url = re.sub(r'(&tr=.+)&dn=', '&dn=', url) # some links on bitlord &tr= before &dn=
                    url = url.split('&tr=')[0]
                    if 'magnet' not in url:
                        continue

                    if any(x in url.lower() for x in ['french', 'italian', 'spanish', 'truefrench', 'dublado', 'dubbed']):
                        continue

                    name = url.split('&dn=')[1]
                    t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = link[1]
                        size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)
                    info = ' | '.join(info)

                    sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url,
                                                'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
                return sources

            except:
                return sources

        except:
            from prophetscrapers.modules import log_utils
            log_utils.log('bitlord - Exception', 1)
            return sources
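Several of the torrent scrapers above recover the release name by splitting the magnet URI on '&dn='. The display-name parameter can also be extracted with the standard library, which handles percent-encoding as well; a small sketch (the magnet values are dummies):

from urllib.parse import urlparse, parse_qs

magnet = ('magnet:?xt=urn:btih:' + '0' * 40 +
          '&dn=Some.Movie.2020.1080p&tr=udp%3A%2F%2Ftracker.example%3A80')

name = magnet.split('&dn=')[1].split('&')[0]  # naive split, as in the scrapers
params = parse_qs(urlparse(magnet).query)     # stdlib alternative
print(name, params['dn'][0])  # Some.Movie.2020.1080p Some.Movie.2020.1080p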
Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % quote_plus(query))

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(
                r,
                'div',
                attrs={'style': 'display:table;width:100%;text-align:left'})
            posts = client.parseDOM(r, 'div', attrs={'class': 'one_result'})
            #log_utils.log('posts_is: '+str(posts))
            for post in posts:

                links = client.parseDOM(post,
                                        'div',
                                        attrs={'class': 'fa fa-magnet'})[0]
                url = client.parseDOM(links, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url).split('&tr=')[0]
                name = url.split('&dn=')[1]
                if query not in cleantitle.get_title(name): continue

                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = client.parseDOM(post,
                                           'span',
                                           attrs={'class': 'torrent_size'})[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('btdig3 - Exception', 1)
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            host_dict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']

            r = client.request(self.base_link, output='extended', timeout='10')
            #r = cfScraper.get(self.base_link).content
            cookie = r[3]
            headers = r[2]
            result = r[0]
            headers['Cookie'] = cookie

            query = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.getsearch(title)))
            query2 = urljoin(self.base_link,
                             self.search_link % quote_plus(title).lower())
            r = client.request(query, headers=headers, XHR=True)
            if len(r) < 20:
                r = client.request(query2, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if 'tvshowtitle' in data:
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                vurl = '%s%s-episode-%s' % (self.base_link, str(
                    r[0][0]).replace('/info', ''), episode)
                vurl2 = None

            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [
                    i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                    or cltitle == cleantitle.getsearch(i[1])
                ]
                vurl = '%s%s-episode-0' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))

            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl

            slinks = client.parseDOM(r,
                                     'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            slinks = [
                slink
                if slink.startswith('http') else 'https:{0}'.format(slink)
                for slink in slinks
            ]

            for url in slinks:
                url = client.replaceHTMLCodes(url)
                #url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, host_dict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            log_utils.log('gowatchseries3 - Exception', 1)
            return sources
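Example #16 issues an initial request only to capture cookies and headers, then reuses them for an XHR search; `client.request(..., output='extended')` returns the body together with the response headers and cookies, as the index accesses above show. With plain `requests` as a stand-in (the base URL and search path are placeholders), the same flow is roughly:

import requests

s = requests.Session()  # cookies persist across calls automatically
base = 'https://example-site.tld'
s.get(base, timeout=10)
r = s.get(base + '/ajax/search.html',
          params={'keyword': 'some title'},
          headers={'X-Requested-With': 'XMLHttpRequest'},  # XHR=True analogue
          timeout=10)
content = r.json()['content']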
Example #17
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                self.title, int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            else:
                url = self.moviesearch.format(quote_plus(query))
                url = urljoin(self.base_link, url)

            items = self._get_items(url)

            hostDict = hostDict + hostprDict
            for item in items:
                try:
                    name = item[0]
                    url = item[1]
                    url = url.split('&tr')[0]
                    quality, info = source_utils.get_release_quality(name, url)
                    info.insert(0, item[2])
                    info = ' | '.join(info)

                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': item[3],
                        'name': name
                    })
                except:
                    log_utils.log('glodls0_exc', 1)

            return sources
        except:
            log_utils.log('glodls1_exc', 1)
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = cleantitle.get_query(data['title'])

            query = '%s %s' % (title, data['year'])

            #_headers = {'User-Agent': client.agent()}

            url = self.search_link % quote(query)
            url = urljoin(self.base_link, url)
            html = client.request(url)  #, headers=_headers)
            try:
                results = client.parseDOM(html, 'div', attrs={'class':
                                                              'row'})[2]
            except Exception:
                return sources

            items = re.findall(
                'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
                re.DOTALL)
            if not items:
                return sources

            for entry in items:
                try:
                    try:
                        link, name = re.findall(
                            '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                            entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    y = entry[-4:]
                    if not y == data['year']:
                        continue

                    response = client.request(link)  #, headers=_headers)
                    try:
                        entries = client.parseDOM(
                            response, 'div', attrs={'class': 'modal-torrent'})
                        for torrent in entries:
                            link, name = re.findall(
                                'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                                torrent, re.DOTALL)[0]
                            try:
                                _name = name.lower().replace('download',
                                                             '').replace(
                                                                 'magnet', '')
                            except:
                                _name = name
                            link = 'magnet:%s' % link
                            link = str(
                                client.replaceHTMLCodes(link).split('&tr')[0])
                            quality, info = source_utils.get_release_quality(
                                name, link)
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                    torrent)[-1]
                                dsize, isize = source_utils._size(size)
                            except Exception:
                                dsize, isize = 0.0, ''
                            info.insert(0, isize)
                            info = ' | '.join(info)

                            sources.append({
                                'source': 'Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': _name
                            })
                    except Exception:
                        continue
                except Exception:
                    continue

            return sources
        except:
            from prophetscrapers.modules import log_utils
            log_utils.log('Ytsam - Exception', 1)
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle']
            title = cleantitle.get_query(title)

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)

            url = self.search_link % (quote_plus(query).replace('+', '-'))
            url = urljoin(self.base_link, url)
            html = client.request(url)

            try:
                results = client.parseDOM(html, 'table', attrs={'class': 'forum_header_border'})
                for result in results:
                    if 'magnet:' in result:
                        results = result
                        break
            except Exception:
                return sources
            rows = re.findall('<tr name="hover" class="forum_header_border">(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                try:
                    try:
                        columns = re.findall('<td\s.+?>(.+?)</td>', entry, re.DOTALL)
                        derka = re.findall('href="magnet:(.+?)" class="magnet" title="(.+?)"', columns[2], re.DOTALL)[0]
                        name = derka[1]
                        link = 'magnet:%s' % (str(client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                        t = name.split(hdlr)[0]
                    if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(title):
                            continue
                    except Exception:
                        continue
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        continue

                    quality, info = source_utils.get_release_quality(name, name)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)

                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                    'url': link, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'name': name})
                except Exception:
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except:
            log_utils.log('eztv_exc', 1)
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('%3A+', '+')

            #r = client.request(url)
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')

            posts = client.parseDOM(r, "div", attrs={"class": "postContent"})
            items = []
            for post in posts:
                try:
                    p = client.parseDOM(post, "p", attrs={"dir": "ltr"})[1:]
                    for i in p:
                        items.append(i)
                except:
                    pass

            try:
                for item in items:
                    u = client.parseDOM(item, 'a', ret='href')
                    name = re.findall('<strong>(.*?)</strong>', item,
                                      re.DOTALL)[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): continue
                    for url in u:
                        if any(x in url for x in ['.rar', '.zip', '.iso']):
                            continue
                        quality, info = source_utils.get_release_quality(
                            name, url)
                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB|gb|mb))',
                                item, re.DOTALL)[0]
                            dsize, isize = source_utils._size(size)
                        except:
                            dsize, isize = 0.0, ''
                        info.insert(0, isize)
                        info = ' | '.join(info)
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': name
                            })
            except:
                pass
            return sources
        except:
            log_utils.log('max_rls Exception', 1)
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            #r = client.request(self.base_link)
            #search_base = client.parseDOM(r, 'form', ret='action')[0]
            #log_utils.log(search_base)
            #url = urljoin(search_base, self.search_link)
            url = urljoin(self.base_link, self.search_link)
            url = url % quote_plus(query)

            r = client.request(url)

            r = client.parseDOM(r, 'h2')

            z = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            posts = [(i[1], i[0]) for i in z]

            host_dict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    r = client.request(post[1])
                    r = ensure_text(r, errors='replace')
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'entry-content cf'})[0]

                    if 'tvshowtitle' in data:
                        z = zip(
                            re.findall(r'<p><b>(%s.+?)</b>' % title, r,
                                       re.I | re.S),
                            re.findall(r'<ul>(.+?)</ul>', r, re.S))
                        for f in z:
                            u = re.findall(r'\'(http.+?)\'',
                                           f[1]) + re.findall(
                                               r'\"(http.+?)\"', f[1])
                            u = [i for i in u if '/embed/' not in i]
                            t = f[0]
                            try:
                                s = re.findall(
                                    r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                    t)[0]
                            except:
                                s = '0'
                            items += [(t, i, s) for i in u]

                    else:
                        t = ensure_text(post[0], errors='replace')
                        u = re.findall(r'\'(http.+?)\'', r) + re.findall(
                            '\"(http.+?)\"', r)
                        u = [i for i in u if '/embed/' not in i]
                        try:
                            s = re.findall(
                                r'((?:\d+\.\d+|\d+\,\d+|\d+|\d+\,\d+\.\d+)\s*(?:GB|GiB|MB|MiB))',
                                r)[0]
                        except:
                            s = '0'
                        items += [(t, i, s) for i in u]

                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)

            for item in items:
                try:
                    url = ensure_text(item[1])
                    url = client.replaceHTMLCodes(url)

                    void = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg',
                            '.bmp', '.gif', 'sub', 'srt')
                    if url.endswith(void):
                        continue

                    name = ensure_text(item[0], errors='replace')
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, flags=re.I)  # the 4th positional arg of re.sub is count, not flags
                    if not cleantitle.get(t) == cleantitle.get(title):
                        continue

                    y = re.findall(
                        r'[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        continue

                    valid, host = source_utils.is_host_valid(url, host_dict)
                    if not valid:
                        continue
                    host = client.replaceHTMLCodes(host)

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = item[2]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'size': dsize,
                        'name': name
                    })
                except:
                    log_utils.log('MYVIDEOLINK ERROR', 1)

            return sources
        except:
            log_utils.log('MYVIDEOLINK ERROR', 1)
            return sources
Example #22
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            #if imdb not in r[0]:
            #raise Exception()

            try:
                cookie = r[4]
                headers = r[3]
            except:
                cookie = r[3]
                headers = r[2]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url

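            # build the AJAX player request the way the site's own JS does:
            # a Bearer token from the __utmx cookie plus the token/elid pair
            # scraped from the page and a base64-encoded timestamp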
            u = '/ajax/vsozrflxcw.php'
            self.base_link = client.request(
                self.base_link,
                headers={'User-Agent': client.agent()},
                output='geturl')
            u = urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            tim = str(int(time.time()))
            tim = tim if six.PY2 else six.ensure_binary(tim)
            # base64.encodestring was removed in Python 3.9; encodebytes replaces it
            enc = base64.encodestring if six.PY2 else base64.encodebytes
            elid = quote(enc(tim)).strip()

            token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'nopop': '',
                'elid': elid
            }
            post = urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u,
                               post=post,
                               headers=headers,
                               cookie=cookie,
                               XHR=True)
            r = str(json.loads(r))

            r = re.findall(r'\'(http.+?)\'', r) + re.findall(r'\"(http.+?)\"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(
                                    i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if valid:
                            if 'vidnode.net' in i:
                                i = i.replace('vidnode.net', 'vidcloud9.com')
                                hoster = 'vidcloud9'
                            sources.append({
                                'source': hoster,
                                'quality': '720p',
                                'language': 'en',
                                'url': i,
                                'direct': False,
                                'debridonly': False
                            })
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return sources
Example No. 23
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(data['season'])) if 'tvshowtitle' in data else title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            ua = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=ua).content
            r = six.ensure_text(r, errors='replace')
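            # each result card yields a (link, title, release-year) tuple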
            _posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = []
            for p in _posts:
                try:
                    post = (client.parseDOM(p, 'a', ret='href')[1],
                            client.parseDOM(p, 'a')[1],
                            re.findall(r'Release:\s*?(\d{4})</', p, re.I | re.S)[0])
                    posts.append(post)
                except:
                    pass
            posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = cfScraper.get(post, headers=ua).content
                data = six.ensure_text(data, errors='replace')
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [i[0] for i in posts if cleantitle.get_title(title) in cleantitle.get_title(i[1]) and hdlr == i[2]][0]

            r = cfScraper.get(link, headers=ua).content
            r = six.ensure_text(r, errors='replace')
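            # the player iframe is hidden inside a Base64-encoded
            # document.write(); decode it to recover the hoster URL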
            try:
                v = re.findall(r'document\.write\(Base64\.decode\("(.+?)"\)', r)[0]
                v = v.encode('utf-8')
                b64 = base64.b64decode(v)
                b64 = six.ensure_text(b64, errors='ignore')
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall(r'([\w]+[.][\w]+)$', urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    log_utils.log('plockers4 Exception', 1)
            except:
                log_utils.log('plockers3 Exception', 1)
            r = client.parseDOM(r, 'div', attrs={'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub(r'Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0].replace('\/', '/')
                        host = client.replaceHTMLCodes(host)
                        host = six.ensure_str(host)
                        if 'other' in host: continue
                        valid, hoster = source_utils.is_host_valid(host, hostDict)
                        if valid:
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                    except:
                        log_utils.log('plockers5 Exception', 1)
            return sources
        except:
            log_utils.log('plockers Exception', 1)
            return sources
Example No. 24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            # if (self.user != '' and self.password != ''): #raise Exception()

            # login = urljoin(self.base_link, '/login.html')

            # post = urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

            # cookie = client.request(login, post=post, output='cookie', close=False)

            # r = client.request(login, post=post, cookie=cookie, output='extended')

            # headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            # else:
            # headers = {}

            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):

                data = parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']

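                # the site has no working search endpoint, so walk its
                # alphabetical index pages and match the title locally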
                def searchname(r):
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [i for i in r
                         if cleantitle.get(title) == cleantitle.get(i[1])]
                    return r[0][0] if r else []

                if 'tvshowtitle' in data:
                    link = urljoin(self.base_link,
                                   'tvshow-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall(r'(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall(r'watch-tvshow-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall(r'(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall(r'watch-tvshow-(.+?)-\d+\.html',
                                             i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urljoin(self.base_link,
                                   'movies-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall(r'(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall(r'watch-movie-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall(r'(watch-movie-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall(r'watch-movie-(.+?)-\d+\.html', i))
                                 for i in r]
                            r = searchname(r)
                            if r != []: break

                # leaving old search in for if streamlord renables searching on the site
                # query = urljoin(self.base_link, self.search_link)

                # post = urlencode({'searchapi2': title})

                # r = client.request(query, post=post, headers=headers)

                # if 'tvshowtitle' in data:
                # r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                # r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                # else:
                # r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                # r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                # r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                # r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                # r = [i[0] for i in r][0]

                u = urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if 'failed' not in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall(r'(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urljoin(self.base_link, url)

                # plain GET; 'post' was only defined by the commented-out login flow
                r = client.request(r, headers=headers)

            quality = '720p' if '-movie-' in r else 'SD'

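            # the stream URL is assembled client-side from an inline JS array,
            # a variable and the text of a hidden <span>; mirror that
            # concatenation here to rebuild the final link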
            try:
                f = re.findall(r'''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall(r'''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall(r'function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    r'\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall(r'var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False
            })

            return sources
        except:
            log_utils.log('streamlord_exc0', 1)
            return sources
Example No. 25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote(query)
            url = urljoin(self.base_link, url)

            html = client.request(url)
            html = html.replace('&nbsp;', ' ')

            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})
            except:
                return sources

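            # results are paginated; fetch page 2 of the listing as well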
            url2 = url.replace('/1/', '/2/')

            html2 = client.request(url2)
            html2 = html2.replace('&nbsp;', ' ')

            try:
                results += client.parseDOM(html2,
                                           'table',
                                           attrs={'id': 'searchResult'})
            except:
                return sources

            results = ''.join(results)

            rows = re.findall(r'<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

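            # each table row carries a magnet link, a release name and a size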
            for entry in rows:
                try:
                    try:
                        url = 'magnet:%s' % (re.findall(
                            r'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        url = str(client.replaceHTMLCodes(url).split('&tr')[0])
                    except:
                        continue

                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        name = unquote_plus(name).replace(' ', '.').lower()

                        t = name.split(hdlr)[0].replace(data['year'], '')
                        t = t.replace('(', '').replace(')', '').replace('&', 'and')
                        t = t.replace('.US.', '.').replace('.us.', '.').lower()
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue
                    except:
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''

                    info.insert(0, isize)

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'name': name
                    })
                except:
                    continue

            return sources

        except:
            log_utils.log('tpb_exc', 1)
            return sources
Example No. 26
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ',
                           query).lower()

            url = urljoin(self.base_link, self.search_link % query)

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
            posts = client.parseDOM(r, 'div')[1:]
            posts = [i for i in posts if 'magnet/' in i]
            for post in posts:

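                # magnets are linked as relative 'magnet/<infohash>' paths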
                link = client.parseDOM(post, 'a', ret='href')[0]
                # lstrip strips a character set (which can eat leading hex
                # digits of the hash), so split off the path prefix instead
                url = 'magnet:?xt=urn:btih:' + link.split('magnet/')[-1]
                try:
                    name = client.parseDOM(post, 'a', ret='title')[0]
                    if query not in cleantitle.get_title(name): continue
                except:
                    name = ''

                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>',
                                      post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({
                    'source': 'Torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': name
                })

            return sources
        except:
            log_utils.log('bt4g3 - Exception', 1)
            return sources
Example No. 27
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urljoin(self.base_link, self.search_link % quote_plus(query))
            r = ensure_str(cfScraper.get(url).content, errors='replace')
            #log_utils.log('ultrahd_r ' + str(r))
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]
            hostDict = hostprDict + hostDict

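            # open each matching article and pair its turbobit links with the
            # release names and sizes from the details spoiler, when present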
            for item in r:
                try:
                    data = ensure_text(cfScraper.get(item[0]).content, errors='replace')
                    data = client.parseDOM(data, 'div', attrs={'id': 'r-content'})[0]
                    urls = re.findall(r'\s*<u><a href="(.+?)".+?</a></u>', data, re.S)
                    try: details = client.parseDOM(data, 'div', attrs={'class': 'text_spoiler'})[0]
                    except: details = None
                    if details:
                        _zip = zip([u for u in urls if u.startswith('https://turbobit')], re.findall(r'General : (.+?)<br', details), re.findall(r'Length : (.+?) for', details))
                    else:
                        _zip = zip([u for u in urls if u.startswith('https://turbobit')], re.findall(r'/uploads/0-0-vip-(.+?).jpg', data, re.I|re.S))

                    for z in _zip:
                        try:
                            url = ensure_str(client.replaceHTMLCodes(z[0]))
                            name = ensure_str(client.replaceHTMLCodes(z[1])).replace('dual', ' dual ')
                            if 'dublaj' in name.lower(): continue

                            info = []
                            quality, info = source_utils.get_release_quality(url, name)
                            if quality == 'sd' and 'remux' in name.lower(): quality = '1080p'

                            try:
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', z[2])[0]
                                dsize, isize = source_utils._size(size)
                            except:
                                dsize, isize = 0.0, ''
                            info.insert(0, isize)

                            info = ' | '.join(info)
                            if any(x in url for x in ['.rar', '.zip', '.iso']):
                                raise Exception()
                            # if not 'turbobit' in url:
                                # continue
                            sources.append({
                                'source': 'turbobit',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'size': dsize,
                                'name': name,
                                'direct': True,
                                'debridonly': True
                            })
                        except:
                            log_utils.log('ultrahd_exc2', 1)
                except:
                    log_utils.log('ultrahd_exc1', 1)
            return sources
        except:
            log_utils.log('ultrahd_exc0', 1)
            return sources
Example No. 28
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)

            r = client.request(url)

            posts = re.findall('<h2 class="title">(.+?)</h2>', r,
                               re.IGNORECASE)

            hostDict = hostprDict + hostDict

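            # first pass: gather mirror links per post together with quality,
            # size and name metadata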
            urls = []
            for item in posts:

                try:
                    link, name = re.findall('href="(.+?)" title="(.+?)"', item,
                                            re.IGNORECASE)[0]
                    if cleantitle.get(title) not in cleantitle.get(name):
                        continue
                    name = client.replaceHTMLCodes(name)
                    try:
                        _name = name.lower().replace('permalink to', '')
                    except:
                        _name = name

                    quality, info = source_utils.get_release_quality(
                        name, link)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            name)[-1]
                        dsize, isize = source_utils._size(size)
                    except Exception:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)

                    info = ' | '.join(info)

                    links = self.links(link)
                    # keep size and name with each mirror link for the second pass
                    urls += [(i, quality, info, dsize, _name) for i in links]
                except Exception:
                    pass

            for item in urls:
                if 'earn-money' in item[0]:
                    continue

                if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                    continue
                url = client.replaceHTMLCodes(item[0])
                #url = url.encode('utf-8')
                url = ensure_text(url)

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                host = ensure_text(host)

                sources.append({
                    'source': host,
                    'quality': item[1],
                    'language': 'en',
                    'url': url,
                    'info': item[2],
                    'direct': False,
                    'debridonly': True,
                    'size': item[3],
                    'name': item[4]
                })
            return sources
        except Exception:
            return sources
Example No. 29
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources
            data = parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            title = data['title']
            year = data['year']

            tit = cleantitle.geturl(title + ' ' + year)
            query = urljoin(self.base_link, tit)

            r = client.request(query, referer=self.base_link, redirect=True)
            if not data['imdb'] in r:
                return sources

            links = []

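            # gather candidate links from both the download tab and the
            # stream-tab iframes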
            try:
                down = client.parseDOM(r, 'div', attrs={'id': 'tab-download'})[0]
                down = client.parseDOM(down, 'a', ret='href')[0]
                data = client.request(down)
                frames = client.parseDOM(data, 'div', attrs={'class': 'single-link'})
                frames = [client.parseDOM(i, 'a', ret='href')[0] for i in frames if i]
                links.extend(frames)

            except Exception:
                pass
            try:
                streams = client.parseDOM(r, 'div', attrs={'id': 'tab-stream'})[0]
                streams = re.findall(r'''iframe src=(.+?) frameborder''',
                                     streams.replace('&quot;', ''),
                                     re.I | re.DOTALL)
                links.extend(streams)
            except Exception:
                pass

            for url in links:
                try:
                    rd = False
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        valid, host = source_utils.is_host_valid(
                            url, hostprDict)
                        if not valid:
                            continue
                        rd = True
                    #quality, _ = source_utils.get_release_quality(url, url)
                    quality = '720p'
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': rd
                    })
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('filmxy', 1)
            return sources
Example No. 30
    def sources(self, url, hostDict, hostprDict):
        sources = []
        if not url: return sources
        try:
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)
            aliases = data['aliases']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            hdlr2 = 'S%d - %d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query2 = '%s %s' % (title, hdlr2)
            query2 = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query2)

            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            url2 = self.search_link % quote_plus(query2)
            url2 = urljoin(self.base_link, url2)
            urls.append(url2)

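            # try both query variants, since the site labels episodes both as
            # SxxExx and as 'Sx - y'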
            for url in urls:
                try:
                    r = client.request(url)
                    if 'magnet' not in r:
                        continue
                    r = re.sub(r'[\n\t]', '', r)
                    tbody = client.parseDOM(r, 'tbody')
                    rows = client.parseDOM(tbody, 'tr')

                    for row in rows:
                        links = zip(
                            re.findall('href="(magnet:.+?)"', row, re.DOTALL),
                            re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                row, re.DOTALL),
                            [
                                re.findall(
                                    '<td class="text-center">([0-9]+)</td>',
                                    row, re.DOTALL)
                            ])

                        for link in links:
                            url = unquote_plus(link[0]).replace(
                                '&amp;', '&').replace(' ', '.').split('&tr')[0]
                            name = url.split('&dn=')[1] if '&dn=' in url else ''

                            quality, info = source_utils.get_release_quality(
                                name, url)
                            try:
                                size = link[1]
                                dsize, isize = source_utils._size(size)
                            except:
                                dsize, isize = 0.0, ''
                                pass
                            info.insert(0, isize)
                            info = ' | '.join(info)

                            sources.append({
                                'source': 'torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True,
                                'size': dsize,
                                'name': name
                            })
                except:
                    log_utils.log('nyaa3 - Exception', 1)
                    continue
            return sources
        except:
            log_utils.log('nyaa4 - Exception', 1)
            return sources