Example 1
    def cache(self, url):

        try:

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            result = zip(client.parseDOM(result, 'series', ret='srsid'),
                         client.parseDOM(result, 'series'))
            result = [(i[0], cleantitle.get(i[1])) for i in result]

            return result

        except:

            pass
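The cache helper above fetches the xsubs.tv series index, strips non-ASCII bytes, and pairs each series id (srsid) with a normalised title; client and cleantitle are project modules, not the standard library. A rough, self-contained sketch of the same idea using only the standard library (the XML snippet and the normalise stand-in are illustrative assumptions, not the project's API):

    import re

    # Hypothetical excerpt of the series index; the real XML shape may differ.
    xml = '<series srsid="147">The Wire</series><series srsid="9">Μια σειρά</series>'

    # Drop non-ASCII bytes, as the example does with re.sub(r'[^\x00-\x7F]+', ' ', result).
    ascii_only = re.sub(r'[^\x00-\x7F]+', ' ', xml)

    def normalise(title):
        # Stand-in for cleantitle.get(): lower-case, alphanumerics only.
        return re.sub(r'[^a-z0-9]', '', title.lower())

    # Pair every srsid with its normalised title, mirroring the zip of the two parseDOM calls.
    pairs = [(m.group(1), normalise(m.group(2)))
             for m in re.finditer(r'<series srsid="(\d+)">(.*?)</series>', ascii_only)]

    print(pairs)  # [('147', 'thewire'), ('9', '')]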
Example 2
    def cache(self, url):

        try:

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            result = zip(client.parseDOM(result, 'series', ret='srsid'),
                         client.parseDOM(result, 'series'))
            result = [(i[0], cleantitle.get(i[1])) for i in result]

            return result

        except Exception as e:

            log.log('Xsubs.tv failed at cache function, reason:  ' + str(e))

            return
Example 3
    def cache(self, url):

        try:

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            result = list(
                zip(client.parseDOM(result, 'series', ret='srsid'),
                    client.parseDOM(result, 'series')))
            result = [(i[0], cleantitle.get(i[1])) for i in result]

            return result

        except Exception as e:

            _, __, tb = sys.exc_info()

            print(traceback.print_tb(tb))

            log_debug('Xsubs.tv failed at cache function, reason:  ' + str(e))

            return
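Example 3 is the same cache function rewritten for Python 3: the traceback is printed via sys.exc_info() and traceback, and zip(...) is wrapped in list(...) because zip returns a lazy, one-shot iterator on Python 3 rather than a list. The list comprehension that follows consumes the iterator either way, but materialising it matters whenever the pairs must be indexed or iterated more than once, e.g.:

    ids = ['147', '9']
    titles = ['thewire', 'lost']

    pairs = zip(ids, titles)           # Python 3: a one-shot iterator, not a list
    list(pairs)                        # first pass consumes it ...
    print(list(pairs))                 # ... so a second pass yields []

    pairs = list(zip(ids, titles))     # materialised once, reusable and indexable
    print(pairs[0])                    # ('147', 'thewire')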
Example 4
    def get(self, query):

        try:

            title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$',
                                                query)[0]

            season, episode = '%01d' % int(season), '%02d' % int(episode)

            title = re.sub('^THE\s+|^A\s+', '', title.strip().upper())
            title = cleantitle.get(title)

            url = 'http://www.xsubs.tv/series/all.xml'

            srsid = cache.get(self.cache, 48, url)
            srsid = [i[0] for i in srsid if title == i[1]][0]

            url = 'http://www.xsubs.tv/series/%s/main.xml' % srsid

            result = client.request(url)
            ssnid = client.parseDOM(result,
                                    'series_group',
                                    ret='ssnid',
                                    attrs={'ssnnum': season})[0]

            url = 'http://www.xsubs.tv/series/%s/%s.xml' % (srsid, ssnid)

            result = client.request(url)

            items = client.parseDOM(result, 'subg')
            items = [(client.parseDOM(i, 'etitle', ret='number'), i)
                     for i in items]
            items = [
                i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode
            ][0]
            items = re.findall('(<sr .+?</sr>)', items)

        except:

            return self.list

        for item in items:

            try:

                p = client.parseDOM(item, 'sr', ret='published_on')[0]
                if p == '': raise Exception()

                name = client.parseDOM(item, 'sr')[0]
                name = name.rsplit('<hits>', 1)[0]
                name = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
                name = '%s %s' % (query, name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'sr', ret='rlsid')[0]
                url = 'http://www.xsubs.tv/xthru/getsub/%s' % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'xsubstv',
                    'rating': 5
                })

            except:

                pass

        return self.list
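Example 4 builds on the cached (srsid, title) pairs: it expects queries of the form "Title SxxEyy", reduces the season to an unpadded number ('%01d') and pads the episode to two digits ('%02d'), presumably to match how the season is then looked up via the ssnnum attribute and the episode compared against the etitle number. A quick standalone check of that parsing step (the query string is made up):

    import re

    query = 'Westworld S02E05'   # hypothetical query, just to exercise the pattern

    title, season, episode = re.findall(r'(.+?) S(\d+)E(\d+)$', query)[0]
    season, episode = '%01d' % int(season), '%02d' % int(episode)

    print(title, season, episode)   # Westworld 2 05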
Example 5
    def get(self, query):

        query = py3_dec(query)

        try:

            try:
                title, season, episode = re.findall(
                    r'(.+?)[ .]s?(\d{1,2})(?: |.)?(?:ep?|x|\.)?(\d{1,2})?',
                    query,
                    flags=re.I)[0]
            except (IndexError, TypeError):
                log_debug(
                    "Search query is not a tv show related, xsubs.tv does not offer subs for movies"
                )
                return

            if season.startswith('0'):
                season = season[-1]

            title = re.sub(r'^THE\s+|^A\s+', '', title.strip().upper())
            title = cleantitle.get(title)

            url = ''.join([self.base_link, '/series/all.xml'])

            srsid = self.cache(url)
            srsid = [i[0] for i in srsid if title == i[1]][0]

            url = ''.join(
                [self.base_link, '/series/{0}/main.xml'.format(srsid)])

            result = client.request(url)

            try:
                ssnid = client.parseDOM(result,
                                        'series_group',
                                        ret='ssnid',
                                        attrs={'ssnnum': season})[0]
            except IndexError:
                return

            url = ''.join(
                [self.base_link, '/series/{0}/{1}.xml'.format(srsid, ssnid)])

            result = client.request(url)

            items = client.parseDOM(result, 'subg')
            items = [(client.parseDOM(i, 'etitle', ret='number'), i)
                     for i in items]
            items = [
                i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode
            ][0]
            items = re.findall('(<sr .+?</sr>)', items)

        except Exception as e:

            _, __, tb = sys.exc_info()

            print(traceback.print_tb(tb))

            log_debug('Xsubs.tv failed at get function, reason: ' + str(e))

            return

        for item in items:

            try:

                p = client.parseDOM(item, 'sr', ret='published_on')[0]

                if p == '':

                    continue

                name = client.parseDOM(item, 'sr')[0]
                name = name.rsplit('<hits>', 1)[0]
                label = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
                label = client.replaceHTMLCodes(label)
                name = '{0} {1}'.format(client.replaceHTMLCodes(query), label)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'sr', ret='rlsid')[0]
                url = ''.join(
                    [self.base_link, '/xthru/getsub/{0}'.format(url)])
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                downloads = client.parseDOM(item, 'hits')[0]

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'xsubstv',
                    'rating': 5,
                    'downloads': downloads,
                    'title': label
                })

            except Exception as e:

                _, __, tb = sys.exc_info()

                print(traceback.print_tb(tb))

                log_debug(
                    'Xsubs.tv failed at self.list formation function, reason:  '
                    + str(e))

                return

        return self.list
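Example 5 is the Python 3 rewrite of Example 4: the query is passed through the project's py3_dec helper, the episode pattern is relaxed to accept forms such as "s01e02", "1x02" or "1.02", a leading zero is stripped from the season by hand instead of via '%01d', and a downloads count plus a separate title label are added to each result. Exercising just the relaxed pattern on its own (the sample queries are made up):

    import re

    pattern = r'(.+?)[ .]s?(\d{1,2})(?: |.)?(?:ep?|x|\.)?(\d{1,2})?'

    for q in ('Westworld s02e05', 'Westworld 2x05', 'Westworld 2.05'):
        title, season, episode = re.findall(pattern, q, flags=re.I)[0]
        if season.startswith('0'):
            season = season[-1]
        print(title, season, episode)   # Westworld 2 05, three times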
Example 6
    def get(self, query):
        try:
            match = re.findall('(.+?) \((\d{4})\)$', query)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(
                                title) and c[1] == year:
                            try:
                                item = self.r
                            except:
                                item = client.request(i)
                            break

            else:

                title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$',
                                                    query)[0]

                season, episode = '%01d' % int(season), '%01d' % int(episode)

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'https://subz.xyz/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'tvshows'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if c is not None:
                        if cleantitle.get(c[0]) == cleantitle.get(title):
                            item = i
                            break

                item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
                item = client.request(item)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '.+?'})
        except:
            return

        for item in items:
            try:

                r = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]

                url = client.parseDOM(r, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.replace("'", "").encode('utf-8')

                name = url.split('/')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = name.replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subzxyz',
                    'rating': 5
                })
            except:
                pass

        return self.list
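Example 6 targets subz.xyz rather than xsubs.tv: it branches on whether the query looks like a movie ("Title (Year)") or an episode ("Title SxxEyy"), searches the site, and de-duplicates the scraped links with an order-preserving idiom before trying the first three candidates. The de-duplication step in isolation (the paths are made up):

    url = ['/movies/10', '/movies/11', '/movies/10', '/movies/12', '/movies/11']

    # Keep each path only the first time it appears, preserving order
    # (quadratic, but fine for the handful of links involved here).
    url = [x for y, x in enumerate(url) if x not in url[:y]]

    print(url)   # ['/movies/10', '/movies/11', '/movies/12']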
Example 7
    def get(self, query):

        try:

            try:
                match = re.findall(
                    r'(.+?) (?!\d{4})S?(\d{1,2}) ?X?E?(\d{1,2})$',
                    query,
                    flags=re.IGNORECASE)[0]
            except Exception:
                match = None

            if not match:

                match = re.findall(r'(.+?) *?\(?(\d{4})?\)?$', query)[0]

                if len(match[1]) == 4:

                    title, year = match[0], match[1]

                else:

                    title = match[0]

                query = ' '.join(
                    unquote_plus(re.sub('%\w\w', ' ',
                                        quote_plus(title))).split())

                url = 'https://subz.xyz/search?q={0}'.format(quote_plus(query))

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:20][::-1]

                for i in url:

                    c = cache.get(self.cache, 2200, i)

                    if c is not None:

                        if len(match[1]) == 4:
                            year_check = c[1] == year
                        else:
                            year_check = True

                        if cleantitle.get(
                                c[0]) == cleantitle.get(title) and year_check:

                            try:

                                item = self.r

                            except Exception:

                                item = client.request(i)

                            break

                        else:

                            self.data.append(self.r)

            else:

                title, season, episode = match

                season, episode = '{0}'.format(season), '{0}'.format(episode)

                query = ' '.join(
                    unquote_plus(re.sub('%\w\w', ' ',
                                        quote_plus(title))).split())

                url = 'https://subz.xyz/search?q={0}'.format(quote_plus(query))

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result,
                                      'section',
                                      attrs={'class': 'tvshows'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urljoin('https://subz.xyz', i) for i in url]
                url = url[:20][::-1]

                for i in url:

                    c = cache.get(self.cache, 2200, i)

                    if c is not None:

                        if cleantitle.get(c[0]) == cleantitle.get(title):

                            item = i

                            break

                item = '{0}/seasons/{1}/episodes/{2}'.format(
                    item, season, episode)
                item = client.request(item)

            if self.data:

                item = '\n\n'.join(self.data)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '.+?'})

        except Exception as e:

            log.log('Subzxyz failed at get function, reason: ' + str(e))

            return

        for item in items:

            try:

                r = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]

                url = client.parseDOM(r, 'a', ret='href')[0]
                url = client.replaceHTMLCodes(url)
                url = url.replace("'", "").encode('utf-8')

                name = url.split('/')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = name.replace('_', '').replace('%20', '.')
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subzxyz',
                    'rating': 5
                })

            except Exception as e:

                log.log(
                    'Subzxyz failed at self.list formation function, reason: '
                    + str(e))

                return

        return self.list
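Example 7 is the Python 3 variant of Example 6 and shares its query-normalisation trick: the title is round-tripped through quote_plus, every %XX escape is blanked out, and the result is decoded and re-collapsed, so any character that would percent-encode (accents, punctuation) becomes a space before the search URL is built. That step on its own, using the Python 3 urllib.parse names (the title is made up):

    import re
    from urllib.parse import quote_plus, unquote_plus

    title = "Amelie (Le Fabuleux Destin d'Amelie Poulain)"

    # Percent-encode, blank out every %XX escape, decode, and collapse whitespace.
    query = ' '.join(
        unquote_plus(re.sub(r'%\w\w', ' ', quote_plus(title))).split())

    print(query)               # Amelie Le Fabuleux Destin d Amelie Poulain
    print(quote_plus(query))   # Amelie+Le+Fabuleux+Destin+d+Amelie+Poulain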
Example 8
    def get(self, query):

        try:

            title, season, episode = re.findall('(.+?) S?(\d+) ?X?E?(\d+)$',
                                                query,
                                                flags=re.IGNORECASE)[0]

            season, episode = '{0}'.format(season), '{0}'.format(episode)

            title = re.sub('^THE\s+|^A\s+', '', title.strip().upper())
            title = cleantitle.get(title)

            url = 'http://www.xsubs.tv/series/all.xml'

            srsid = cache.get(self.cache, 48, url)
            srsid = [i[0] for i in srsid if title == i[1]][0]

            url = 'http://www.xsubs.tv/series/{0}/main.xml'.format(srsid)

            result = client.request(url)
            ssnid = client.parseDOM(result,
                                    'series_group',
                                    ret='ssnid',
                                    attrs={'ssnnum': season})[0]

            url = 'http://www.xsubs.tv/series/{0}/{1}.xml'.format(srsid, ssnid)

            result = client.request(url)

            items = client.parseDOM(result, 'subg')
            items = [(client.parseDOM(i, 'etitle', ret='number'), i)
                     for i in items]
            items = [
                i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode
            ][0]
            items = re.findall('(<sr .+?</sr>)', items)

        except Exception as e:

            log.log('Xsubs.tv failed at get function, reason: ' + str(e))

            return

        for item in items:

            try:

                p = client.parseDOM(item, 'sr', ret='published_on')[0]

                if p == '':

                    raise Exception(
                        'Parsedom found no match, line 71 @ xsubztv.py')

                name = client.parseDOM(item, 'sr')[0]
                name = name.rsplit('<hits>', 1)[0]
                name = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
                name = '{0} {1}'.format(query, name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                url = client.parseDOM(item, 'sr', ret='rlsid')[0]
                url = 'http://www.xsubs.tv/xthru/getsub/{0}'.format(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'xsubstv',
                    'rating': 5
                })

            except Exception as e:

                log.log(
                    'Xsubs.tv failed at self.list formation function, reason:  '
                    + str(e))

                return

        return self.list