Exemplo n.º 1
0
    def download(self, path, url):
        """Download a subtitle archive from xsubs.tv into *path*.

        Returns the saved .srt file path, or None on any failure
        (rate limit, bad header, non-.srt payload).
        """
        try:
            cookie = None

            # Missing credentials -> anonymous download (rate-limited by the site).
            anonymous = (self.user == '' or self.password == '')

            code, result = client.request(url, output='response', error=True)

            if code == '429' and anonymous == True:
                # Rate limited: surface the site's message and bail out.
                control.dialog.ok(str('xsubs.tv'), str(result), str(''))
                return
            elif anonymous == False:
                # Authenticated session cookie, cached for 6 hours.
                cookie = cache.get(self.cookie, 6)

            result, headers, content, cookie = client.request(
                url, cookie=cookie, output='extended')

            # File name comes quoted inside the Content-Disposition header.
            subtitle = content['Content-Disposition']
            subtitle = re.findall('"(.+?)"', subtitle)[0]

            subtitle = os.path.join(path, subtitle.decode('utf-8'))

            if not subtitle.endswith('.srt'): raise Exception()

            # The 'with' block closes the file; no explicit close() needed.
            with open(subtitle, 'wb') as subFile:
                subFile.write(result)

            return subtitle
        except Exception:
            pass
Exemplo n.º 2
0
    def archive(self):
        """List archived ('agapimena') shows with a bookmark context menu."""
        self.list = cache.get(self.item_list_11, 24)

        if self.list is None:
            return

        # Keep only the archive entries.
        self.list = [item for item in self.list if '/agapimena/' in item['url']]

        for item in self.list:
            item.update({'action': 'reverseEpisodes'})

            # Bookmark payload: a copy of the item minus pagination state.
            bookmark = {}
            for key, value in item.iteritems():
                if key != 'next':
                    bookmark[key] = value
            bookmark['bookmark'] = item['url']

            item.update({
                'cm': [{
                    'title': 32501,
                    'query': {
                        'action': 'addBookmark',
                        'url': json.dumps(bookmark)
                    }
                }]
            })

        directory.add(self.list)
        return self.list
Exemplo n.º 3
0
    def videos(self, url):
        """Build a playable video listing for *url* (6-hour cache)."""
        self.list = cache.get(self.item_list, 6, url)

        if self.list is None:
            return

        # Mark every entry as a directly playable leaf item.
        for entry in self.list:
            entry.update({'action': 'play', 'isFolder': 'False'})

        directory.add(self.list, content='videos')
        return self.list
Exemplo n.º 4
0
    def resolve(self, url):
        """Resolve a cached player item *url* to (title, stream_url, image).

        Returns None on any failure (item missing, request error).
        """
        try:
            item = cache.get(self.item_list, 24, self.player_link)
            # Pick the cached item whose page url matches the request.
            item = [i for i in item if url == i['url']][0]

            title, url, image = item['title'], item['uid'], item['image']

            # Follow redirects only; we want the final stream url.
            url = client.request(url, output='geturl')

            return (title, url, image)
        except Exception:
            pass
    def get(self, query):
        """Search xsubs.tv for subtitles matching *query*.

        *query* must look like 'Show Title SxxEyy'.  Appends result
        dicts ({'name', 'url', 'source', 'rating'}) to self.list and
        returns it; returns None when any lookup step fails.
        """
        try:
            # Split 'Title SxxEyy' into its components.
            title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$', query)[0]

            # Site uses unpadded season and two-digit episode numbers.
            season, episode = '%01d' % int(season), '%02d' % int(episode)

            # Strip leading articles before normalising the title.
            title = re.sub('^THE\s+|^A\s+', '', title.strip().upper())
            title = cleantitle.get(title)

            url = 'http://www.xsubs.tv/series/all.xml'

            # Series id resolved from the cached full series index (48h).
            srsid = cache.get(self.cache, 48, url)
            srsid = [i[0] for i in srsid if title == i[1]][0]

            url = 'http://www.xsubs.tv/series/%s/main.xml' % srsid

            result = client.request(url)
            # Season id for the requested season number.
            ssnid = client.parseDOM(result, 'series_group', ret='ssnid', attrs = {'ssnnum': season})[0]

            url = 'http://www.xsubs.tv/series/%s/%s.xml' % (srsid, ssnid)

            result = client.request(url)

            # Subtitle groups; keep the one whose episode number matches,
            # then split it into individual <sr> release entries.
            items = client.parseDOM(result, 'subg')
            items = [(client.parseDOM(i, 'etitle', ret='number'), i) for i in items]
            items = [i[1] for i in items if len(i[0]) > 0 and i[0][0] == episode][0]
            items = re.findall('(<sr .+?</sr>)', items)
        except:
            return

        for item in items:
            try:
                # Skip releases without a publication date.
                p = client.parseDOM(item, 'sr', ret='published_on')[0]
                if p == '': raise Exception()

                # Display name: the query plus release tags, markup stripped.
                name = client.parseDOM(item, 'sr')[0]
                name = name.rsplit('<hits>', 1)[0]
                name = re.sub('</.+?><.+?>|<.+?>', ' ', name).strip()
                name = '%s %s' % (query, name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                # Direct download endpoint keyed by the release id.
                url = client.parseDOM(item, 'sr', ret='rlsid')[0]
                url = 'http://www.xsubs.tv/xthru/getsub/%s' % url
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({'name': name, 'url': url, 'source': 'xsubstv', 'rating': 5})
            except:
                pass

        return self.list
Exemplo n.º 6
0
    def tvshows(self):
        """List filtered Alpha TV shows plus three static Alpha Cyprus
        entries, each with an 'episodes' action and an addBookmark
        context-menu item, sorted by title.

        Returns None when the cached show list is unavailable.
        """
        self.list = cache.get(self.item_list_11, 24)

        if self.list == None: return

        # Keep only entries flagged for this listing.
        self.list = [i for i in self.list if i['filter'] == True]

        # Static Alpha Cyprus entries.  Title literals are iso-8859-7
        # (Greek) byte strings re-encoded to utf-8 for display.
        self.list += [{
            'title':
            'ALPHA NEWS йупяоу'.decode('iso-8859-7').encode('utf-8'),
            'image':
            'http://www.alphacyprus.com.cy/sites/tv/files/styles/alpha_-_multicolumn_list/public/thumbnails/alpha_news_0.png',
            'url':
            self.cynews_link
        }, {
            'title':
            'ALPHA емглеяысг йупяоу'.decode('iso-8859-7').encode('utf-8'),
            'image':
            'http://www.alphacyprus.com.cy/sites/tv/files/styles/alpha_-_multicolumn_list/public/thumbnails/img_3846.jpg',
            'url':
            'http://www.alphacyprus.com.cy/shows/informative/alphaenimerosi'
        }, {
            'title':
            'йахе леяа, аккг леяа'.decode('iso-8859-7').encode('utf-8'),
            'image':
            'http://www.alphacyprus.com.cy/sites/tv/files/styles/alpha_-_multicolumn_list/public/thumbnails/kathemeraallimera.jpg',
            'url':
            'http://www.alphacyprus.com.cy/shows/informative/kathemeraallimera'
        }]

        for i in self.list:
            i.update({'action': 'episodes'})

        # Context-menu bookmark (label id 32501): a copy of the item
        # minus its 'next' pagination link.
        for i in self.list:
            bookmark = dict(
                (k, v) for k, v in i.iteritems() if not k == 'next')
            bookmark['bookmark'] = i['url']
            i.update({
                'cm': [{
                    'title': 32501,
                    'query': {
                        'action': 'addBookmark',
                        'url': json.dumps(bookmark)
                    }
                }]
            })

        self.list = sorted(self.list, key=lambda k: k['title'].lower())

        directory.add(self.list)
        return self.list
Exemplo n.º 7
0
    def radios(self):
        """List radio stations (uncached) with bookmark context menus."""
        self.list = cache.get(self.item_list, 0, self.player_link)

        if self.list is None:
            return

        for station in self.list:
            # Directly playable leaf item.
            station.update({'action': 'play', 'isFolder': 'False'})

            # Bookmark payload: the station minus pagination state.
            bookmark = {}
            for key, value in station.iteritems():
                if key != 'next':
                    bookmark[key] = value
            bookmark['bookmark'] = station['url']

            station.update({
                'cm': [{
                    'title': 32501,
                    'query': {
                        'action': 'addBookmark',
                        'url': json.dumps(bookmark)
                    }
                }]
            })

        directory.add(self.list, infotype='Music')
        return self.list
Exemplo n.º 8
0
    def resolve(self, url):
        """Resolve a cached player item *url* to (title, stream_url, image).

        A Windows-Media user agent is sent on the request and appended to
        the returned url (Kodi '|header' syntax) so playback reuses it.
        Returns None on any failure.
        """
        try:
            headers = {'User-Agent': 'NSPlayer/12.00.14393.0693 WMFSDK/12.00.14393.0693'}
            item = cache.get(self.item_list, 24, self.player_link)
            # Pick the cached item whose page url matches the request.
            item = [i for i in item if url == i['url']][0]

            title, url, image = item['title'], item['uid'], item['image']

            url = client.request(url, headers=headers, output='geturl')
            url += '|%s' % urllib.urlencode(headers)

            return (title, url, image)
        except Exception:
            pass
Exemplo n.º 9
0
    def episodes(self, url, fulltitle=False, reverse=False):
        """Build an episode listing for *url* (1-hour cache).

        fulltitle prefixes each title with its show name; reverse flips
        the listing order.
        """
        self.list = cache.get(self.item_list_2, 1, url)

        if self.list is None:
            return

        for episode in self.list:
            episode.update({'action': 'play', 'isFolder': 'False'})
            if fulltitle == True:
                episode.update({
                    'title': '%s - %s' % (episode['tvshowtitle'], episode['title'])
                })

        if reverse == True:
            self.list = list(reversed(self.list))

        directory.add(self.list, content='files')
        return self.list
Exemplo n.º 10
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            limit=None,
            referer=None,
            cookie=None,
            output='',
            timeout='30'):
    """urllib2-based HTTP helper with proxy, cookie, SSL-bypass and
    Cloudflare ('cf-browser-verification') challenge handling.

    *output* selects the return value:
      ''         -> response body (default)
      'cookie'   -> session cookie string
      'response' -> (status code str, body) tuple
      'chunk'    -> first 16KB of bodies larger than ~2MB, else None
      'extended' -> (body, request headers, response headers, cookie)
      'geturl'   -> final url after redirects
      'headers'  -> response headers object

    *limit* caps the body read in KB ('0' -> 224KB, default ~5MB);
    *timeout* is in seconds.  Returns None on any failure, or on HTTP
    errors when error=False.
    """
    try:
        handlers = []

        # Route through an http proxy when one is given.
        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # A cookie jar is only needed when cookies must be reported back
        # or the connection is kept open.
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        # Disable certificate verification on Python >= 2.7.9 (older
        # versions did not verify certificates anyway).
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        # The self-update is a no-op probe: it only verifies *headers*
        # behaves like a dict; any non-dict (incl. None) resets it to {}.
        try:
            headers.update(headers)
        except:
            headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers:
            pass
        elif referer == None:
            # Default referer: the target site's own root.
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme,
                                               urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'
        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:

            # Pass 3xx responses through untouched instead of following.
            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except:
                pass

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                # Cloudflare challenge: solve it via cfcookie (cached
                # 168h per site) and retry once with that cookie.
                if 'cf-browser-verification' in response.read(5242880):

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    cf = cache.get(cfcookie, 168, netloc,
                                   headers['User-Agent'], timeout)

                    headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))

                elif error == False:
                    return

            elif error == False:
                return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                # A Cloudflare cookie, when one was obtained, wins.
                result = cf
            except:
                pass

        elif output == 'response':
            if limit == '0':
                result = (str(response.code), response.read(224 * 1024))
            elif not limit == None:
                result = (str(response.code), response.read(int(limit) * 1024))
            else:
                result = (str(response.code), response.read(5242880))

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            # Only sample large bodies; small ones return None.
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)

        elif output == 'extended':
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            content = response.headers
            result = response.read(5242880)
            return (result, headers, content, cookie)

        elif output == 'geturl':
            result = response.geturl()

        elif output == 'headers':
            content = response.headers
            return content

        else:
            # Plain body read, capped per *limit* (default ~5MB).
            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

        if close == True:
            response.close()

        return result
    except:
        return
    def get(self, query):
        """Search subztv.gr for Greek subtitles matching *query*.

        *query* is either 'Title (YYYY)' (movie) or 'Title SxxEyy'
        (episode).  Appends result dicts to self.list and returns it;
        returns None when the lookup fails at any step.
        """
        try:
            # Movie queries end in a 4-digit year in parentheses.
            match = re.findall('(.+?) \((\d{4})\)$', query)

            if len(match) > 0:

                title, year = match[0][0], match[0][1]

                # Drop characters that do not survive url-quoting cleanly.
                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'http://subztv.gr/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                # Deduplicate candidate movie links, keep the first three.
                url = client.parseDOM(result, 'div', attrs={'id': 'movies'})[0]
                url = re.findall('(/movies/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urlparse.urljoin('http://subztv.gr', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if not c == None:
                        # Candidate matches when both title and year agree.
                        if cleantitle.get(c[0]) == cleantitle.get(
                                title) and c[1] == year:
                            try:
                                # self.r presumably holds the page already
                                # fetched by self.cache — TODO confirm;
                                # otherwise the page is re-requested.
                                item = self.r
                            except:
                                item = client.request(i)
                            break

            else:

                # Episode queries look like 'Title SxxEyy'.
                title, season, episode = re.findall('(.+?) S(\d+)E(\d+)$',
                                                    query)[0]

                season, episode = '%01d' % int(season), '%01d' % int(episode)

                query = ' '.join(
                    urllib.unquote_plus(
                        re.sub('%\w\w', ' ',
                               urllib.quote_plus(title))).split())

                url = 'http://subztv.gr/search?q=%s' % urllib.quote_plus(query)

                result = client.request(url)
                result = re.sub(r'[^\x00-\x7F]+', ' ', result)

                url = client.parseDOM(result, 'div', attrs={'id': 'series'})[0]
                url = re.findall('(/series/\d+)', url)
                url = [x for y, x in enumerate(url) if x not in url[:y]]
                url = [urlparse.urljoin('http://subztv.gr', i) for i in url]
                url = url[:3]

                for i in url:
                    c = cache.get(self.cache, 2200, i)

                    if not c == None:
                        if cleantitle.get(c[0]) == cleantitle.get(title):
                            item = i
                            break

                # NOTE(review): when no series candidate matched, 'item'
                # is unbound here and the NameError is swallowed by the
                # outer except, returning None.
                item = '%s/seasons/%s/episodes/%s' % (item, season, episode)
                item = client.request(item)

            item = re.sub(r'[^\x00-\x7F]+', ' ', item)
            items = client.parseDOM(item, 'tr', attrs={'data-id': '\d+'})
        except:
            return

        for item in items:
            try:
                # Greek-flag icon marks Greek subtitles; skip other rows.
                if not 'img/el.png' in item: raise Exception()

                name = client.parseDOM(item, 'td', attrs={'class': '.+?'})[-1]
                name = name.split('>')[-1].strip()
                name = re.sub('\s\s+', ' ', name)
                name = client.replaceHTMLCodes(name)
                name = name.encode('utf-8')

                # The last quoted http(s) url in the row is the download link.
                url = re.findall('\'(http(?:s|)\://.+?)\'', item)[-1]
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                self.list.append({
                    'name': name,
                    'url': url,
                    'source': 'subztvgr',
                    'rating': 5
                })
            except:
                pass

        return self.list