Example #1
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            links = client.parseDOM(r, 'p')

            hostDict = hostprDict + hostDict

            locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]


            for link in links:
                try:
                    host = re.findall('Downloads-Server(.+?)(?:\'|\")\)', link)[0]
                    host = host.strip().lower().split()[-1]
                    if host == 'fichier': host = '1fichier'
                    host = [x[1] for x in locDict if host == x[0]][0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    url = client.parseDOM(link, 'a', ret='href')[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    r = client.parseDOM(link, 'a')[0]

                    fmt = r.strip().lower().split()

                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+,\d+|\d+) [MG]B)', r)[-1]
                        div = 1 if size.endswith(' GB') else 1024
                        size = float(re.sub('[^0-9.]', '', size.replace(',', '.')))/div
                        info = '%.2f GB' % size
                    except:
                        info = ''

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources
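
The size handling in this example can be exercised on its own; a minimal standalone sketch (the helper name and sample inputs are made up) of how MB/GB figures end up as a GB label:

import re

def extract_size_gb(link_text):
    # pull the last size figure from the anchor text, e.g. '1.4 GB' or '700 MB'
    size = re.findall('((?:\d+\.\d+|\d+,\d+|\d+) [MG]B)', link_text)[-1]
    div = 1 if size.endswith(' GB') else 1024  # normalise MB to GB
    return '%.2f GB' % (float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div)

# extract_size_gb('Movie.2016.720p.BluRay 700 MB')  -> '0.68 GB'
# extract_size_gb('Movie.2016.1080p.BluRay 1,4 GB') -> '1.40 GB'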
Example #2
def resolve(regex):
    try:
        vanilla = re.compile('(<regex>.+)', re.MULTILINE|re.DOTALL).findall(regex)[0]
        cddata = re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE|re.DOTALL).findall(regex)
        for i in cddata:
            regex = regex.replace('<![CDATA['+i+']]>', urllib.quote_plus(i))

        regexs = re.compile('(<regex>.+)', re.MULTILINE|re.DOTALL).findall(regex)[0]
        regexs = re.compile('<regex>(.+?)</regex>', re.MULTILINE|re.DOTALL).findall(regexs)
        regexs = [re.compile('<(.+?)>(.*?)</.+?>', re.MULTILINE|re.DOTALL).findall(i) for i in regexs]

        regexs = [dict([(client.replaceHTMLCodes(x[0]), client.replaceHTMLCodes(urllib.unquote_plus(x[1]))) for x in i]) for i in regexs]
        regexs = [(i['name'], i) for i in regexs]
        regexs = dict(regexs)

        url = regex.split('<regex>', 1)[0].strip()
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')

        r = getRegexParsed(regexs, url)

        try:
            ln = ''
            ret = r[1]
            listrepeat = r[2]['listrepeat']
            regexname = r[2]['name']

            for obj in ret:
                try:
                    item = listrepeat
                    for i in range(len(obj)+1):
                        item = item.replace('[%s.param%s]' % (regexname, str(i)), obj[i-1])

                    item2 = vanilla
                    for i in range(len(obj)+1):
                        item2 = item2.replace('[%s.param%s]' % (regexname, str(i)), obj[i-1])

                    item2 = re.compile('(<regex>.+?</regex>)', re.MULTILINE|re.DOTALL).findall(item2)
                    item2 = [x for x in item2 if not '<name>%s</name>' % regexname in x]
                    item2 = ''.join(item2)

                    ln += '\n<item>%s\n%s</item>\n' % (item, item2)
                except:
                    pass

            return ln
        except:
            pass

        if r[1] == True:
            return r[0]
    except:
        return
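
The CDATA step at the top of resolve() just percent-encodes every CDATA body so the later <regex> splitting is not confused by markup inside it; a standalone sketch (Python 2 urllib, as used here):

import re, urllib

def quote_cdata(xml):
    # percent-encode every <![CDATA[...]]> body, as resolve() does
    for i in re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE | re.DOTALL).findall(xml):
        xml = xml.replace('<![CDATA[' + i + ']]>', urllib.quote_plus(i))
    return xml

# quote_cdata('<expres><![CDATA[<a href="x">]]></expres>')
# -> '<expres>%3Ca+href%3D%22x%22%3E</expres>'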
Example #3
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        try:
            r = 'search/tvdb/%s?type=show&extended=full' % tvdb
            r = json.loads(trakt.getTrakt(r))
            if not r: return '0'

            d = r[0]['show']['genres']
            if not ('anime' in d or 'animation' in d): return '0'

            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = self.search_link % (urllib.quote_plus(tvshowtitle))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = client.parseDOM(r, 'ol', attrs = {'id': 'searchresult'})[0]
            r = client.parseDOM(r, 'h2')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in r]
            r = [i for i in r if t == cleantitle.get(i[1])]
            r = r[-1][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
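
The genre gate at the top of this example only needs the first search hit; a standalone sketch over a made-up Trakt-style response body:

import json

r = json.loads('[{"show": {"genres": ["anime", "comedy"]}}]')  # made-up search hit
d = r[0]['show']['genres']
print('anime' in d or 'animation' in d)  # -> True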
Example #4
    def movie(self, imdb, title, localtitle, year):
        try:
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.request(t, headers={'Accept-Language': 'ar-AR'})
            t = client.parseDOM(t, 'title')[0]
            t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()

            q = self.search_link % urllib.quote_plus(t)
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = client.parseDOM(r, 'div', attrs={'class': 'item'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'class': 'tt'}),
                  client.parseDOM(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [
                i[0] for i in r
                if cleantitle.get(t) == cleantitle.get(i[1]) and year == i[2]
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
Example #5
    def movie(self, imdb, title, localtitle, year):
        try:
            url = self.search_link % (cleantitle.geturl(title), year)

            q = urlparse.urljoin(self.base_link, url)

            r = proxy.geturl(q)
            if not r == None: return url

            t = cleantitle.get(title)

            q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))
            r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1]))
                 for i in r]
            r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]])
                 for i in r]
            r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = re.findall('(?://.+?|)(/.+)', r[0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #6
    def movie(self, imdb, title, localtitle, year):
        try:
            query = urlparse.urljoin(self.base_link, self.search_link)
            query = query % urllib.quote_plus(title)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a',
                                  ret='title'), re.findall('(\d{4})', i))
                 for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

            r = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #7
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        if url == None: return

        url = '/%s/%01d-sezon/%01d-bolum' % (url.replace('/', ''), int(season), int(episode))
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
Example #8
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            months = {'01': 'January', '02': 'February', '03': 'March', '04': 'April', '05': 'May', '06': 'June',
                      '07': 'July', '08': 'August', '09': 'September', '10': 'October', '11': 'November', '12': 'December'}
            premiered = '%s %01d %s' % (months[premiered[1]], int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_episode_name'}),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_num_versions'}))
                      for i in result]
            result = [
                (i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0
            ] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [
                (i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0
            ] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [
                i for i in result
                if title == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1:
                url = [
                    i for i in result if 'season-%01d-episode-%01d' %
                    (int(season), int(episode)) in i[0]
                ]

            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
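
The premiered-date reshaping above produces strings like 'September 24 2007'; a standalone sketch of the same mapping using datetime (assuming an English locale so %B gives the English month name):

from datetime import datetime

def reshape_premiered(premiered):
    # '2007-09-24' -> 'September 24 2007', the format the page uses
    d = datetime.strptime(premiered, '%Y-%m-%d')
    return '%s %d %s' % (d.strftime('%B'), d.day, d.year)

# reshape_premiered('2007-09-24') -> 'September 24 2007'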
Example #9
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            t = cleantitle.get(tvshowtitle)

            u = urlparse.urljoin(self.base_link, self.search_link)

            p = {'q': tvshowtitle.rsplit(':', 1)[0], 'limit': '10', 'timestamp': int(time.time() * 1000), 'verifiedCheck': ''}
            p = urllib.urlencode(p)

            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)

            r = [i for i in r if i['meta'].strip().split()[0].lower() == 'tv']
            r = [i['permalink'] for i in r if t == cleantitle.get(i['title'])][:2]
            r = [(i, urlparse.urljoin(self.base_link, i)) for i in r]
            r = [(i[0], client.request(i[1])) for i in r]
            r = [(i[0], i[1]) for i in r if not i[1] == None]
            r = [(i[0], re.sub('\s|<.+?>|</.+?>', '', i[1])) for i in r]
            r = [(i[0], re.findall('eleased:(\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0]) for i in r if i[1]]
            r = [i for i in r if year in i[1]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #10
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i,
                                       'span',
                                       attrs={'class': 'tv_episode_name'}),
                       re.compile('(\d{4}-\d{2}-\d{2})').findall(i))
                      for i in result]
            result = [
                (i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0
            ] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [
                (i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0
            ] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [
                i for i in result
                if title == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1:
                url = [
                    i for i in result if 'season-%01d-episode-%01d' %
                    (int(season), int(episode)) in i[0]
                ]

            url = client.replaceHTMLCodes(url[0][0])
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

                    y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                    y = re.findall('(\d{4})', y)[0]
                    if not y == year: raise Exception()

                else:
                    url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)


            r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,.+?label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)

            for i in r:
                try:
                    if '1080' in i[1]: quality = '1080p'
                    elif '720' in i[1]: quality = 'HD'
                    else: raise Exception()

                    url = i[0].replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    if not '.php' in i[0]: raise Exception()
                    url = url.encode('utf-8')

                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
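
The stream extraction in this example reduces to a single findall over the embedded player script; a standalone sketch with a made-up snippet:

import re

player_js = 'sources: [{file: "https://example.com/stream.php?v=1", label: "720p"}]'  # made-up input
pairs = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,.+?label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', player_js)
# -> [('https://example.com/stream.php?v=1', '720p')]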
Example #12
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)

            result = str(proxy.request(query, 'free movies'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(proxy.request(query + '&page=2', 'free movies'))

            result = client.parseDOM(result, 'div', attrs={'class': 'item'})

            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]

            result = [(client.parseDOM(i, 'a', ret='href'),
                       client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result
                      if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = [(proxy.parse(i[0]), i[1]) for i in result]

            match = [
                i[0] for i in r
                if tvshowtitle == cleantitle.get(i[1]) and '(%s)' %
                str(year) in i[1]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i),
                                      'free movies')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'link_ite')

            links = client.parseDOM(result,
                                    'table',
                                    attrs={'class': 'link_ite.+?'})

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')
                    url = [x for x in url if 'gtfo' in x][-1]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['gtfo'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i,
                                              'div',
                                              attrs={'class': 'quality'})
                    if any(x in ['[CAM]', '[TS]'] for x in quality):
                        quality = 'CAM'
                    else:
                        quality = 'SD'
                    quality = quality.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
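
The link indirection used here (and in Examples #18 and #25) is just a base64-encoded target carried in a query parameter; a standalone Python 2 sketch with a hypothetical wrapped URL:

import base64, urlparse  # Python 2 module; urllib.parse in Python 3

wrapped = 'http://out.example/link?gtfo=aHR0cDovL2hvc3Rlci5leGFtcGxlL3gxMjM='  # hypothetical
encoded = urlparse.parse_qs(urlparse.urlparse(wrapped).query)['gtfo'][0]
print(base64.b64decode(encoded))  # -> http://hoster.example/x123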
Example #14
    def movie(self, imdb, title, localtitle, year):
        try:
            url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(title),
                                 year)

            url = client.request(url, output='geturl')

            if url == None: raise Exception()

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass

        try:
            t = cleantitle.get(title)

            q = '%s %s' % (title, year)
            q = self.search_link.decode('base64') % urllib.quote_plus(q)

            r = client.request(q, error=True)

            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]
            r = [(i[0],
                  re.findall('(?:^Watch Movie |^Watch |)(.+?)\((\d{4})', i[1]))
                 for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [(urllib.unquote_plus(i[0]), i[1], i[2]) for i in r]
            r = [(urlparse.urlparse(i[0]).path, i[1], i[2]) for i in r]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = re.sub('/watch-movie-|-\d+$', '/', r[0][0].strip())

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            pass
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            h = {'User-Agent': client.agent()}

            r = client.request(url, headers=h, output='extended')

            s = client.parseDOM(r[0], 'ul', attrs={'class': 'episodes'})
            s = client.parseDOM(s, 'a', ret='data.+?')
            s = [
                client.replaceHTMLCodes(i).replace(':', '=').replace(
                    ',', '&').replace('"', '').strip('{').strip('}') for i in s
            ]

            for u in s:
                try:
                    url = '/io/1.0/stream?%s' % u
                    url = urlparse.urljoin(self.base_link, url)

                    r = client.request(url)
                    r = json.loads(r)

                    url = [i['src'] for i in r['streams']]

                    for i in url:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #16
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            if url == None: return

            url = '%s/season/%01d/episode/%01d' % (url, int(season), int(episode))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #17
    def movie(self, imdb, title, localtitle, year):
        try:
            url = self.search_link % (cleantitle.geturl(title), year)
            url = urlparse.urljoin(self.base_link, url)

            url = client.request(url, output='geturl')

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['url'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    if quality == 'quality_cam' or quality == 'quality_ts':
                        quality = 'CAM'
                    elif quality == 'quality_dvd':
                        quality = 'SD'
                    else:
                        raise Exception()

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
            locDict = [i[0] for i in hostDict]

            result = client.request(url)

            links = []

            try:
                r = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
                r = client.parseDOM(r, 'iframe', ret='src')[0]
                links += [(r, url)]
            except:
                pass

            try:
                r = client.parseDOM(result, 'div', attrs = {'class': 'generic-video-item'})
                r = [(i.split('</div>', 1)[-1].split()[0], client.parseDOM(i, 'a', ret='href', attrs = {'rel': '.+?'})) for i in r]
                links += [(i[0], i[1][0]) for i in r if i[1]]
            except:
                pass

            for i in links:
                try:
                    try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i[0].strip().lower()).netloc)[0]
                    except: host = i[0].lower()
                    host = host.rsplit('.', 1)[0]
                    if not host in locDict: raise Exception()
                    host = [x[1] for x in hostDict if x[0] == host][0]
                    host = host.encode('utf-8')

                    url = i[1]
                    url = urlparse.urljoin(self.base_link, url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #20
def parse(url):
    try:
        url = client.replaceHTMLCodes(url)
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except:
        pass
    return url
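
Typical inputs to parse() are redirector links whose real target sits in a u= or q= query parameter; two illustrative (made-up) calls:

print(parse('http://redirect.example/?u=http%3A%2F%2Fhoster.example%2Ffile'))
# -> http://hoster.example/file
print(parse('http://ads.example/out?q=http%3A%2F%2Fhoster.example%2Ffile'))
# -> http://hoster.example/file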
Example #21
    def movie(self, imdb, title, localtitle, year):
        try:
            url = self.search_link % (cleantitle.geturl(title), year)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, limit='1')
            r = client.parseDOM(r, 'title')[0]
            if r == '': raise Exception()

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #22
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        try:
            result = cache.get(self.onlinedizi_tvcache, 120)

            tvshowtitle = cleantitle.get(tvshowtitle)

            result = [i[0] for i in result if tvshowtitle == i[1]][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'movies')

            links = client.parseDOM(r, 'tr')

            for i in links:
                try:
                    url = re.findall('callvalue\((.+?)\)', i)[0]
                    url = re.findall('(http.+?)(?:\'|\")', url)[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = re.findall('quality(\w+)\.png', i)[0]
                    if quality == 'CAM' or quality == 'TS':
                        quality = 'CAM'
                    else:
                        quality = 'SD'

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            filter = [i for i in sources if i['quality'] == 'SD']
            if filter: sources = filter

            return sources
        except:
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if url.isdigit(): url = '/watch-%s-online-free-%s.html' % (url, url)

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'ovie')

            quality = re.compile('Quality(.+?)<').findall(result.replace('\n',''))
            quality = quality[0].strip() if quality else 'SD'
            if quality == 'CAM' or quality == 'TS': quality = 'CAM'
            elif quality == 'SCREENER': quality = 'SCR'
            else: quality = 'SD'

            dupes = []
            links = re.findall('\'(.+?)\'', result) + re.findall('\"(.+?)\"', result)
            links = ['http:' + i if not i.startswith('http') else i for i in links]
            links = [proxy.parse(i) for i in links]
            links = [x for y,x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = urlparse.urlparse(url).query
                    url = url.decode('base64')
                    url = re.findall('((?:http|https)://.+?/.+?)(?:&|$)', url)[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in dupes: raise Exception()
                    dupes.append(url)

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
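
The enumerate/slice idiom used above (and in Example #25) to deduplicate links keeps the first occurrence and preserves order; a standalone sketch:

links = ['a', 'b', 'a', 'c', 'b']
links = [x for y, x in enumerate(links) if x not in links[:y]]
# -> ['a', 'b', 'c']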
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #26
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = '%s/watch/%s-season-%01d-%s.html' % (
                self.base_link, cleantitle.geturl(data['tvshowtitle']),
                int(season), str((int(data['year']) + int(season)) - 1))
            url = client.request(url, output='geturl')
            if url == None: raise Exception()

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url += '?episode=%01d' % int(episode)
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
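
The URL built above assumes one season per calendar year, so the page year is start year + season - 1; a standalone sketch with made-up values (the slug stands in for cleantitle.geturl):

year, season, episode = '2010', 3, 4  # made-up request values
path = '/watch/%s-season-%01d-%s.html' % ('some-show', int(season), str((int(year) + int(season)) - 1))
path += '?episode=%01d' % int(episode)
# -> '/watch/some-show-season-3-2012.html?episode=4'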
Example #27
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, year):
        try:
            t = cleantitle.get(tvshowtitle)

            q = '%s %s' % (tvshowtitle, year)
            q = self.search_link.decode('base64') % urllib.quote_plus(q)

            r = client.request(q)

            r = json.loads(r)['results']
            r = [(i['url'], i['titleNoFormatting']) for i in r]

            r = [(i[0], re.findall('(?:^Watch Movie |^Watch |)(.+?)$', i[1]))
                 for i in r]
            r = [(i[0], i[1][0].rsplit('TV Series')[0].strip('(')) for i in r
                 if i[1]]
            r = [(urllib.unquote_plus(i[0]), i[1]) for i in r]
            r = [(urlparse.urlparse(i[0]).path, i[1]) for i in r]
            r = [i for i in r if t == cleantitle.get(i[1])]

            r = urlparse.urljoin(self.base_link, r[0][0].strip())

            if '/watch-movie-' in r: r = re.sub('/watch-movie-|-\d+$', '/', r)

            y = re.findall('(\d{4})', r)

            if y:
                y = y[0]
            else:
                y = client.request(r)
                y = re.findall('(?:D|d)ate\s*:\s*(\d{4})', y)[0]

            if not year == y: raise Exception()

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #28
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2])
                                        for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0])
                 for i in r if i[2]] + [(i[0], i[1], None)
                                        for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [
                i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception()

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #29
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            tv_maze = tvmaze.tvMaze()
            num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
            num = str(num)

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'tr', attrs = {'class': ''})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [i[0] for i in r if num == i[1]][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #30
    def searchMovie(self, title, year):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.moviesearch_link %
                (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url)
            t = cleantitle.get(title)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [
                i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]
            ][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
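
The title/year matching used throughout these helpers hinges on one findall of a 'Title (Year)' pattern; a standalone sketch on a made-up result title:

import re

entry = 'The Movie Title (2016) Watch Online Free'  # made-up search-result title
print(re.findall('(.+?) \((\d{4})', entry))
# -> [('The Movie Title', '2016')]; the pieces are then compared via cleantitle.get and the request year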