예제 #1
0
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Return the site search URL for a TV show, validated by year.

        Builds the search URL from the slugified show title; when the page
        yields no <title>, retries with the localized (es-ES) IMDb title.
        A candidate is accepted only when `year` appears in the page title.
        Returns None on any failure (broad except is deliberate best-effort).
        """
        try:
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')  # limit='1' — presumably truncates the body; see client.request
            r = client.parseDOM(r, 'title')

            if not r:
                # fallback: derive a title from the localized IMDb page
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language': 'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                # strip a trailing '(YYYY...)' segment from the IMDb title
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            # reject results whose page title does not mention the year
            if not year in r[0]: raise Exception()

            return url
        except:
            return
예제 #2
0
    def searchMovie(self, title, year, aliases, headers):
        """Resolve a movie page URL by probing /full-movie/<alias> slugs.

        Tries each alias title first; if none resolves, retries with the
        release year appended to the slug.  Returns the resolved URL, or
        None when nothing resolves or an error occurs.
        """
        try:
            # guard: aliases may be empty — the original raised an
            # UnboundLocalError on the 'if url == None' check below
            url = None
            for alias in aliases:
                url = '%s/full-movie/%s' % (self.base_link,
                                            cleantitle.geturl(alias['title']))
                # output='geturl' — presumably follows redirects and
                # returns the final URL (None on failure)
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if url is not None and url != self.base_link: break
            if url is None:
                # second pass: append the release year to the slug
                for alias in aliases:
                    url = '%s/full-movie/%s-%s' % (self.base_link,
                                                   cleantitle.geturl(
                                                       alias['title']), year)
                    url = client.request(url,
                                         headers=headers,
                                         output='geturl',
                                         timeout='10')
                    if url is not None and url != self.base_link: break

            return url
        except:
            failure = traceback.format_exc()
            print('CartoonHD - Exception: \n' + str(failure))
            return
예제 #3
0
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Build the show search URL and confirm the year via the page title.

        Falls back to the localized (es-ES) IMDb title when the first
        search yields no <title> element.  Returns None on any failure.
        """
        try:
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            page = client.request(urlparse.urljoin(self.base_link, url),
                                  limit='1')
            titles = client.parseDOM(page, 'title')

            if not titles:
                # derive an alternate name from the localized IMDb page
                imdb_page = client.request(
                    'http://www.imdb.com/title/%s' % imdb,
                    headers={'Accept-Language': 'es-ES'})
                name = client.parseDOM(imdb_page, 'title')[0]
                name = re.sub('\((?:.+?|)\d{4}.+', '', name).strip()
                name = cleantitle.normalize(name.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(name)

                page = client.request(urlparse.urljoin(self.base_link, url),
                                      limit='1')
                titles = client.parseDOM(page, 'title')

            if year not in titles[0]: raise Exception()

            return url
        except:
            return
예제 #4
0
    def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Compose the episode page URL from the query-string show data."""
        try:
            params = urlparse.parse_qs(url)
            params = {k: (v[0] if v else '') for k, v in params.items()}
            # prefer the show title when present, else the movie title
            show = params['tvshowtitle'] if 'tvshowtitle' in params else params['title']
            ep_id = "%01dx%01d" % (int(season), int(episode))
            slug = cleantitle.geturl(show)
            path = self.shows_link % (slug, season, slug, ep_id)
            full = urlparse.urljoin(self.base_link, path).encode('utf-8')
            print("Chillflix shows url", full)
            return full
        except:
            return
예제 #5
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the movies list and return the first link containing the slug.

     Returns the matching href, or None when nothing matches or on error.
     """
     try:
         # '+'-joined form for the search query string
         title = cleantitle.geturl(title).replace('-', '+')
         u = self.base_link + self.search_link % title
         u = client.request(u)
         i = client.parseDOM(u, "div", attrs={"class": "movies-list"})
         # hoisted loop invariant: the '-'-joined match slug was
         # recomputed on every inner-loop iteration
         slug = cleantitle.geturl(title).replace("+", "-")
         for r in i:
             for url in re.compile('<a href="(.+?)"').findall(r):
                 if slug in url:
                     return url
     except:
         return
예제 #6
0
    def searchMovie(self, title, year, aliases, headers):
        """Search the site for a movie and return its relative URL.

        Parses search results into (href, title) pairs, extracts the
        '(YYYY' year from each result title, and prefers an alias match
        with an exact year, falling back to an alias-only match.
        Returns the UTF-8 encoded relative path, or None on failure.
        """
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10', headers=headers)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            # per result heading: (list of hrefs, list of anchor titles)
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            # pull 'Name (YYYY' apart into (href, name, year)
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            try:
                # strict pass: alias match AND exact year
                match = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and year == i[2]
                ][0]
            except:
                # relaxed pass: first alias match regardless of year
                match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]

            # strip an optional '//host' prefix, keeping only the path
            url = re.findall('(?://.+?|)(/.+)', match)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            failure = traceback.format_exc()
            print('XMovies - Exception: \n' + str(failure))
            return
예제 #7
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie page URL built from the slugified title and year."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + '/movies/%s-%s' % (slug, year)
     except:
         return
예제 #8
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the direct link for one episode of a show.

        Re-parses the query-string `url` into a dict, searches the site
        for '<show>-season-N', picks the result whose title and season
        counter match, then scrapes the episode list from that page and
        returns the href whose anchor text equals the episode number.
        Returns None on failure.
        """
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                # cache.get(..., 1, ...) — presumably caches the request for 1 hour
                r = cache.get(client.request, 1, search_url)
                r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
                # (title anchor, status div) per result item
                r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                      dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0]) for i in r if i]
                # (href, show name, season counter from the status div)
                r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                      re.findall('(\d+)', i[1].content)[0]) for i in r if i]
                r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                     if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(
                        int(season)))]
                url = r[0][0]
            except:
                # search failed: fall through and fetch `url` as-is
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            # keep the anchor whose text is exactly the episode number
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
예제 #9
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Build and return the movie URL for the slugified title."""
     try:
         return self.base_link + self.movie_link % cleantitle.geturl(title)
     except:
         return
예제 #10
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the show search URL for the slugified show title."""
     try:
         slug = cleantitle.geturl(tvshowtitle)
         return self.base_link + self.search_link % slug
     except:
         return
예제 #11
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Join the base link with the search path for (slug, year)."""
     try:
         slug = cleantitle.geturl(title)
         return urlparse.urljoin(self.base_link,
                                 (self.search_link % (slug, year)))
     except:
         return
예제 #12
0
    def get_movie(self, imdb, title, year):
        """Search for a movie and return its site-relative URL.

        First tries the direct search URL and verifies '(year)' appears
        in the page title; otherwise scans the result list for an exact
        title match within year +/- 1.  Returns None on any failure.
        """
        try:
            url = self.moviesearch_link % (cleantitle.geturl(title), year)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r)
            try:
                # fast path: the search landed directly on the movie page
                r1 = client.parseDOM(r, 'title')[0]
                if '(%s)' % year not in r1: raise Exception()
                return url
            except:
                pass

            title = cleantitle.movie(title)
            # accept the stated year plus/minus one (release-date drift)
            years = [
                '(%s)' % str(year),
                '(%s)' % str(int(year) + 1),
                '(%s)' % str(int(year) - 1)
            ]
            # leftover debug print("Y", ...) removed

            r = client.parseDOM(r, 'div', attrs={'class': 'item_movie'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'img', ret='alt')[0]) for i in r]
            r = [(i[0][0], i[1], re.findall('(\d{4})', i[1])[0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [i for i in r if title == cleantitle.movie(i[1])]
            r = [i[0] for i in r if any(x in i[1] for x in years)]
            # strip any '//host' prefix, keep only the path
            url = re.findall('(//.+?|)(/.+)', r[0])[0][1]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
예제 #13
0
    def get_movie(self, imdb, title, year):
        """Search and collect candidate post links for a movie.

        Skips posts carrying the 'Extras' category tag.  Returns a list
        of {'url': ..., 'title': ...} dicts, or None on error (logged).
        """
        try:
            urls = []
            url = self.moviesearch_link % (cleantitle.geturl(title), year)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r)
            posts = client.parseDOM(r, 'div', attrs = {'class': 'post'})

            for post in posts:
                tags = client.parseDOM(post, 'a', attrs = {'rel' : 'category tag'})
                # skip bonus-material posts (was a flag + break + == False check)
                if 'Extras' in tags:
                    continue

                containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'posttitle'})
                if not containerDiv:
                    containerDiv = client.parseDOM(post, 'div', attrs = {'class' : 'expandposttitle'})

                href = client.parseDOM(containerDiv, 'a', ret='href')[0].encode('utf-8')
                # local name avoids shadowing the `title` parameter
                name = client.parseDOM(containerDiv, 'a', ret='title')[0].encode('utf-8')
                urls.append({'url' : href, 'title' : name})

            return urls
        except Exception as e:
            control.log('wrzcraft error')
            control.log(e)
            return
예제 #14
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape the first search hit for a hosted stream link.

        Expects `url` to be a dict with 'title' and 'year'.  Follows the
        first search result, reads the quality badge and the player
        iframe src, and appends a single non-direct source.  Returns the
        (possibly empty) sources list.
        """
        try:
            sources = []

            if url == None: return sources

            year = url['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = client.request(url, headers=h)
            # first search result card
            r = BeautifulSoup(r, 'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = client.request(r, headers=h)
            r = BeautifulSoup(r, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
            # anything not explicitly HD is reported as SD
            if not quality in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                            'debridonly': False})
            return sources
        except:
            return sources
예제 #15
0
    def get_movie(self, imdb, title, year):
        """Query the alluc API for movie streams and collect results.

        Requires the 'alluc_user' setting; picks the download endpoint
        when a debrid service is configured, otherwise the stream
        endpoint.  Appends single-hoster English results to
        self.stream_url and returns it; returns None on error (logged).
        """
        try:
            if control.setting('alluc_user'):
                # debrid configured -> search downloads, else streams
                if control.setting('realdebrid_token') or control.setting(
                        'premiumize_user'):
                    self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s+%s'
                else:
                    self.moviesearch_link = '/api/search/stream/?user=%s&password=%s&query=%s+%s'

                url = self.moviesearch_link % (control.setting(
                    'alluc_user'), control.setting('alluc_password'),
                                               cleantitle.geturl(title), year)
                r = urlparse.urljoin(self.base_link, url)
                # '%23newlinks' is an url-encoded '#newlinks' query filter
                r = r + "+%23newlinks"
                r = client.request(r)
                r1 = json.loads(r)

                for item in r1['result']:
                    # keep single-hoster entries flagged as English
                    if len(item['hosterurls']) == 1 and 'en' in item['lang']:
                        tmp = item['hosterurls'][0]['url']
                        tmp = client.replaceHTMLCodes(tmp)
                        tmp = tmp.encode('utf-8')
                        title = item['title'].encode('utf-8')
                        self.stream_url.append({
                            'url': tmp,
                            'hoster': item['hostername'],
                            'title': title
                        })
            return self.stream_url
        except Exception as e:
            control.log(e)
            return
예제 #16
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the slugified show title, used as the show URL key."""
     try:
         return cleantitle.geturl(tvshowtitle)
     except:
         return
예제 #17
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Find the site-relative URL for a movie.

        First probes the direct search URL through the proxy; if that
        fails, runs the secondary search, parses (href, 'Name (YYYY')
        pairs out of the anchors, and picks an exact title+year match.
        Returns None on any failure.
        """
        try:
            url = self.search_link % (cleantitle.geturl(title), year)

            q = urlparse.urljoin(self.base_link, url)

            # fast path: direct hit through the proxy
            r = proxy.geturl(q)
            if not r == None: return url

            t = cleantitle.get(title)

            q = self.search_link_2 % urllib.quote_plus(cleantitle.query(title))
            q = urlparse.urljoin(self.base_link, q)

            r = client.request(q)

            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))
            # quoted fragments inside each anchor's text
            r = [(i[0], re.findall('(?:\'|\")(.+?)(?:\'|\")', i[1]))
                 for i in r]
            # split each fragment into (name, year) pairs
            r = [(i[0], [re.findall('(.+?)\((\d{4})', x) for x in i[1]])
                 for i in r]
            r = [(i[0], [x[0] for x in i[1] if x]) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            # exact normalized-title and year match
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            # strip an optional '//host' prefix, keep the path
            url = re.findall('(?://.+?|)(/.+)', r[0])[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
예제 #18
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the show slug with '+' separators for the search query."""
     try:
         return cleantitle.geturl(tvshowtitle).replace('-', '+')
     except:
         return
예제 #19
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the search URL for the title slug (collapsing double dashes)."""
     try:
         slug = cleantitle.geturl(title).replace('--', '-')
         return self.base_link + self.search_link % slug
     except:
         return
    def get_show(self,
                 imdb=None,
                 tvdb=None,
                 tvshowtitle=None,
                 year=None,
                 season=None,
                 proxy_options=None,
                 key=None):
        """Return the slugified show title, used as the show URL key.

        Returns None when the provider is disabled in settings, the site
        is offline, or on any error (logged via log()).
        """
        try:
            if control.setting('Provider-%s' % name) == False:
                log('INFO', 'get_show', 'Provider Disabled by User')
                return None
            if self.siteonline == False:
                log('INFO', 'get_show', 'Provider is Offline')
                return None

            try:
                url = cleantitle.geturl(tvshowtitle)
                return url
            except:
                # NOTE(review): this inner guard swallows slugging errors
                # before the outer handler can log them — confirm intended.
                return

        except Exception as e:
            log('ERROR',
                'get_show',
                '%s: %s' % (tvshowtitle, e),
                dolog=self.init)
            return
예제 #21
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the direct link for one episode of a show.

        Re-parses the query-string `url` into a dict, searches for
        '<show>-season-N', picks the result whose title matches, then
        scrapes the episode list from that page and returns the href
        whose anchor text equals the episode number.  Returns None on
        failure.
        """
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url[
                'episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(
                    url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(
                    self.base_link,
                    self.search_link % clean_title.replace('-', '+'))
                # cache.get(..., 1, ...) — presumably caches for 1 hour
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                # keep results whose normalized title matches the query
                r = [(i[0][0], i[1][0]) for i in r
                     if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                # search failed: fall through and fetch `url` as-is
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'),
                       client.parseDOM(data, 'a', ret='href'))
            # keep the anchor whose text is exactly the episode number
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
예제 #22
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Build a composite search URL: '<search>$$$$$<title>$$$$$<year>$$$$$movie'.

     The '$$$$$'-separated suffix carries the raw title, year and media
     type through to the later resolution stage.  Returns None on error.
     """
     try:  # was 'if 1:  # try:' — restore the intended error guard
         clean_title = cleantitle.geturl(title).replace('-', '%20')
         url = urlparse.urljoin(
             self.base_link,
             (self.search_link % (clean_title)
              )) + '$$$$$' + title + '$$$$$' + year + '$$$$$' + 'movie'
         return url
     except:
         return
예제 #23
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return base link plus the search path for (slug, year)."""
        try:
            slug = cleantitle.geturl(title)
            return self.base_link + self.search_link % (slug, year)
        except Exception:
            return
예제 #24
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the base link formatted with a '<title>+<year>' query."""
     try:
         query = '%s+%s' % (cleantitle.geturl(title).replace('-', '+'), year)
         return self.base_link % query
     except:
         return
예제 #25
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie-search URL for the slugified title."""
     try:
         return self.base_link + self.search_movie % cleantitle.geturl(title)
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('GoldMovies - Exception: \n' + str(failure))
         return
예제 #26
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Join the base link with the search path for (slug, year)."""
     try:
         slug = cleantitle.geturl(title)
         return urlparse.urljoin(self.base_link,
                                 (self.search_link % (slug, year)))
     except:
         failure = traceback.format_exc()
         print('Flixanity - Exception: \n' + str(failure))
         return
예제 #27
0
파일: xmovies_mv.py 프로젝트: mpie/repo
 def get_movie(self, imdb, title, year):
     """Return the search URL after confirming '(year)' in the page title."""
     try:
         slug = cleantitle.geturl(title.replace('\'', '-'))
         url = self.moviesearch_link % (slug, year)
         page = client.request(urlparse.urljoin(self.base_link, url), limit='1')
         page_title = client.parseDOM(page, 'title')[0]
         if '(%s)' % year not in page_title: raise Exception()
         return url
     except:
         return
예제 #28
0
    def __search(self, search_url, title, year):
        """Probe search_url with the title slug; return it if the year matches."""
        try:
            url = search_url % cleantitle.geturl(title)

            page = client.request(urlparse.urljoin(self.base_link, url),
                                  limit='1', timeout='10')
            page_title = dom_parser.parse_dom(page, 'title')[0].content
            if year in page_title:
                return url
            return None
        except:
            pass
예제 #29
0
 def get_movie(self, imdb, title, year):
     """Verify the search page title mentions '(year)'; return the URL."""
     try:
         slug = cleantitle.geturl(title.replace('\'', '-'))
         url = self.moviesearch_link % (slug, year)
         response = client.request(urlparse.urljoin(self.base_link, url),
                                   limit='1')
         if '(%s)' % year in client.parseDOM(response, 'title')[0]:
             return url
         raise Exception()
     except:
         return
예제 #30
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return an urlencoded payload carrying the lowercased search URL."""
     try:
         slug = (cleantitle.geturl(title)
                 .replace('-', '+')
                 .replace(': ', '+'))
         search = urlparse.urljoin(self.base_link,
                                   self.search_link % slug).lower()
         return urllib.urlencode({'url': search, 'title': title, 'year': year})
     except:
         return
예제 #31
0
    def movie(self, imdb, title, localtitle, aliases, year):
        """Fetch '<base>/<slug>-<year>' and pull the buzzmovie post permalink."""
        try:
            query = ('%s-%s' % (cleantitle.geturl(title), year))
            response = client.request(urlparse.urljoin(self.base_link, query))

            # first '?p=<id>' permalink on the page
            links = re.findall(
                '''<a\s*href=['\"](http://www\.buzzmovie\.site/\?p=\d+)''',
                response)
            return links[0]
        except Exception:
            return
예제 #32
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search and return the first result whose anchor text equals title.

     The raw title is interpolated into a regex, so it is escaped to
     keep metacharacters (e.g. '(', '+') from corrupting the pattern.
     """
     try:
         q = cleantitle.geturl(title)
         q2 = q.replace('-', '+')
         url = self.base_link + self.search_link % q2
         r = client.request(url)
         # re.escape guards against regex metacharacters in the title
         match = re.compile('<div class="title"><a href="(.+?)">' +
                            re.escape(title) + '</a></div>').findall(r)
         for url in match:
             return url
     except:
         return
예제 #33
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Find a result link ending in '-<slug>.html' and rebuild the full URL.

     The slug is escaped before regex interpolation so any residual
     metacharacters cannot break or widen the pattern.
     """
     try:
         q = cleantitle.geturl(title)
         q2 = q.replace('-', '+')
         url = self.base_link + self.search_link % q2
         r = client.request(url)
         match = re.compile('<a class="title" href="(.+?)-' +
                            re.escape(q) + '\.html"').findall(r)
         for url in match:
             url = '%s-%s.html' % (url, q)
             return url
     except:
         return
예제 #34
0
    def get_movie(self, imdb, title, year):
        """Return the encoded relative search path once the page has a title."""
        try:
            path = self.search_link % (cleantitle.geturl(title), year)
            full = urlparse.urljoin(self.base_link, path)

            page = client.request(full, limit='1')
            if client.parseDOM(page, 'title')[0] == '': raise Exception()

            # keep only the path portion, dropping any '//host' prefix
            rel = re.findall('(?://.+?|)(/.+)', full)[0]
            rel = client.replaceHTMLCodes(rel)
            return rel.encode('utf-8')
        except:
            return
예제 #35
0
파일: 1movies_mv_tv.py 프로젝트: mpie/repo
 def searchMovie(self, title, year):
     """Search 1movies for a movie and return its relative URL.

     Parses search-result headings into (href, 'Name (YYYY') pairs and
     returns the UTF-8 encoded path of the exact title+year match, or
     None on failure.
     """
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
         r = client.request(url, timeout='10')
         t = cleantitle.get(title)
         r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
         # per heading: (list of hrefs, list of anchor titles)
         r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
         r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
         # split 'Name (YYYY' into (name, year)
         r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
         r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
         # exact normalized-title and year match
         r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
         # strip an optional '//host' prefix, keep only the path
         url = re.findall('(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
예제 #36
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect gvideo stream sources from a MoviesHD page.

        When `url` is not absolute, it is treated as a query string and
        rebuilt into a movie/episode page URL, validated against the
        page <title> and the IMDb id.  The page's token/elid values are
        then posted to the embeds AJAX endpoint (with a Bearer token
        recovered from the '__utmx' cookie) and every http(s) link in
        the JSON reply is offered as a source.  Returns the (possibly
        empty) sources list on error.
        """
        #            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'MoviesHD', 'url': i['url']})
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                imdb = data['imdb'];
                year = data['year']

                if 'tvshowtitle' in data:
                    url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                    self.base_link, cleantitle.geturl(title).replace('+','-'), int(data['season']), int(data['episode']))
                else:
                    url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title).replace('+','-'))

                result = client.request(url, limit='5')

                # retry movie URLs with the year appended
                if result == None and not 'tvshowtitle' in data:
                    url += '-%s' % year
                    result = client.request(url, limit='5')

                result = client.parseDOM(result, 'title')[0]

                # '%TITLE%' in the page title marks an unresolved template page
                if '%TITLE%' in result: raise Exception()

                r = client.request(url, output='extended')

                # sanity check: the page must reference the IMDb id
                if not imdb in r[0]: raise Exception()


            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url, output='extended')

            # extended output — presumably (body, ..., headers, cookie)
            cookie = r[4];
            headers = r[3];
            result = r[0]

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url

            u = '/ajax/tnembeds.php'
            # refresh base_link in case the site redirected to a new domain
            self.base_link = client.request(self.base_link, output='geturl')
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            # elid is the base64-encoded current unix timestamp
            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)

            r = client.request(u, post=post, XHR=True)
            r = str(json.loads(r))
            # harvest every quoted http(s) link from the JSON text
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    sources.append(
                        {'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i, 'provider': 'MoviesHD'})
                except:
                    pass

            return sources

        except Exception as e:
            control.log('ERROR moviesHD %s' % e)
            return sources
예제 #37
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect gvideo stream sources from a Movie25 mirror.

        Picks a random mirror domain, resolves `url` (query string or
        site-relative path) to a watch page, verifies the release year,
        walks each '/server-' link, and requests the grabber API with an
        md5-salted cookie to obtain playable file URLs.  Returns the
        (possibly empty) sources list on error.
        """
        try:
            sources = []

            if url == None: return sources

            # random mirror; the grabber lives on the play. subdomain
            choice = random.choice(self.random_link)
            base_link = 'http://%s' % choice
            strm_link = 'http://play.%s' % choice + '/grabber-api/episode/%s?token=%s'

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                if 'tvshowtitle' in data:
                    url = '/tv-series/%s-season-%01d/watch/' % (cleantitle.geturl(title), int(data['season']))
                    # site lists each season under show-year + season offset
                    year = str((int(data['year']) + int(data['season'])) - 1)
                    episode = '%01d' % int(data['episode'])

                else:
                    url = '/movie/%s/watch' % cleantitle.geturl(title)
                    year = data['year']
                    episode = None
                url = url.replace('+','-')
                url = urlparse.urljoin(base_link, url)
                referer = url

                r = client.request(url)

                y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]

                # reject pages whose release year does not match
                if not year == y: raise Exception()
            else:
                # absolute URL: peel off an optional '?episode=N' suffix
                try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
                except: episode = None

                url = urlparse.urljoin(base_link, url)
                url = re.sub('/watch$', '', url.strip('/')) + '/watch/'
                referer = url

                r = client.request(url)

            r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            # (href, first number in the anchor text)
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                # keep only the link for the requested episode number
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [i for i in r if '/server-' in i]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')

                    # skip third-party embeds; only direct players are handled
                    t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if t == 'embed': raise Exception()

                    s = client.parseDOM(p, 'input', ret='value', attrs = {'name': 'episodeID'})[0]
                    # random 8-char token; cookie name/value are salted md5 digests
                    t = ''.join(random.sample(string.digits + string.ascii_uppercase + string.ascii_lowercase, 8))
                    k = hashlib.md5('!@#$%^&*(' + s + t).hexdigest()
                    v = hashlib.md5(t + referer + s).hexdigest()

                    stream = strm_link % (s, t)
                    cookie = '%s=%s' % (k, v)

                    u = client.request(stream, referer=referer, cookie=cookie, timeout='10')

                    u = json.loads(u)['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]

                    for i in u:
                        try:
                            sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Movie25', 'url': i})
                            #sources.append({'source': host.split('.')[0], 'quality': 'SD', 'provider': 'Movie25', 'url': url})
                        except: pass
                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR movie25 %s' % e)
            return sources
예제 #38
0
파일: watch5s_mv_tv.py 프로젝트: mpie/repo
    def _collect_gvideo_sources(self, js, sources):
        """Append direct gvideo file links found in *js* to *sources*.

        *js* is the decoded JSON of the grabber/backup endpoint. Entries
        without a 'file' key are skipped; per-item failures are ignored
        because scraping here is strictly best-effort.
        """
        try:
            files = [i['file'] for i in js['playlist'][0]['sources'] if 'file' in i]
            for link in files:
                try:
                    sources.append({'source': 'gvideo',
                                    'quality': directstream.googletag(link)[0]['quality'],
                                    'provider': 'watch5s', 'url': link,
                                    'direct': True, 'debridonly': False})
                except Exception:
                    pass
        except Exception:
            pass

    def get_sources(self, url, hostDict, hostprDict, locDict):
        """Resolve direct gvideo stream links for a movie or TV episode.

        :param url: urlencoded query string produced by get_movie/get_episode
                    carrying title/year (plus season/episode for shows).
        :param hostDict, hostprDict, locDict: unused here, kept for the
                    provider interface.
        :returns: list of source dicts; whatever was gathered so far is
                  returned on failure (best-effort, never raises).
        """
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (cleantitle.geturl(title), int(data['season']))
                # Site lists season N of a show that premiered in <year>
                # under release year <year + N - 1>.
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])
            else:
                url = '/movie/%s/watch/' % cleantitle.geturl(title)
                year = data['year']
                episode = None

            url = urlparse.urljoin(self.base_link, url)
            referer = url

            r = client.request(url)

            # Guard against slug collisions: the page's release year must
            # match the requested one, otherwise abort this provider.
            found_year = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]
            if not year == found_year: raise Exception()

            r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if episode is not None:
                links = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                links = [i[0] for i in r]

            # Only the per-server player pages carry the token form we scrape.
            links = [i for i in links if '/server-' in i]

            for u in links:
                try:
                    p = client.request(u, referer=referer, timeout='10')

                    player_type = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if player_type == 'embed': raise Exception()  # embed players carry no direct file

                    episodeId = client.parseDOM(p, 'input', ret='value', attrs = {'name': 'episodeID'})[0]
                    js = json.loads(client.request(self.token_link, post=urllib.urlencode({'id': episodeId}), referer=referer, timeout='10'))
                    token_hash = js['hash']  # renamed: 'hash' shadows the builtin
                    token = js['token']
                    stamp = js['_']

                    grab = client.request(self.grabber_link % (episodeId, token_hash, token, stamp), referer=referer, timeout='10')
                    js = json.loads(grab)

                    # Primary playlist returned by the grabber endpoint.
                    self._collect_gvideo_sources(js, sources)

                    # Backup host reached through a second token exchange.
                    try:
                        backup = js['backup']
                        q = urlparse.parse_qs(urlparse.urlsplit(backup).query)
                        q = dict([(i, q[i][0]) if q[i] else (i, '') for i in q])
                        eid = q['eid']
                        mid = q['mid']
                        p = client.request(self.backup_token_link % (eid, mid, stamp), XHR=True, referer=referer, timeout='10')
                        x = re.search('''_x=['"]([^"']+)''', p).group(1)
                        y = re.search('''_y=['"]([^"']+)''', p).group(1)
                        backup_js = json.loads(client.request(self.backup_link % (eid, x, y), referer=referer, XHR=True, timeout='10'))
                        self._collect_gvideo_sources(backup_js, sources)
                    except Exception:
                        pass
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
예제 #39
0
    def get_movie(self, imdb, title, year):
        """Query the alluc API for *title* (*year*) and collect hoster links.

        Matching single-host English results are appended to
        self.stream_url, which is also returned. Returns None when the
        request or JSON decoding fails (errors are logged, not raised).
        """
        try:
            user = control.setting('alluc_user')
            if user:
                # Debrid users can take download links; everyone else gets
                # the streaming search endpoint.
                if control.setting('realdebrid_token') or control.setting('premiumize_user'):
                    self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s+%s'
                else:
                    self.moviesearch_link = '/api/search/stream/?user=%s&password=%s&query=%s+%s'

                query = self.moviesearch_link % (user, control.setting('alluc_password'), cleantitle.geturl(title), year)
                request_url = urlparse.urljoin(self.base_link, query) + "+%23newlinks"
                response = json.loads(client.request(request_url))

                for item in response['result']:
                    # Keep only English results that resolve to exactly one host.
                    if len(item['hosterurls']) != 1 or 'en' not in item['lang']:
                        continue
                    link = client.replaceHTMLCodes(item['hosterurls'][0]['url']).encode('utf-8')
                    item_title = item['title'].encode('utf-8')
                    self.stream_url.append({'url': link, 'hoster': item['hostername'], 'title': item_title})
            return self.stream_url
        except Exception as e:
            control.log(e)
            return