Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         u = client.parseDOM(r, "div", attrs={"id": "lnk list-downloads"})
         for t in u:
             r = client.parseDOM(t, 'a', ret='href')
             for url in r:
                 if '2160p' in url:
                     quality = '2160p'
                 elif '1080p' in url:
                     quality = '1080p'
                 elif '720p' in url:
                     quality = '720p'
                 else:
                     quality = 'SD'
                 url = url.split('php?')[1]
                 sources.append(
                     {'source': 'Direct', 'quality': quality, 'language': 'en', 'url': url, 'direct': True,
                      'debridonly': False})
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('GoldMovies - Exception: \n' + str(failure))
         return
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            if url is None: return
            sources = []

            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(url, headers=headers)

            vidpage = re.compile('id="tab-movie".+?data-file="(.+?)"', re.DOTALL).findall(html)

            for link in vidpage:
                if 'trailer' not in link.lower():
                    link = urlparse.urljoin(self.base_link, link)
                    sources.append({'source': 'DirectLink', 'quality': 'SD', 'language': 'en', 'url': link, 'info': [],
                                    'direct': True, 'debridonly': False})
            other_links = re.findall('data-url="(.+?)"', html)
            for link in other_links:
                if link.startswith('//'):
                    link = 'http:' + link
                sources.append({'source': 'DirectLink', 'quality': 'SD', 'language': 'en', 'url': link, 'info': [],
                                'direct': False, 'debridonly': False})

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('BobMovies - Exception: \n' + str(failure))
            return
Example #3
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         match = re.compile(
             'iframe id="odbIframe" src="(.+?)"').findall(r)
         for url in match:
             host = url.split('//')[1].replace('www.', '')
             host = host.split('/')[0].lower()
             sources.append({
                 'source': host,
                 'quality': 'HD',
                 'language': 'en',
                 'url': url,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return sources
Example #4
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = imdb
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #5
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         data = {'tvshowtitle': tvshowtitle, 'year': year}
         return urllib.urlencode(data)
     except:
         failure = traceback.format_exc()
         log_utils.log('Best-Moviez - Exception: \n' + str(failure))
         return
Example #6
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = self.base_link + self.movie_link % imdb
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #7
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         title = cleantitle.geturl(title)
         url = self.base_link + self.search_movie % title
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('GoldMovies - Exception: \n' + str(failure))
         return
Example #8
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('2DDL - Exception: \n' + str(failure))
         return
Example #9
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         imdb = url
         url = self.base_link + self.tv_link % (imdb, season, episode)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('ODB - Exception: \n' + str(failure))
         return
Example #10
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

            return urllib.urlencode(data)
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return
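Examples #8 and #10 are two halves of one convention: the movie/tvshow step packs metadata into a query string, and the episode/sources step parses it back into a flat dict. A minimal standalone sketch of that round trip (Python 2 stdlib; the values are illustrative):

import urllib
import urlparse

# pack side (movie/tvshow): dict -> query string
data = {'imdb': 'tt0111161', 'title': 'The Shawshank Redemption', 'year': '1994'}
url = urllib.urlencode(data)

# unpack side (episode/sources): query string -> flat dict
parsed = urlparse.parse_qs(url)
parsed = dict((k, v[0]) if v else (k, '') for k, v in parsed.items())

assert parsed['title'] == 'The Shawshank Redemption'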
Example #11
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         url = url + '-season-%s-episode-%s/' % (season, episode)
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('GoldMovies - Exception: \n' + str(failure))
         return
Example #12
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         return urllib.urlencode({
             'imdb': imdb,
             'title': title,
             'localtitle': localtitle,
             'year': year
         })
     except:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
Example #13
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            scrape = cleantitle.get_simple(title)
            google = '%s%s' % (self.goog, scrape.replace(' ', '+'))
            get_page = requests.get(google).content
            log_utils.log('Scraper bobmovies - Movie - title: ' + str(title))
            log_utils.log('Scraper bobmovies - Movie - search_id: ' + str(scrape))

            match = re.compile('<a href="(.+?)"', re.DOTALL).findall(get_page)
            for url1 in match:
                if '/url?q=' in url1:
                    if self.base_link in url1 and 'google' not in url1:
                        url2 = url1.split('/url?q=')[1]
                        url2 = url2.split('&amp')[0]
                        headers = {
                            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
                        html = requests.get(url2, headers=headers, timeout=5).content
                        results = re.compile(
                            '<div class="page_film_top full_film_top">.+?<h1>(.+?)</h1>.+?<td class="name">Quality:</td><td><a href=.+?">(.+?)</a>.+?<td class="name">Year:</td><td><a href=.+?">(.+?)</a>',
                            re.DOTALL).findall(html)
                        for item_title, qual, date in results:
                            if not scrape == cleantitle.get_simple(item_title):
                                continue
                            if not year in date:
                                continue
                            log_utils.log('Scraper bobmovies - Movie - url2: ' + str(url2))
                            return url2
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('BobMovies - Exception: \n' + str(failure))
            return
Example #14
 def get(self, netloc, ua, timeout):
     try:
         self.netloc = netloc
         self.ua = ua
         self.timeout = timeout
         self.cookie = None
         self._get_cookie(netloc, ua, timeout)
         if self.cookie is None:
             log_utils.log('%s returned an error. Could not collect tokens.' % netloc, log_utils.LOGDEBUG)
         return self.cookie
     except Exception as e:
         log_utils.log('%s returned an error. Could not collect tokens - Error: %s.' % (netloc, str(e)),
                       log_utils.LOGDEBUG)
         return self.cookie
Example #15
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url[
                'episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return
Example #16
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url.update({
             'premiered': premiered,
             'season': season,
             'episode': episode
         })
         return urllib.urlencode(url)
     except:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return
Example #17
def resolver(url, debrid):
    try:
        debrid_resolver = [
            resolver for resolver in debrid_resolvers
            if resolver.name == debrid
        ][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)

        return stream_url
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e),
                      log_utils.LOGWARNING)
        return None
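The [0] on the list comprehension above raises IndexError when no resolver matches the requested name, which the except block then logs as a resolve failure. A standalone sketch of the same name-based lookup (DummyResolver is a hypothetical stand-in for the real debrid resolver objects):

class DummyResolver(object):
    def __init__(self, name):
        self.name = name

debrid_resolvers = [DummyResolver('Real-Debrid'), DummyResolver('Premiumize.me')]

# equivalent to the comprehension-plus-[0] above; fails with StopIteration
# instead of IndexError when nothing matches
debrid_resolver = next(r for r in debrid_resolvers if r.name == 'Real-Debrid')
print debrid_resolver.name  # Real-Debrid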
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            html = client.request(url, headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"', re.DOTALL).findall(html)
            for vid_url in Links:
                if 'openload' in vid_url or 'streamango' in vid_url:
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile('description" content="(.+?)"', re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    source_name = 'Openload' if 'openload' in vid_url else 'Streamango'
                    sources.append(
                        {'source': source_name, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                         'direct': False, 'debridonly': False})
                else:
                    if resolveurl.HostedMediaFile(vid_url):
                        quality, info = source_utils.get_release_quality(vid_url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].split('.')[0].title()
                        sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                             'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
Example #19
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            search_id = title.replace(':', ' ').replace(' ', '+').lower()
            start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id))

            search_results = client.request(start_url)
            match = re.compile('<header>.+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(search_results)
            for item_url, item_title in match:
                movie_name, movie_year = re.findall("(.*?)(\d+)", item_title)[0]
                if not cleantitle.get(title) == cleantitle.get(movie_name):
                    continue
                if not year in movie_year:
                    continue
                return item_url
        except:
            failure = traceback.format_exc()
            log_utils.log('HDPopcorn - Exception: \n' + str(failure))
            return
Example #20
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            search_id = title.lower().replace(':', ' ').replace('-', ' ')

            start_url = urlparse.urljoin(self.base_link, (self.search_link % (search_id.replace(' ', '%20'))))

            headers = {'User-Agent': client.randomagent()}
            html = client.request(start_url, headers=headers)

            match = re.compile('<span class="name"><a title="(.+?)" href="(.+?)".+?title="(.+?)"', re.DOTALL).findall(
                html)
            for name, item_url, link_year in match:
                if year in link_year:
                    if cleantitle.get(title) in cleantitle.get(name):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return
Example #21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                if Links:
                    for vid_url in Links:
                        quality, info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].lower()
                        sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info,
                             'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources
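Example #21 (and #26 below) builds its search query the same way every time: a zero-padded SxxExx handle for episodes, the year for movies, then a character sweep before URL-encoding. A standalone sketch with illustrative values:

import re
import urllib

data = {'tvshowtitle': 'Breaking Bad', 'season': '1', 'episode': '5'}

# 'S%02dE%02d' zero-pads season and episode: S01E05
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
query = '%s %s' % (data['tvshowtitle'], hdlr)

# strip characters that would break the site search, then encode
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
print urllib.quote_plus(query)  # Breaking+Bad+S01E05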
Example #22
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         log_utils.log('searchShow URL: %s' % str(url), log_utils.LOGNOTICE)
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except:
         return
Example #23
 def sources(self, url, hostDict, hostprDict):
     sources = []
     if url is None: return sources
     try:
         OPEN = client.request(url)
         headers = {'Origin': 'http://hdpopcorns.co', 'Referer': url,
                    'X-Requested-With': 'XMLHttpRequest',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
         try:
             params = re.compile('FileName1080p.+?value="(.+?)".+?FileSize1080p.+?value="(.+?)".+?value="(.+?)"',
                                 re.DOTALL).findall(OPEN)
             for param1, param2, param3 in params:
                 request_url = '%s/select-movie-quality.php' % (self.base_link)
                 form_data = {'FileName1080p': param1, 'FileSize1080p': param2, 'FSID1080p': param3}
                 link = client.request(request_url, post=form_data, headers=headers, timeout=3)
                 final_url = re.compile('<strong>1080p</strong>.+?href="(.+?)"', re.DOTALL).findall(link)[0]
                 sources.append(
                     {'source': 'DirectLink', 'quality': '1080p', 'language': 'en', 'url': final_url, 'direct': True,
                      'debridonly': False})
         except:
             pass
         try:
             params = re.compile('FileName720p.+?value="(.+?)".+?FileSize720p".+?value="(.+?)".+?value="(.+?)"',
                                 re.DOTALL).findall(OPEN)
             for param1, param2, param3 in params:
                 request_url = '%s/select-movie-quality.php' % (self.base_link)
                 form_data = {'FileName720p': param1, 'FileSize720p': param2, 'FSID720p': param3}
                 link = client.request(request_url, post=form_data, headers=headers, timeout=3)
                 final_url = re.compile('<strong>720p</strong>.+?href="(.+?)"', re.DOTALL).findall(link)[0]
                 sources.append(
                     {'source': 'DirectLink', 'quality': '720p', 'language': 'en', 'url': final_url, 'direct': True,
                      'debridonly': False})
         except:
             pass
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('HDPopcorn - Exception: \n' + str(failure))
         return sources
Example #24
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            scrape = title.lower().replace(' ', '+').replace(':', '')

            start_url = self.search_link % (self.goog, scrape, year)
            html = client.request(start_url)
            results = re.compile('href="(.+?)"', re.DOTALL).findall(html)
            for url in results:
                if self.base_link in url:
                    if 'webcache' in url:
                        continue
                    if cleantitle.get(title) in cleantitle.get(url):
                        chkhtml = client.request(url)
                        chktitle = re.compile('<title>(.+?)</title>', re.DOTALL).findall(chkhtml)[0]
                        if cleantitle.get(title) in cleantitle.get(chktitle):
                            if year in chktitle:
                                url = url.replace('bobmovies.us', 'bobmovies.net')
                                return url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('BobMovies - Exception: \n' + str(failure))
            return
Example #25
def __getTrakt(url, post=None):
    try:
        url = urlparse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2}

        if getTraktCredentialsInfo():
            headers.update({'Authorization': 'Bearer %s' % control.setting('trakt.token')})

        result = client.request(url, post=post, headers=headers, output='extended', error=True)

        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in ['500', '502', '503', '504', '520', '521', '522', '524']:
            log_utils.log('Temporary Trakt Error: %s' % resp_code, log_utils.LOGWARNING)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % resp_code, log_utils.LOGWARNING)
            return
        elif resp_code in ['429']:
            log_utils.log('Trakt Rate Limit Reached: %s' % resp_code, log_utils.LOGWARNING)
            return

        if resp_code not in ['401', '405']:
            return result, resp_header

        oauth = urlparse.urljoin(BASE_URL, '/oauth/token')
        opost = {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'redirect_uri': REDIRECT_URI,
                 'grant_type': 'refresh_token', 'refresh_token': control.setting('trakt.refresh')}

        result = client.request(oauth, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        token, refresh = result['access_token'], result['refresh_token']

        control.setSetting(id='trakt.token', value=token)
        control.setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        return result[0], result[2]
    except Exception as e:
        log_utils.log('Unknown Trakt Error: %s' % e, log_utils.LOGWARNING)
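For reference, a standalone sketch of the refresh-token exchange __getTrakt performs when Trakt answers 401/405, written with requests instead of client.request. The endpoint is Trakt's v2 token URL; V2_API_KEY, CLIENT_SECRET, REDIRECT_URI and the stored refresh token are placeholders you must supply:

import json
import requests

headers = {'Content-Type': 'application/json',
           'trakt-api-key': 'V2_API_KEY', 'trakt-api-version': '2'}
opost = {'client_id': 'V2_API_KEY', 'client_secret': 'CLIENT_SECRET',
         'redirect_uri': 'REDIRECT_URI', 'grant_type': 'refresh_token',
         'refresh_token': 'STORED_REFRESH_TOKEN'}

resp = requests.post('https://api.trakt.tv/oauth/token',
                     data=json.dumps(opost), headers=headers)
result = resp.json()

# on success both tokens are rotated and must be persisted again,
# which __getTrakt does via control.setSetting()
token, refresh = result['access_token'], result['refresh_token']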
Example #26
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'a', ret='href')
                    s = re.search(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        name,
                        flags=re.I)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('2DDL - Exception: \n' + str(failure))
            return sources
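The size handling above is worth isolating: take the last size-looking token from the release name and normalise it to GB. A runnable standalone version of the same regex and arithmetic:

import re

def size_in_gb(text):
    sizes = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not sizes:
        return None
    size = sizes[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub('[^0-9|/.|/,]', '', size)) / div
    return '%.2f GB' % value

print size_in_gb('Some.Movie.2018.1080p.BluRay.x264 [8.7 GB]')  # 8.70 GB
print size_in_gb('Some.Show.S01E05.720p.WEB 350 MB')            # 0.34 GB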
Example #27
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         content_type = 'episode' if 'tvshowtitle' in data else 'movie'
         years = (data['year'], str(int(data['year']) + 1),
                  str(int(data['year']) - 1))
         if content_type == 'movie':
             title = cleantitle.get(data['title'])
             localtitle = cleantitle.get(data['localtitle'])
             ids = [data['imdb']]
             r = control.jsonrpc(
                 '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title", "originaltitle", "file"]}, "id": 1}'
                 % years)
             r = unicode(r, 'utf-8', errors='ignore')
             r = json.loads(r)['result']['movies']
             r = [
                 i for i in r if str(i['imdbnumber']) in ids or title in [
                     cleantitle.get(i['title'].encode('utf-8')),
                     cleantitle.get(i['originaltitle'].encode('utf-8'))
                 ]
             ]
             r = [
                 i for i in r
                 if not i['file'].encode('utf-8').endswith('.strm')
             ][0]
             r = control.jsonrpc(
                 '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"properties": ["streamdetails", "file"], "movieid": %s }, "id": 1}'
                 % str(r['movieid']))
             r = unicode(r, 'utf-8', errors='ignore')
             r = json.loads(r)['result']['moviedetails']
         elif content_type == 'episode':
             title = cleantitle.get(data['tvshowtitle'])
             localtitle = cleantitle.get(data['localtvshowtitle'])
             season, episode = data['season'], data['episode']
             ids = [data['imdb'], data['tvdb']]
             r = control.jsonrpc(
                 '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties": ["imdbnumber", "title"]}, "id": 1}'
                 % years)
             r = unicode(r, 'utf-8', errors='ignore')
             r = json.loads(r)['result']['tvshows']
             r = [
                 i for i in r if str(i['imdbnumber']) in ids or title in [
                     cleantitle.get(i['title'].encode('utf-8')),
                     cleantitle.get(i['originaltitle'].encode('utf-8'))
                 ]
             ][0]
             r = control.jsonrpc(
                 '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "season", "operator": "is", "value": "%s"}, {"field": "episode", "operator": "is", "value": "%s"}]}, "properties": ["file"], "tvshowid": %s }, "id": 1}'
                 % (str(season), str(episode), str(r['tvshowid'])))
             r = unicode(r, 'utf-8', errors='ignore')
             r = json.loads(r)['result']['episodes']
             r = [
                 i for i in r
                 if not i['file'].encode('utf-8').endswith('.strm')
             ][0]
             r = control.jsonrpc(
                 '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"properties": ["streamdetails", "file"], "episodeid": %s }, "id": 1}'
                 % str(r['episodeid']))
             r = unicode(r, 'utf-8', errors='ignore')
             r = json.loads(r)['result']['episodedetails']
         url = r['file'].encode('utf-8')
         try:
             quality = int(r['streamdetails']['video'][0]['width'])
         except:
             quality = -1
         if quality >= 2160:
             quality = '4K'
         elif quality >= 1440:
             quality = '1440p'
         elif quality >= 1080:
             quality = '1080p'
         elif quality >= 720:
             quality = 'HD'
         else:
             quality = 'SD'
         info = []
         try:
             f = control.openFile(url)
             s = f.size()
             f.close()
             s = '%.2f GB' % (float(s) / 1024 / 1024 / 1024)
             info.append(s)
         except:
             pass
         try:
             e = urlparse.urlparse(url).path.split('.')[-1].upper()
             info.append(e)
         except:
             pass
         info = ' | '.join(info)
         info = info.encode('utf-8')
         sources.append({
             'source': '0',
             'quality': quality,
             'language': 'en',
             'url': url,
             'info': info,
             'local': True,
             'direct': True,
             'debridonly': False
         })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('Library - Exception: \n' + str(failure))
         return sources
Example #28
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name
                                     for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in name
                                   for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            # if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': False
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
Example #29
    control.execute(
        'RunPlugin(plugin://%s)' %
        'plugin.video.fanfilm/?action=tvshowsToLibrarySilent&url=traktcollection'
    )
    control.execute(
        'RunPlugin(plugin://%s)' %
        'plugin.video.fanfilm/?action=moviesToLibrarySilent&url=traktcollection'
    )


try:
    MediaVersion = control.addon('script.fanfilm.media').getAddonInfo(
        'version')
    AddonVersion = control.addon('plugin.video.fanfilm').getAddonInfo(
        'version')

    log_utils.log(
        '######################### FANFILM ############################',
        log_utils.LOGNOTICE)
    log_utils.log(
        '####### CURRENT FANFILM VERSIONS REPORT ######################',
        log_utils.LOGNOTICE)
    log_utils.log('### FANFILM PLUGIN VERSION: %s ###' % str(AddonVersion),
                  log_utils.LOGNOTICE)
    log_utils.log('### FANFILM MEDIA VERSION: %s ###' % str(MediaVersion),
                  log_utils.LOGNOTICE)
    log_utils.log(
        '###############################################################',
        log_utils.LOGNOTICE)
except:
    log_utils.log(
        '######################### FANFILM ############################',
        log_utils.LOGNOTICE)
Example #30
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False,
            limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
    try:
        if not url:
            return

        handlers = []

        if proxy is not None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output in ('cookie', 'extended') or not close:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        _headers = {}
        if headers:
            try:
                _headers.update(headers)
            except:
                log_exception()
        if 'User-Agent' in _headers:
            pass
        elif not mobile == True:
            # headers['User-Agent'] = agent()
            _headers[
                'User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.75 Safari/537.36"
        else:
            _headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if not 'Accept-Language' in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR == True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif not cookie == None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'

        if redirect == False:

            # old implementation
            # class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            # opener = urllib2.build_opener(NoRedirection)
            # opener = urllib2.install_opener(opener)

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers, req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del _headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)

        try:
            import ssl
            response = urllib2.urlopen(request, timeout=int(timeout), context=ssl._create_unverified_context())
        except urllib2.HTTPError as response:

            if response.code == 503 or ('shinden' in url and response.code == 403):
                s = requests.Session()
                headersok = {'User-Agent': _headers['User-Agent']}
                s.headers = headersok
                scraper = cloudflare6.create_scraper(s)
                if output == 'session':
                    return s
                if output == 'cookie':
                    return scraper.get_cookie_string(url, user_agent=_headers['User-Agent'])[0]
                else:
                    return scraper.get(url).content
            else:
                log_utils.log('Request-Error (%s): %s' % (str(response.code), url), log_utils.LOGDEBUG)
                if error == False: return

        if output == 'cookie':
            try:
                result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            response.close()
            return content

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            _headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua, timeout)

            result = _basic_request(url, headers=_headers, post=post, timeout=timeout, limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1]) for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True: response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url), log_utils.LOGDEBUG)
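A hedged usage sketch for request() above; example.com stands in for a real host, and each return shape follows the output branches in the code:

html = request('http://example.com/')                        # decoded page body
final = request('http://example.com/', output='geturl')      # URL after redirects
length = request('http://example.com/file.mkv', output='file_size')
cookie = request('http://example.com/', output='cookie')     # 'name=value; ...'
body, code, resp_headers, sent_headers, cookie = request(
    'http://example.com/', output='extended')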