def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-base_url2" type="text" label="    %s %s" default="%s" visible="eq(-4,true)"/>'
         % (name, i18n('tv_shows'), i18n('base_url'), cls.tv_base_url))
     return settings
Example #2
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>'
         % (name, i18n('username')))
     settings.append(
         '         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>'
         % (name, i18n('password')))
     return settings
Example #3
 def get_settings(cls):
     name = cls.get_name()
     settings = [
         '         <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>'
         % (name, name, i18n('enabled')),
         '         <setting id="%s-sub_check" type="bool" label="    %s" default="false" visible="eq(-1,true)"/>'
         % (name, i18n('page_existence')),
         '         <setting id="%s_last_results" type="number" default="0" visible="false"/>'
         % (name)
     ]
     return settings
Example #4
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-filter" type="slider" range="0,180" option="int" label="     %s" default="30" visible="eq(-4,true)"/>'
         % (name, i18n('filter_results_days')))
     settings.append(
         '         <setting id="%s-select" type="enum" label="     %s" lvalues="30636|30637" default="0" visible="eq(-5,true)"/>'
         % (name, i18n('auto_select')))
     return settings
Example #5
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-filter" type="slider" range="0,180" option="int" label="     %s" default="30" visible="eq(-4,true)"/>'
         % (name, i18n("filter_results_days"))
     )
     settings.append(
         '         <setting id="%s-select" type="enum" label="     %s" lvalues="30636|30637" default="0" visible="eq(-5,true)"/>'
         % (name, i18n("auto_select"))
     )
     return settings
Example #6
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>'
         % (name, i18n('username')))
     settings.append(
         '         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>'
         % (name, i18n('password')))
     settings.append(
         '         <setting id="%s-result_limit" label="     %s" type="slider" default="10" range="10,100" option="int" visible="eq(-6,true)"/>'
         % (name, i18n('result_limit')))
     return settings
Example #7
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-auto_pick" type="bool" label="    %s" default="false" visible="eq(-4,true)"/>'
         % (name, i18n('auto_pick')))
     return settings
Example #8
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>'
         % (name, i18n("username"))
     )
     settings.append(
         '         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>'
         % (name, i18n("password"))
     )
     settings.append(
         '         <setting id="%s-include_premium" type="bool" label="     %s" default="false" visible="eq(-6,true)"/>'
         % (name, i18n("include_premium"))
     )
     return settings
Example #9
    def resolve_link(self, link):
        playlist = super(self.__class__, self)._http_get(link, cache_limit=.5)
        try:
            ns = '{http://xspf.org/ns/0/}'
            root = ET.fromstring(playlist)
            tracks = root.findall('.//%strack' % (ns))
            locations = []
            for track in tracks:
                duration = track.find('%sduration' % (ns)).text
                try:
                    duration = int(duration)
                except:
                    duration = 0
                if duration >= MIN_DURATION:
                    location = track.find('%slocation' % (ns)).text
                    locations.append({
                        'duration': duration / 1000,
                        'url': location
                    })

            if len(locations) > 1:
                result = xbmcgui.Dialog().select(i18n('choose_stream'), [
                    self.__format_time(location['duration'])
                    for location in locations
                ])
                if result > -1:
                    return locations[result]['url']
            elif locations:
                return locations[0]['url']
        except Exception as e:
            log_utils.log('Failure during furk playlist parse: %s' % (e),
                          log_utils.LOGWARNING)
Example #10
    def resolve_link(self, link):
        try:
            headers = dict([item.split('=') for item in (link.split('|')[1]).split('&')])
            for key in headers: headers[key] = urllib.unquote(headers[key])
            link = link.split('|')[0]
        except:
            headers = {}

        if not link.startswith('http'):
            link = urlparse.urljoin(self.base_url, link)
        html = self._http_get(link, headers=headers, cache_limit=0)
                    
        fragment = dom_parser.parse_dom(html, 'div', {'class': 'player'})
        if fragment:
            iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
            if iframe_url:
                iframe_url = iframe_url[0]
                headers = {'Referer': link}
                html = self._http_get(iframe_url, headers=headers, cache_limit=0)
                sitekey = dom_parser.parse_dom(html, 'div', {'class': 'g-recaptcha'}, ret='data-sitekey')
                if sitekey:
                    token = recaptcha_v2.UnCaptchaReCaptcha().processCaptcha(sitekey[0], lang='en')
                    if token:
                        data = {'g-recaptcha-response': token}
                        html = self._http_get(iframe_url, data=data, cache_limit=0)
                        log_utils.log(html)
                        
                match = re.search("\.replace\(\s*'([^']+)'\s*,\s*'([^']*)'\s*\)", html, re.I)
                if match:
                    html = html.replace(match.group(1), match.group(2))

                match = re.search("window\.atob[\([]+'([^']+)", html)
                if match:
                    func_count = len(re.findall('window\.atob', html))
                    html = match.group(1)
                    for _i in xrange(func_count):
                        html = base64.decodestring(html)
                
                streams = []
                for match in re.finditer('''<source[^>]+src=["']([^'"]+)[^>]+label=['"]([^'"]+)''', html):
                    streams.append(match.groups())
                
                if len(streams) > 1:
                    if not self.auto_pick:
                        result = xbmcgui.Dialog().select(i18n('choose_stream'), [e[1] for e in streams])
                        if result > -1:
                            return streams[result][0] + '|User-Agent=%s' % (scraper_utils.get_ua())
                    else:
                        best_stream = ''
                        best_q = 0
                        for stream in streams:
                            stream_url, label = stream
                            if Q_ORDER[scraper_utils.height_get_quality(label)] > best_q:
                                best_q = Q_ORDER[scraper_utils.height_get_quality(label)]
                                best_stream = stream_url
                        
                        if best_stream:
                            return best_stream + '|User-Agent=%s' % (scraper_utils.get_ua())
                elif streams:
                    return streams[0][0] + '|User-Agent=%s' % (scraper_utils.get_ua())
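The example above (and several of the later resolve_link examples) both parses and emits the 'url|Header=Value&Header2=Value2' convention: the try-block at the top splits the suffix into request headers, and the '|User-Agent=%s' appended to the return values re-creates it for the player. A minimal Python 2 sketch of that round trip, using made-up example values rather than anything from these scrapers:

import urllib

def split_link(link):
    # Split an 'url|Key=Value&Key2=Value2' link into the bare URL and a
    # header dict, mirroring the try/except block in the examples above.
    try:
        url, header_part = link.split('|', 1)
        headers = dict(item.split('=') for item in header_part.split('&'))
        for key in headers:
            headers[key] = urllib.unquote(headers[key])
    except ValueError:
        url, headers = link, {}
    return url, headers

# Illustrative values only:
# split_link('http://example.com/video.mp4|User-Agent=Kodi&Referer=http%3A%2F%2Fexample.com')
# -> ('http://example.com/video.mp4', {'User-Agent': 'Kodi', 'Referer': 'http://example.com'})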
Example #11
 def get_settings(cls):
     """
     Returns a list of settings to be used for this scraper. Settings are automatically checked for updates every time scrapers are imported
     The list returned by each scraper is aggregated into a big settings.xml string, and then if it differs from the current settings xml in the Scrapers category
     the existing settings.xml fragment is removed and replaced by the new string
     """
     name = cls.get_name()
     return [
         '         <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>'
         % (name, name, i18n('enabled')),
         '         <setting id="%s-base_url" type="text" label="    %s" default="%s" visible="eq(-1,true)"/>'
         % (name, i18n('base_url'), cls.base_url),
         '         <setting id="%s-sub_check" type="bool" label="    %s" default="true" visible="eq(-2,true)"/>'
         % (name, i18n('page_existence')),
         '         <setting id="%s_last_results" type="number" default="0" visible="false"/>'
         % (name),
     ]
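The docstring above only describes the aggregation step in prose. As a rough sketch of how it could work (the function name and arguments below are illustrative, not the add-on's actual API), the add-on would join every scraper's <setting/> lines and rewrite the Scrapers fragment only when the result differs from what settings.xml already contains:

def build_settings_fragment(scraper_classes, current_fragment):
    # Illustrative sketch only: collect each scraper's settings lines,
    # join them into one XML fragment, and return the new fragment when the
    # Scrapers category needs to be rewritten (None means "no change").
    new_fragment = '\n'.join(
        line for cls in scraper_classes for line in cls.get_settings())
    if new_fragment != current_fragment:
        return new_fragment  # caller replaces the old fragment with this one
    return None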
Example #12
    def resolve_link(self, link):
        try:
            headers = dict(
                [item.split('=') for item in (link.split('|')[1]).split('&')])
            for key in headers:
                headers[key] = urllib.unquote(headers[key])
            link = link.split('|')[0]
        except:
            headers = {}

        html = self._http_get(link, headers=headers, cache_limit=.5)
        fragment = dom_parser.parse_dom(html, 'div', {'class': 'player'})
        if fragment:
            iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
            if iframe_url:
                headers = {'Referer': link}
                html = self._http_get(iframe_url[0],
                                      headers=headers,
                                      cache_limit=.5)
                match = re.search("window\.atob\('([^']+)", html)
                if match:
                    func_count = len(re.findall('window\.atob', html))
                    html = match.group(1)
                    for _i in xrange(func_count):
                        html = base64.decodestring(html)

                streams = []
                for match in re.finditer(
                        '''<source[^>]+src=["']([^'"]+)[^>]+label=['"]([^'"]+)''',
                        html):
                    streams.append(match.groups())

                if len(streams) > 1:
                    if not self.auto_pick:
                        result = xbmcgui.Dialog().select(
                            i18n('choose_stream'), [e[1] for e in streams])
                        if result > -1:
                            return streams[result][0]
                    else:
                        best_stream = ''
                        best_q = 0
                        for stream in streams:
                            stream_url, label = stream
                            if Q_ORDER[scraper_utils.height_get_quality(
                                    label)] > best_q:
                                best_q = Q_ORDER[
                                    scraper_utils.height_get_quality(label)]
                                best_stream = stream_url

                        if best_stream:
                            return best_stream
                elif streams:
                    return streams[0][0]
Example #13
 def resolve_link(self, link):
     query = urlparse.parse_qs(link)
     if 'hash_id' in query:
         hash_id = query['hash_id'][0].lower()
         if self.__add_torrent(hash_id):
             browse_url = BROWSE_URL % (hash_id)
             browse_url = urlparse.urljoin(self.base_url, browse_url)
             js_data = self._json_get(browse_url, cache_limit=0)
             videos = self.__get_videos(js_data['content'])
             
             if len(videos) > 1:
                 result = xbmcgui.Dialog().select(i18n('choose_stream'), [video['label'] for video in videos])
                 if result > -1:
                     return videos[result]['url']
             elif videos:
                 return videos[0]['url']
Example #14
    def resolve_link(self, link):
        query = urlparse.parse_qs(link)
        if 'hash_id' in query:
            hash_id = query['hash_id'][0].lower()
            if self.__add_torrent(hash_id):
                browse_url = BROWSE_URL % (hash_id)
                browse_url = urlparse.urljoin(self.base_url, browse_url)
                js_data = self._json_get(browse_url, cache_limit=0)
                videos = self.__get_videos(js_data['content'])

                if len(videos) > 1:
                    result = xbmcgui.Dialog().select(
                        i18n('choose_stream'),
                        [video['label'] for video in videos])
                    if result > -1:
                        return videos[result]['url']
                elif videos:
                    return videos[0]['url']
Example #15
    def resolve_link(self, link):
        try:
            headers = dict([item.split('=') for item in (link.split('|')[1]).split('&')])
            for key in headers: headers[key] = urllib.unquote(headers[key])
            link = link.split('|')[0]
        except:
            headers = {}

        html = self._http_get(link, headers=headers, cache_limit=0)
        fragment = dom_parser.parse_dom(html, 'div', {'class': 'player'})
        if fragment:
            iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
            if iframe_url:
                headers = {'Referer': link}
                html = self._http_get(iframe_url[0], headers=headers, cache_limit=0)
                match = re.search("window\.atob\('([^']+)", html)
                if match:
                    func_count = len(re.findall('window\.atob', html))
                    html = match.group(1)
                    for _i in xrange(func_count):
                        html = base64.decodestring(html)
                
                streams = []
                for match in re.finditer('''<source[^>]+src=["']([^'"]+)[^>]+label=['"]([^'"]+)''', html):
                    streams.append(match.groups())
                
                if len(streams) > 1:
                    if not self.auto_pick:
                        result = xbmcgui.Dialog().select(i18n('choose_stream'), [e[1] for e in streams])
                        if result > -1:
                            return streams[result][0] + '|User-Agent=%s' % (scraper_utils.get_ua())
                    else:
                        best_stream = ''
                        best_q = 0
                        for stream in streams:
                            stream_url, label = stream
                            if Q_ORDER[scraper_utils.height_get_quality(label)] > best_q:
                                best_q = Q_ORDER[scraper_utils.height_get_quality(label)]
                                best_stream = stream_url
                        
                        if best_stream:
                            return best_stream + '|User-Agent=%s' % (scraper_utils.get_ua())
                elif streams:
                    return streams[0][0] + '|User-Agent=%s' % (scraper_utils.get_ua())
Example #16
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append(
         '         <setting id="%s-use_https" type="bool" label="     %s" default="false" visible="eq(-4,true)"/>'
         % (name, i18n('use_https')))
     settings.append(
         '         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-5,true)"/>'
         % (name, i18n('username')))
     settings.append(
         '         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-6,true)"/>'
         % (name, i18n('password')))
     settings.append(
         '         <setting id="%s-base_url2" type="text" label="     %s %s" default="%s" visible="eq(-7,true)"/>'
         % (name, i18n('movies'), i18n('base_url'), cls.movie_base_url))
     settings.append(
         '         <setting id="%s-base_url3" type="text" label="     %s %s" default="%s" visible="eq(-8,true)"/>'
         % (name, i18n('tv_shows'), i18n('base_url'), cls.tv_base_url))
     return settings
Example #17
    def resolve_link(self, link):
        playlist = super(self.__class__, self)._http_get(link, cache_limit=.5)
        try:
            ns = '{http://xspf.org/ns/0/}'
            root = ET.fromstring(playlist)
            tracks = root.findall('.//%strack' % (ns))
            locations = []
            for track in tracks:
                duration = track.find('%sduration' % (ns)).text
                try: duration = int(duration)
                except: duration = 0
                if duration >= MIN_DURATION:
                    location = track.find('%slocation' % (ns)).text
                    locations.append({'duration': duration / 1000, 'url': location})

            if len(locations) > 1:
                result = xbmcgui.Dialog().select(i18n('choose_stream'), [self.__format_time(location['duration']) for location in locations])
                if result > -1:
                    return locations[result]['url']
            elif locations:
                return locations[0]['url']
        except Exception as e:
            log_utils.log('Failure during furk playlist parse: %s' % (e), log_utils.LOGWARNING)
Example #18
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append('         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>' % (name, i18n('username')))
     settings.append('         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>' % (name, i18n('password')))
     return settings
Example #19
    def resolve_link(self, link):
        try:
            headers = dict(
                [item.split('=') for item in (link.split('|')[1]).split('&')])
            for key in headers:
                headers[key] = urllib.unquote(headers[key])
            link = link.split('|')[0]
        except:
            headers = {}

        if not link.startswith('http'):
            link = urlparse.urljoin(self.base_url, link)
        html = self._http_get(link, headers=headers, cache_limit=0)

        fragment = dom_parser.parse_dom(html, 'div', {'class': 'player'})
        if fragment:
            iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
            if iframe_url:
                iframe_url = iframe_url[0]
                headers = {'Referer': link}
                html = self._http_get(iframe_url,
                                      headers=headers,
                                      cache_limit=0)
                sitekey = dom_parser.parse_dom(html,
                                               'div', {'class': 'g-recaptcha'},
                                               ret='data-sitekey')
                if sitekey:
                    token = recaptcha_v2.UnCaptchaReCaptcha().processCaptcha(
                        sitekey[0], lang='en')
                    if token:
                        data = {'g-recaptcha-response': token}
                        html = self._http_get(iframe_url,
                                              data=data,
                                              cache_limit=0)

                match = re.search(
                    "\.replace\(\s*'([^']+)'\s*,\s*'([^']*)'\s*\)", html, re.I)
                if match:
                    html = html.replace(match.group(1), match.group(2))

                match = re.search("window\.atob[\([]+'([^']+)", html)
                if match:
                    func_count = len(re.findall('window\.atob', html))
                    html = match.group(1)
                    for _i in xrange(func_count):
                        html = base64.decodestring(html)

                streams = []
                for match in re.finditer(
                        '''<source[^>]+src=["']([^;'"]+)[^>]+label=['"]([^'"]+)''',
                        html):
                    streams.append(match.groups())

                if len(streams) > 1:
                    if not self.auto_pick:
                        result = xbmcgui.Dialog().select(
                            i18n('choose_stream'), [e[1] for e in streams])
                        if result > -1:
                            return streams[result][0] + '|User-Agent=%s' % (
                                scraper_utils.get_ua())
                    else:
                        best_stream = ''
                        best_q = 0
                        for stream in streams:
                            stream_url, label = stream
                            if Q_ORDER[scraper_utils.height_get_quality(
                                    label)] > best_q:
                                best_q = Q_ORDER[
                                    scraper_utils.height_get_quality(label)]
                                best_stream = stream_url

                        if best_stream:
                            return best_stream + '|User-Agent=%s' % (
                                scraper_utils.get_ua())
                elif streams:
                    return streams[0][0] + '|User-Agent=%s' % (
                        scraper_utils.get_ua())

                iframe_url = dom_parser.parse_dom(html, 'iframe', ret='src')
                if iframe_url:
                    return iframe_url[0]

        log_utils.log('No WatchHD Link Found: %s' % (html),
                      log_utils.LOGWARNING)
Example #20
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append('         <setting id="%s-filter" type="slider" range="0,180" option="int" label="     %s" default="60" visible="eq(-4,true)"/>' % (name, i18n('filter_results_days')))
     return settings
Example #21
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append('         <setting id="%s-base_url2" type="text" label="    %s %s" default="%s" visible="eq(-4,true)"/>' % (name, i18n('tv_shows'), i18n('base_url'), cls.tv_base_url))
     return settings
Example #22
 def get_settings(cls):
     """
     Returns a list of settings to be used for this scraper. Settings are automatically checked for updates every time scrapers are imported
     The list returned by each scraper is aggregated into a big settings.xml string, and then if it differs from the current settings xml in the Scrapers category
     the existing settings.xml fragment is removed and replaced by the new string
     """
     name = cls.get_name()
     return [
         '         <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>' % (name, name, i18n('enabled')),
         '         <setting id="%s-base_url" type="text" label="    %s" default="%s" visible="eq(-1,true)"/>' % (name, i18n('base_url'), cls.base_url),
         '         <setting id="%s-sub_check" type="bool" label="    %s" default="true" visible="eq(-2,true)"/>' % (name, i18n('page_existence')),
         '         <setting id="%s_last_results" type="number" default="0" visible="false"/>' % (name),
     ]
Example #23
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append('         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-4,true)"/>' % (name, i18n('username')))
     settings.append('         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-5,true)"/>' % (name, i18n('password')))
     settings.append('         <setting id="%s-result_limit" label="     %s" type="slider" default="10" range="10,100" option="int" visible="eq(-6,true)"/>' % (name, i18n('result_limit')))
     return settings
Example #24
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append('         <setting id="%s-select" type="enum" label="     %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
     return settings
Example #25
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     name = cls.get_name()
     settings.append('         <setting id="%s-auto_pick" type="bool" label="    %s" default="false" visible="eq(-4,true)"/>' % (name, i18n('auto_pick')))
     return settings
Example #26
 def get_settings(cls):
     settings = super(cls, cls).get_settings()
     settings = scraper_utils.disable_sub_check(settings)
     name = cls.get_name()
     settings.append('         <setting id="%s-use_https" type="bool" label="     %s" default="false" visible="eq(-4,true)"/>' % (name, i18n('use_https')))
     settings.append('         <setting id="%s-username" type="text" label="     %s" default="" visible="eq(-5,true)"/>' % (name, i18n('username')))
     settings.append('         <setting id="%s-password" type="text" label="     %s" option="hidden" default="" visible="eq(-6,true)"/>' % (name, i18n('password')))
     settings.append('         <setting id="%s-base_url2" type="text" label="     %s %s" default="%s" visible="eq(-7,true)"/>' % (name, i18n('movies'), i18n('base_url'), cls.movie_base_url))
     settings.append('         <setting id="%s-base_url3" type="text" label="     %s %s" default="%s" visible="eq(-8,true)"/>' % (name, i18n('tv_shows'), i18n('base_url'), cls.tv_base_url))
     return settings
Example #27
 def get_settings(cls):
     name = cls.get_name()
     settings = [
         '         <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>' % (name, name, i18n('enabled')),
         '         <setting id="%s-sub_check" type="bool" label="    %s" default="false" visible="eq(-1,true)"/>' % (name, i18n('page_existence')),
         '         <setting id="%s_last_results" type="number" default="0" visible="false"/>' % (name)
     ]
     return settings