Example #1
    def __get_movie_sources(self, source_url):
        hosters = []
        query = kodi.parse_query(urlparse.urlparse(source_url).query)
        movie_id = query.get('movie_id') or self.__get_movie_id(source_url)
        if not movie_id: return hosters

        # Fetch torrent details for the movie, then check each torrent's cache status
        details_url = scraper_utils.urljoin(self.movie_base_url, MOVIE_DETAILS_URL)
        detail_data = self._json_get(details_url, params={'movie_id': movie_id}, cache_limit=24)
        try: torrents = detail_data['data']['movie']['torrents']
        except KeyError: torrents = []
        try: hashes = [torrent['hash'].lower() for torrent in torrents]
        except KeyError: hashes = []
        hash_data = self.__get_hash_data(hashes)
        for torrent in torrents:
            hash_id = torrent['hash'].lower()
            try: status = hash_data['hashes'][hash_id]['status']
            except KeyError: status = ''
            # Only already-cached ('finished') torrents are offered as direct sources
            if status.lower() != 'finished': continue
            stream_url = 'hash_id=%s' % (hash_id)
            host = scraper_utils.get_direct_hostname(self, stream_url)
            quality = QUALITY_MAP.get(torrent['quality'], QUALITIES.HD720)
            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            if 'size_bytes' in torrent: hoster['size'] = scraper_utils.format_size(torrent['size_bytes'], 'B')
            if torrent['quality'] == '3D': hoster['3D'] = True
            hosters.append(hoster)
        return hosters
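
Every example on this page funnels a raw size through scraper_utils.format_size to build a human-readable label (Examples #1 and #3 pass a 'B' unit, Example #7 names the result size_gb). The helper itself is not shown here; the following is only a minimal sketch of the assumed behaviour, not the actual scraper_utils implementation:

def format_size(num, unit=''):
    # Assumed behaviour: scale the value through K/M/G/T steps of 1024 and
    # append the caller-supplied unit, e.g. format_size(1466714450, 'B') -> '1.37GB'
    suffixes = ['', 'K', 'M', 'G', 'T']
    num = float(num)
    idx = 0
    while num >= 1024.0 and idx < len(suffixes) - 1:
        num /= 1024.0
        idx += 1
    return '%.2f%s%s' % (num, suffixes[idx], unit)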
Example #2
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        for stream in self.__get_videos(source_url, video):
            if video.video_type == VIDEO_TYPES.EPISODE and not scraper_utils.release_check(
                    video, stream['name']):
                continue

            host = scraper_utils.get_direct_hostname(self, stream['url'])
            hoster = {
                'multi-part': False,
                'class': self,
                'views': None,
                'url': stream['url'],
                'rating': None,
                'host': host,
                'quality': stream['quality'],
                'direct': True
            }
            if 'size' in stream:
                hoster['size'] = scraper_utils.format_size(stream['size'])
            if 'name' in stream: hoster['extra'] = stream['name']
            hosters.append(hoster)

        return hosters
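
Examples #1, #2, #4 and #7 all return the same hoster dictionary shape: 'multi-part', 'class', 'views', 'url', 'rating', 'host', 'quality' and 'direct' are always present, while 'size', 'extra', 'format', '3D' and 'subs' are added only when known. A hypothetical helper (make_hoster is not part of scraper_utils) makes that contract explicit:

def make_hoster(scraper, url, host, quality, direct=True, **optional):
    # Required keys shared by every scraper on this page
    hoster = {'multi-part': False, 'class': scraper, 'views': None, 'url': url,
              'rating': None, 'host': host, 'quality': quality, 'direct': direct}
    # Optional keys ('size', 'extra', 'format', ...) only when a value is known
    hoster.update((key, value) for key, value in optional.items() if value is not None)
    return hoster

With such a helper, each loop body above would collapse to a single make_hoster(...) call plus whichever optional fields the scraper knows about.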
Example #3
    def __get_videos(self, content):
        videos = []
        for item in content.itervalues():
            if item['type'].lower() == 'dir':
                # Recurse into sub-directories
                videos += self.__get_videos(item['children'])
            else:
                if item['ext'].upper() not in VIDEO_EXT: continue
                label = '(%s) %s' % (scraper_utils.format_size(item['size'], 'B'), item['name'])
                video = {'label': label, 'url': item['url']}
                videos.append(video)
                # Optionally add the transcoded copy as a second entry
                if self.include_trans and 'transcoded' in item and item['transcoded']:
                    transcode = item['transcoded']
                    if 'size' in transcode:
                        label = '(%s) (Transcode) %s' % (scraper_utils.format_size(transcode['size'], 'B'), item['name'])
                    else:
                        label = '(Transcode) %s' % (item['name'])
                    video = {'label': label, 'url': transcode['url']}
                    videos.append(video)

        return videos
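
Example #3 walks a nested file listing in which directory entries carry a 'children' mapping and file entries carry 'ext', 'size', 'name', 'url' and optionally 'transcoded'. The listing below is made up purely to illustrate the expected shape; only the keys are taken from the code above:

content = {
    'a': {'type': 'dir', 'children': {
        'b': {'type': 'file', 'ext': 'mkv', 'size': 1466714450,
              'name': 'Some.Movie.2016.720p.mkv', 'url': 'http://example.com/video/1'},
    }},
    'c': {'type': 'file', 'ext': 'nfo', 'size': 512,
          'name': 'info.nfo', 'url': 'http://example.com/file/2'},
}

Assuming 'MKV' is in VIDEO_EXT and include_trans is False, __get_videos(content) would return a single entry for the .mkv file, with its formatted size prepended to the label, and skip the .nfo entry.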
Example #4
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        headers = {
            'User-Agent': scraper_utils.get_ua(),
            'Referer': self.base_url + source_url
        }
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(source_url)
            stream_url = source_url + scraper_utils.append_headers(headers)
            quality = scraper_utils.height_get_quality(meta['height'])
            hoster = {
                'multi-part': False,
                'host': scraper_utils.get_direct_hostname(self, stream_url),
                'class': self,
                'quality': quality,
                'views': None,
                'rating': None,
                'url': stream_url,
                'direct': True
            }
            if 'format' in meta: hoster['format'] = meta['format']
            hosters.append(hoster)
        else:
            for episode in self.__match_episode(source_url, video):
                meta = scraper_utils.parse_episode_link(episode['title'])
                stream_url = episode['url'] + scraper_utils.append_headers(
                    headers)
                stream_url = stream_url.replace(self.base_url, '')
                quality = scraper_utils.height_get_quality(meta['height'])
                hoster = {
                    'multi-part': False,
                    'host': scraper_utils.get_direct_hostname(self, stream_url),
                    'class': self,
                    'quality': quality,
                    'views': None,
                    'rating': None,
                    'url': stream_url,
                    'direct': True
                }
                if 'format' in meta: hoster['format'] = meta['format']
                if 'size' in episode:
                    hoster['size'] = scraper_utils.format_size(
                        int(episode['size']))
                hosters.append(hoster)

        return hosters
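
Example #4 relies on scraper_utils.append_headers to glue the request headers onto the stream URL. The common Kodi convention is a '|Key=value&Key=value' suffix that the player strips off and sends as HTTP headers; the following is a stand-in under that assumption (the real scraper_utils helper may differ):

import urllib

def append_headers(headers):
    # Assumed Kodi-style suffix, e.g. '|User-Agent=...&Referer=...'
    return '|%s' % '&'.join(['%s=%s' % (key, urllib.quote_plus(str(value)))
                             for key, value in headers.items()])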
Example #5
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=1)
        for _attrs, item in dom_parser2.parse_dom(html, 'a',
                                                  {'class': 'full-torrent1'}):
            stream_url = dom_parser2.parse_dom(item, 'span', req='onclick')
            host = dom_parser2.parse_dom(item, 'div',
                                         {'class': 'small_server'})
            match = re.search(r'Views:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
            views = match.group(1) if match else None
            match = re.search(r'Size:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
            # The listed size appears to be in MB; convert to bytes for format_size
            size = int(match.group(1)) * 1024 * 1024 if match else None
            if not stream_url or not host: continue

            stream_url = stream_url[0].attrs['onclick']
            host = host[0].content.lower()
            host = host.replace('stream server: ', '')
            match = re.search("'(/redirect/[^']+)", stream_url)
            if match: stream_url = match.group(1)
            quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
            hoster = {
                'multi-part': False,
                'host': host,
                'class': self,
                'quality': quality,
                'views': views,
                'rating': None,
                'url': stream_url,
                'direct': False
            }
            if size is not None:
                hoster['size'] = scraper_utils.format_size(size, 'B')
            hosters.append(hoster)
        return hosters
Example #6
    def get_sources(self, video):
        sources = []
        try:
            orion = Orion(
                base64.b64decode(base64.b64decode(base64.b64decode(
                    self.key))).replace(' ', ''))
            if not orion.userEnabled() or not orion.userValid():
                raise Exception()

            query = ''
            media_type = None
            if video.video_type == VIDEO_TYPES.MOVIE:
                media_type = Orion.TypeMovie
                query = '%s %s' % (str(video.title), str(video.year))
            else:
                media_type = Orion.TypeShow
                query = '%s S%sE%s' % (str(video.title), str(
                    video.season), str(video.episode))

            results = orion.streams(type=media_type,
                                    query=query,
                                    streamType=Orion.StreamHoster)

            for data in results:
                try:
                    if self._valid(data):
                        # Separate name so the Orion API object above is not shadowed
                        orion_info = {}
                        try:
                            orion_info['stream'] = data['id']
                        except:
                            pass
                        try:
                            orion_info['item'] = data
                        except:
                            pass

                        stream = {
                            'orion': orion_info,
                            'class': self,
                            'multi-part': False,
                            'host': self._source(data, True),
                            'quality': self._quality(data),
                            'language': self._language(data),
                            'url': data['stream']['link'],
                            'views': self._popularity(data, False),
                            'rating': int(self._popularity(data, True)),
                            'direct': data['access']['direct'],
                        }

                        if data['video']['codec']:
                            stream['format'] = data['video']['codec']

                        if data['file']['size']:
                            stream['size'] = scraper_utils.format_size(
                                data['file']['size'])

                        if data['video']['3d']:
                            stream['3D'] = data['video']['3d']

                        if data['subtitle']['languages'] and len(
                                data['subtitle']['languages']) > 0:
                            stream['subs'] = '-'.join(
                                data['subtitle']['languages']).upper()

                        sources.append(stream)
                except:
                    self._error()
        except:
            self._error()
        return sources
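
The Orion API key in Example #6 is stored triple-base64-encoded, so unwrapping it is just three nested b64decode calls. A small round trip with a made-up key shows the idea (decode_key is only for this illustration; Python 2, like the rest of this page):

import base64

def decode_key(stored):
    # Mirrors Example #6: decode three times, then drop any stray spaces
    return base64.b64decode(base64.b64decode(base64.b64decode(stored))).replace(' ', '')

raw = 'not-a-real-orion-key'
stored = base64.b64encode(base64.b64encode(base64.b64encode(raw)))
assert decode_key(stored) == raw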
Example #7
    def __get_links(self, url, video):
        hosters = []
        search_url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
        query = self.__translate_search(url)
        result = self._http_get(search_url,
                                data=query,
                                allow_redirect=False,
                                cache_limit=.5)
        for item in result.get('files', []):
            # Each flag marks a reason to exclude this result; any True skips it
            checks = [False] * 6
            if item.get('type', '').upper() != 'VIDEO': checks[0] = True
            if item.get('is_ready') != '1': checks[1] = True
            if item.get('av_result') in ['warning', 'infected']:
                checks[2] = True
            if 'video_info' not in item: checks[3] = True
            if item.get('video_info') and not re.search(
                    r'#0:(0|1)(\((eng|und)\))?:\s*Audio:', item['video_info'],
                    re.I):
                checks[4] = True
            if not scraper_utils.release_check(video, item['name']):
                checks[5] = True
            if any(checks):
                logger.log(
                    'Furk.net result excluded: %s - |%s|' %
                    (checks, item['name']), log_utils.LOGDEBUG)
                continue

            match = re.search(r'(\d{3,})\s*x\s*(\d{3,})', item['video_info'])
            if match:
                width, _height = match.groups()
                quality = scraper_utils.width_get_quality(width)
            else:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(item['name'])
                else:
                    meta = scraper_utils.parse_episode_link(item['name'])
                quality = scraper_utils.height_get_quality(meta['height'])

            if 'url_pls' in item:
                size_gb = scraper_utils.format_size(int(item['size']), 'B')
                if self.max_bytes and int(item['size']) > self.max_bytes:
                    logger.log(
                        'Result skipped, Too big: |%s| - %s (%s) > %s (%sGB)' %
                        (item['name'], item['size'], size_gb, self.max_bytes,
                         self.max_gb))
                    continue

                stream_url = item['url_pls']
                host = scraper_utils.get_direct_hostname(self, stream_url)
                hoster = {
                    'multi-part': False,
                    'class': self,
                    'views': None,
                    'url': stream_url,
                    'rating': None,
                    'host': host,
                    'quality': quality,
                    'direct': True
                }
                hoster['size'] = size_gb
                hoster['extra'] = item['name']
                hosters.append(hoster)
            else:
                logger.log(
                    'Furk.net result skipped - no playlist: |%s|' %
                    (json.dumps(item)), log_utils.LOGDEBUG)

        return hosters