def get_sources(self, video):
    """Harvest hoster links out of the post's entry-content div."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    post = dom_parser2.parse_dom(html, 'div', {'class': 'entry-content'})
    if not post:
        return hosters

    for link in re.finditer('(?:href="|>)(https?://[^"<]+)', post[0].content):
        stream_url = link.group(1)
        # Drop known-bad links and IMDB cross-references.
        if scraper_utils.excluded_link(stream_url) or 'imdb.com' in stream_url:
            continue
        host = urlparse.urlparse(stream_url).hostname
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(stream_url)
        else:
            meta = scraper_utils.parse_episode_link(stream_url)
        hosters.append({
            'multi-part': False, 'host': host, 'class': self, 'views': None,
            'url': stream_url, 'rating': None,
            'quality': scraper_utils.height_get_quality(meta['height']),
            'direct': False})
    return hosters
def get_sources(self, video):
    """Collect hoster links from the table cells of the post page."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    fragment = dom_parser2.parse_dom(html, 'div', {'class': 'entry-content'})

    # Map each link to the quality implied by its release name.
    quality_by_link = {}
    if fragment:
        for _attrs, cell in dom_parser2.parse_dom(fragment[0].content, 'td'):
            for attrs, _content in dom_parser2.parse_dom(cell, 'a', req='href'):
                meta = scraper_utils.parse_episode_link(attrs['href'])
                quality_by_link[attrs['href']] = scraper_utils.height_get_quality(meta['height'])

    for link, quality in quality_by_link.iteritems():
        if scraper_utils.excluded_link(link):
            continue
        hosters.append({
            'multi-part': False, 'host': urlparse.urlparse(link).hostname,
            'class': self, 'views': None, 'url': link, 'rating': None,
            'quality': quality, 'direct': False})
    return hosters
def get_sources(self, video):
    """Build hosters from the release links found in the post body."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    for source, value in self.__get_post_links(html).iteritems():
        if scraper_utils.excluded_link(source):
            continue
        # Quality comes from the release name, not the link itself.
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(value['release'])
        else:
            meta = scraper_utils.parse_episode_link(value['release'])
        hoster = {
            'multi-part': False, 'host': urlparse.urlparse(source).hostname,
            'class': self, 'views': None, 'url': source, 'rating': None,
            'quality': scraper_utils.height_get_quality(meta['height']),
            'direct': False}
        if 'format' in meta:
            hoster['format'] = meta['format']
        hosters.append(hoster)
    return hosters
def get_sources(self, video):
    """Pull hoster links out of the 'info2' spans on the post page."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = scraper_utils.urljoin(self.base_url, source_url)
    headers = {'User-Agent': LOCAL_UA}
    html = self._http_get(page_url, require_debrid=True, headers=headers, cache_limit=.5)
    # Each info2 span runs until the next info span or a horizontal rule.
    section_pattern = "<span\s+class='info2'(.*?)(<span\s+class='info|<hr\s*/>)"
    for section in re.finditer(section_pattern, html, re.DOTALL):
        for link in re.finditer('href="([^"]+)', section.group(1)):
            stream_url = link.group(1)
            meta = scraper_utils.parse_episode_link(stream_url)
            hosters.append({
                'multi-part': False,
                'host': urlparse.urlparse(stream_url).hostname,
                'class': self, 'views': None, 'url': stream_url,
                'rating': None,
                'quality': scraper_utils.height_get_quality(meta['height']),
                'direct': False})
    return hosters
def __get_links_from_json2(self, url, page_url, video_type):
    """Extract direct stream URLs + qualities from the JSON playlist endpoint."""
    sources = {}
    headers = {'Referer': page_url}
    headers.update(XHR)
    html = self._http_get(url, headers=headers, cache_limit=0)
    js_data = scraper_utils.parse_json(html, url)
    try:
        # Any missing key / empty playlist lands in the except below.
        for entry in js_data.get('playlist', [])[0].get('sources', []):
            stream_url = entry['file']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in entry:
                quality = scraper_utils.height_get_quality(entry['label'])
            else:
                if video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(stream_url)
                else:
                    meta = scraper_utils.parse_episode_link(stream_url)
                quality = scraper_utils.height_get_quality(meta['height'])
            sources[stream_url] = {'quality': quality, 'direct': True}
            logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
    except Exception as e:
        logger.log('Exception during yesmovies extract: %s' % (e), log_utils.LOGDEBUG)
    return sources
def __get_sources(self, video, html):
    """Map release links to qualities for releases matching |video|."""
    sources = {}
    block_pattern = '<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>'
    for block in re.finditer(block_pattern, html, re.DOTALL):
        release, links = block.groups()
        release = re.sub('</?[^>]*>', '', release)  # strip residual markup
        if not scraper_utils.release_check(video, release):
            continue
        meta = scraper_utils.parse_episode_link(release)
        quality = scraper_utils.height_get_quality(meta['height'])
        for link in re.finditer('href="([^"]+)', links):
            sources[link.group(1)] = quality
    return sources
def __get_post_links(self, html, video):
    """Gather h2-anchored links from the post article, keyed to quality."""
    sources = {}
    post = dom_parser2.parse_dom(html, 'article', {'id': re.compile('post-\d+')})
    if not post:
        return sources
    for _attrs, heading in dom_parser2.parse_dom(post[0].content, 'h2'):
        for attrs, _content in dom_parser2.parse_dom(heading, 'a', req='href'):
            stream_url = attrs['href']
            meta = scraper_utils.parse_episode_link(stream_url)
            release_quality = scraper_utils.height_get_quality(meta['height'])
            host = urlparse.urlparse(stream_url).hostname
            # Adjust release quality per-host and per-video settings.
            sources[stream_url] = scraper_utils.get_quality(video, host, release_quality)
    return sources
def search(self, video_type, title, year, season=''):  # @UnusedVariable
    """Query the custom search endpoint; fall back to on-site search."""
    results = []
    search_url = base64.decodestring(SEARCH_URL) % (urllib.quote_plus(title))
    html = self._http_get(search_url, cache_limit=2)
    if html:
        js_data = scraper_utils.parse_json(html)
        search_meta = scraper_utils.parse_episode_link(title)
        for item in js_data.get('results', []):
            metatags = item.get('richSnippet', {}).get('metatags', {})
            post_date = metatags.get('articlePublishedTime')
            if post_date:
                # Strip the TZ offset so to_datetime can parse the stamp.
                post_date = re.sub('[+-]\d+:\d+$', '', post_date)
                post_date = scraper_utils.to_datetime(post_date, '%Y-%m-%dT%H:%M:%S').date()
                if self.__too_old(post_date):
                    continue
            match_title = metatags.get('ogTitle', '') or item['titleNoFormatting']
            match_title = re.sub(re.compile('\s*-\s*Scene\s*Down$', re.I), '', match_title)
            match_url = item['url']
            match_year = ''
            item_meta = scraper_utils.parse_episode_link(match_title)
            if scraper_utils.meta_release_check(video_type, search_meta, item_meta):
                results.append({
                    'title': scraper_utils.cleanse_title(match_title),
                    'year': match_year,
                    'url': scraper_utils.pathify_url(match_url)})
    if not results:
        results = self.__site_search(video_type, title, year)
    return results
def __get_quality(self, item, video):
    """Best-effort quality: explicit dimensions first, then the name."""
    if item.get('width'):
        return scraper_utils.width_get_quality(item['width'])
    if item.get('height'):
        return scraper_utils.height_get_quality(item['height'])
    if 'name' in item:
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(item['name'])
        else:
            meta = scraper_utils.parse_episode_link(item['name'])
        return scraper_utils.height_get_quality(meta['height'])
    # Nothing usable on the item; assume a sane default.
    return QUALITIES.HIGH
def get_sources(self, video):
    """Return direct-play hosters.

    Movies use the page link itself as the stream; episodes are matched
    out of the show listing via __match_episode. Headers are appended to
    the stream URL so the player sends them with the request.
    """
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters
    headers = {
        'User-Agent': scraper_utils.get_ua(),
        'Referer': self.base_url + source_url
    }
    if video.video_type == VIDEO_TYPES.MOVIE:
        meta = scraper_utils.parse_movie_link(source_url)
        stream_url = source_url + scraper_utils.append_headers(headers)
        quality = scraper_utils.height_get_quality(meta['height'])
        hoster = {
            'multi-part': False,
            'host': scraper_utils.get_direct_hostname(self, stream_url),
            'class': self, 'quality': quality, 'views': None,
            'rating': None, 'url': stream_url, 'direct': True
        }
        if 'format' in meta:
            hoster['format'] = meta['format']
        hosters.append(hoster)
    else:
        for episode in self.__match_episode(source_url, video):
            meta = scraper_utils.parse_episode_link(episode['title'])
            stream_url = episode['url'] + scraper_utils.append_headers(headers)
            # Episode URLs are stored relative to base_url.
            stream_url = stream_url.replace(self.base_url, '')
            quality = scraper_utils.height_get_quality(meta['height'])
            hoster = {
                'multi-part': False,
                'host': scraper_utils.get_direct_hostname(self, stream_url),
                'class': self, 'quality': quality, 'views': None,
                'rating': None, 'url': stream_url, 'direct': True
            }
            if 'format' in meta:
                hoster['format'] = meta['format']
            if 'size' in episode:
                hoster['size'] = scraper_utils.format_size(int(episode['size']))
            hosters.append(hoster)
    return hosters
def __get_links(self, url, video):
    """Query the Alluc-style API once per search type and build hosters.

    Results with multiple mirror URLs, 'rar' extensions, or already-seen
    stream URLs are skipped; titles must pass release_check.
    """
    hosters = []
    seen_urls = set()
    for search_type in SEARCH_TYPES:
        search_url, params = self.__translate_search(url, search_type)
        if not search_url:
            continue
        html = self._http_get(search_url, params=params, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, search_url)
        if js_result.get('status') != 'success':
            logger.log(
                'Alluc API Error: |%s|%s|: %s' %
                (search_url, params,
                 js_result.get('message', 'Unknown Error')),
                log_utils.LOGWARNING)
            continue
        for result in js_result['result']:
            stream_url = result['hosterurls'][0]['url']
            # Only accept single-mirror results.
            if len(result['hosterurls']) > 1:
                continue
            if result['extension'] == 'rar':
                continue
            if stream_url in seen_urls:
                continue
            if scraper_utils.release_check(video, result['title']):
                host = urlparse.urlsplit(stream_url).hostname
                quality = scraper_utils.get_quality(
                    video, host, self._get_title_quality(result['title']))
                hoster = {
                    'multi-part': False, 'class': self, 'views': None,
                    'url': stream_url, 'rating': None, 'host': host,
                    'quality': quality, 'direct': False
                }
                hoster['extra'] = scraper_utils.cleanse_title(result['title'])
                # Re-parse the cleaned title to pick up a codec format tag.
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(hoster['extra'])
                else:
                    meta = scraper_utils.parse_episode_link(hoster['extra'])
                if 'format' in meta:
                    hoster['format'] = meta['format']
                hosters.append(hoster)
                seen_urls.add(stream_url)
    return hosters
def __get_links(self, url, video):
    """Search Furk.net and build direct-play hosters.

    A result is rejected unless it is a ready, clean VIDEO with an
    eng/und (or unlabeled) audio track and a name passing release_check.
    Quality comes from the probed resolution when present, else the name.
    """
    hosters = []
    search_url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
    query = self.__translate_search(url)
    result = self._http_get(search_url, data=query, allow_redirect=False, cache_limit=.5)
    for item in result.get('files', []):
        # Each flag records one independent reason to exclude the item.
        checks = [False] * 6
        if item.get('type', '').upper() != 'VIDEO': checks[0] = True
        if item.get('is_ready') != '1': checks[1] = True
        if item.get('av_result') in ['warning', 'infected']: checks[2] = True
        if 'video_info' not in item: checks[3] = True
        if item.get('video_info') and not re.search('#0:(0|1)(\((eng|und)\))?:\s*Audio:', item['video_info'], re.I): checks[4] = True
        if not scraper_utils.release_check(video, item['name']): checks[5] = True
        if any(checks):
            logger.log('Furk.net result excluded: %s - |%s|' % (checks, item['name']), log_utils.LOGDEBUG)
            continue
        # Prefer the actual probed resolution over the release-name guess.
        match = re.search('(\d{3,})\s*x\s*(\d{3,})', item['video_info'])
        if match:
            width, _height = match.groups()
            quality = scraper_utils.width_get_quality(width)
        else:
            if video.video_type == VIDEO_TYPES.MOVIE:
                meta = scraper_utils.parse_movie_link(item['name'])
            else:
                meta = scraper_utils.parse_episode_link(item['name'])
            quality = scraper_utils.height_get_quality(meta['height'])
        if 'url_pls' in item:
            size_gb = scraper_utils.format_size(int(item['size']), 'B')
            if self.max_bytes and int(item['size']) > self.max_bytes:
                logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%sGB)' % (item['name'], item['size'], size_gb, self.max_bytes, self.max_gb))
                continue
            stream_url = item['url_pls']
            host = scraper_utils.get_direct_hostname(self, stream_url)
            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            hoster['size'] = size_gb
            hoster['extra'] = item['name']
            hosters.append(hoster)
        else:
            # No playlist URL means nothing playable; log and move on.
            logger.log('Furk.net result skipped - no playlist: |%s|' % (json.dumps(item)), log_utils.LOGDEBUG)
    return hosters
def get_sources(self, video):
    """Harvest hoster links from the bolded paragraph of the post body."""
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters

    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    fragment = dom_parser2.parse_dom(html, 'div', {'class': 'post-cont'})
    if not fragment:
        return hosters
    # Links live between the opening <p><strong> and the first <script>.
    section = re.search('<p>\s*<strong>(.*?)<script', fragment[0].content, re.DOTALL)
    if not section:
        return hosters

    for attrs, _content in dom_parser2.parse_dom(section.group(1), 'a', req='href'):
        link = attrs['href']
        if scraper_utils.excluded_link(link):
            continue
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(link)
        else:
            meta = scraper_utils.parse_episode_link(link)
        host = urlparse.urlparse(link).hostname
        quality = scraper_utils.get_quality(
            video, host, scraper_utils.height_get_quality(meta['height']))
        hosters.append({
            'multi-part': False, 'host': host, 'class': self, 'views': None,
            'url': link, 'rating': None, 'quality': quality, 'direct': False})
    return hosters
def __get_mirror_links(self, html, video):
    """Locate the mirrors image and collect the links that follow it."""
    sources = {}
    for attrs, _content in dom_parser2.parse_dom(html, 'img', req='src'):
        image = attrs['src']
        if not image.endswith('/mirrors.png'):
            continue
        # The mirror links sit in the first <p> after the image.
        section = re.search('%s.*?<p>(.*?)</p>' % (image), html, re.DOTALL)
        if not section:
            continue
        for attrs, _content in dom_parser2.parse_dom(section.group(1), 'a', req='href'):
            stream_url = attrs['href']
            host = urlparse.urlparse(stream_url).hostname
            meta = scraper_utils.parse_episode_link(stream_url)
            base_quality = scraper_utils.height_get_quality(meta['height'])
            sources[stream_url] = {
                'quality': scraper_utils.get_quality(video, host, base_quality),
                'direct': False}
    return sources
def __get_post_links(self, html, video):
    """Pair each <strong> release header with the links that follow it."""
    sources = {}
    post = dom_parser2.parse_dom(html, 'div', {'class': 'postContent'})
    if not post:
        return sources
    content = post[0].content
    # Split the post into chunks, each starting at a <strong> header.
    for piece in re.finditer('(<strong>.*?)(?=<strong>|$)', content, re.DOTALL):
        chunk = piece.group(1)
        release = dom_parser2.parse_dom(chunk, 'strong')
        if not release:
            continue
        meta = scraper_utils.parse_episode_link(release[0].content)
        release_quality = scraper_utils.height_get_quality(meta['height'])
        for attrs, _content in dom_parser2.parse_dom(chunk, 'a', req='href'):
            link = attrs['href']
            host = urlparse.urlparse(link).hostname
            sources[link] = scraper_utils.get_quality(video, host, release_quality)
    return sources
def __get_release(self, html, video):
    """Pick which release page to use from the releases/episodes list.

    The '<scraper>-select' setting chooses between the first acceptable
    release (0) or the highest-quality one (non-zero, ranked via Q_ORDER).
    Returns the chosen page URL, or None if nothing qualifies.
    """
    try:
        select = int(kodi.get_setting('%s-select' % (self.get_name())))
    except:
        select = 0
    ul_id = 'releases' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
    fragment = dom_parser2.parse_dom(html, 'ul', {'id': ul_id})
    if fragment:
        best_qorder = 0
        best_page = None
        for _attrs, item in dom_parser2.parse_dom(fragment[0].content, 'li'):
            # Release link may be a <span> or an <a>, both carrying href/title.
            match = dom_parser2.parse_dom(item, 'span', req=['href', 'title'])
            if not match:
                match = dom_parser2.parse_dom(item, 'a', req=['href', 'title'])
                if not match:
                    continue
            page_url, release = match[0].attrs['href'], match[0].attrs['title']
            match = dom_parser2.parse_dom(item, 'span', {'class': 'time'})
            # Items appear to be listed newest-first, so the first too-old
            # entry ends the scan (NOTE(review): ordering assumed — verify).
            if match and self.__too_old(match[0].content):
                break
            release = re.sub('^\[[^\]]*\]\s*', '', release)  # strip "[group]" prefix
            if video.video_type == VIDEO_TYPES.MOVIE:
                meta = scraper_utils.parse_movie_link(release)
            else:
                if not scraper_utils.release_check(video, release, require_title=False):
                    continue
                meta = scraper_utils.parse_episode_link(release)
            if select == 0:
                # "First" mode: take the first acceptable release.
                best_page = page_url
                break
            else:
                # "Best" mode: keep the page with the highest quality rank.
                quality = scraper_utils.height_get_quality(meta['height'])
                logger.log('result: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                if Q_ORDER[quality] > best_qorder:
                    logger.log('Setting best as: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                    best_page = page_url
                    best_qorder = Q_ORDER[quality]
        return best_page
def get_sources(self, video):
    """Scrape the links table; gvideo links are resolved to direct
    streams, everything else becomes an unresolved hoster link.
    """
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    fragment = dom_parser2.parse_dom(html, 'table', {'class': 'links-table'})
    if not fragment:
        return hosters
    for _attrs, row in dom_parser2.parse_dom(fragment[0].content, 'tr'):
        # Capture 1: stream URL bound to playVideo; capture 2: release
        # name in the following table cell.
        match = re.search("playVideo\.bind\(.*?'([^']+)(?:[^>]*>){2}(.*?)</td>", row, re.DOTALL)
        if not match:
            continue
        stream_url, release = match.groups()
        if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
            # One gvideo page can expand to several quality variants.
            sources = scraper_utils.parse_google(self, stream_url)
        else:
            sources = [stream_url]
        for source in sources:
            host = scraper_utils.get_direct_hostname(self, source)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source)
                direct = True
            else:
                # Non-direct: quality comes from the release name instead.
                host = urlparse.urlparse(source).hostname
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(release)
                else:
                    meta = scraper_utils.parse_episode_link(release)
                base_quality = scraper_utils.height_get_quality(meta['height'])
                quality = scraper_utils.get_quality(video, host, base_quality)
                direct = False
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': source, 'direct': direct}
            hosters.append(hoster)
    return hosters
def _blog_proc_results(self, html, post_pattern, date_format, video_type, title, year):
    """Parse blog-style search result posts out of |html|.

    :param post_pattern: regex with named groups ``post_title`` and
        ``url`` (optionally ``date`` and ``quality``)
    :param date_format: strptime format for the ``date`` group
    :param video_type: VIDEO_TYPES value being searched for
    :param title: search title; for episodes it may embed SxxExx or a
        yyyy.mm.dd air date
    :param year: expected year ('' to skip the year check)
    :returns: list of result dicts (url/title/year/quality)
    """
    results = []
    search_date = ''
    search_sxe = ''
    if video_type == VIDEO_TYPES.EPISODE:
        # Pull SxxExx — or failing that an air date — out of the title.
        match = re.search('(.*?)\s*(S\d+E\d+)\s*', title)
        if match:
            show_title, search_sxe = match.groups()
        else:
            match = re.search('(.*?)\s*(\d{4})[._ -]?(\d{2})[._ -]?(\d{2})\s*', title)
            if match:
                show_title, search_year, search_month, search_day = match.groups()
                search_date = '%s-%s-%s' % (search_year, search_month, search_day)
                search_date = scraper_utils.to_datetime(search_date, "%Y-%m-%d").date()
            else:
                show_title = title
    else:
        show_title = title

    today = datetime.date.today()
    for match in re.finditer(post_pattern, html, re.DOTALL):
        post_data = match.groupdict()
        post_title = post_data['post_title']
        post_title = re.sub('<[^>]*>', '', post_title)  # strip markup
        if 'quality' in post_data:
            post_title += '- [%s]' % (post_data['quality'])

        try:
            filter_days = int(kodi.get_setting('%s-filter' % (self.get_name())))
        except ValueError:
            filter_days = 0
        if filter_days and date_format and 'date' in post_data:
            post_data['date'] = post_data['date'].strip()
            filter_days = datetime.timedelta(days=filter_days)
            post_date = scraper_utils.to_datetime(post_data['date'], date_format).date()
            if not post_date:
                # BUGFIX: the format string had four %s placeholders but only
                # three arguments, raising TypeError whenever date parsing
                # failed; the extra placeholder is removed.
                logger.log('Failed date Check in %s: |%s|%s|' % (self.get_name(), post_data['date'], date_format), log_utils.LOGWARNING)
                post_date = today
            if today - post_date > filter_days:
                continue

        match_year = ''
        match_date = ''
        match_sxe = ''
        match_title = full_title = post_title
        if video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(post_title)
            match_year = meta['year']
        else:
            meta = scraper_utils.parse_episode_link(post_title)
            match_sxe = 'S%02dE%02d' % (int(meta['season']), int(meta['episode']))
            match_date = meta['airdate']
            match_title = meta['title']
            full_title = '%s (%sp) [%s]' % (meta['title'], meta['height'], meta['extra'])

        # A title matches if either normalized form contains the other.
        norm_title = scraper_utils.normalize_title(show_title)
        match_norm_title = scraper_utils.normalize_title(match_title)
        title_match = norm_title and (match_norm_title in norm_title or norm_title in match_norm_title)
        year_match = not year or not match_year or year == match_year
        sxe_match = not search_sxe or (search_sxe == match_sxe)
        date_match = not search_date or (search_date == match_date)
        logger.log('Blog Results: |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| (%s)' % (match_norm_title, norm_title, title_match, year, match_year, year_match, search_date, match_date, date_match, search_sxe, match_sxe, sxe_match, self.get_name()), log_utils.LOGDEBUG)
        if title_match and year_match and date_match and sxe_match:
            quality = scraper_utils.height_get_quality(meta['height'])
            result = {'url': scraper_utils.pathify_url(post_data['url']), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year, 'quality': quality}
            results.append(result)
    return results
def __get_links(self, url, video):
    """Search EasyNews and build authenticated direct-download hosters."""
    hosters = []
    search_url, params = self.__translate_search(url)
    html = self._http_get(search_url, params=params, cache_limit=.5)
    js_result = scraper_utils.parse_json(html, search_url)
    down_url = js_result.get('downURL')
    dl_farm = js_result.get('dlFarm')
    dl_port = js_result.get('dlPort')
    for item in js_result.get('data', []):
        # Fields are positional (string-keyed indexes) in the response.
        post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
        # Each flag records one independent reason to exclude the post.
        checks = [False] * 6
        if not scraper_utils.release_check(video, post_title):
            checks[0] = True
        if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']:
            checks[1] = True
        # Reject clips: anything measured in seconds or under six minutes.
        if re.match('^\d+s', duration) or re.match('^[0-5]m', duration):
            checks[2] = True
        if 'passwd' in item and item['passwd']:
            checks[3] = True
        if 'virus' in item and item['virus']:
            checks[4] = True
        if 'type' in item and item['type'].upper() != 'VIDEO':
            checks[5] = True
        if any(checks):
            logger.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
            continue
        # Assemble the farm/port/hash download path, then append the auth
        # header so the player sends it with the request.
        stream_url = down_url + urllib.quote('/%s/%s/%s%s/%s%s' % (dl_farm, dl_port, post_hash, ext, post_title, ext))
        stream_url = stream_url + '|Authorization=%s' % (urllib.quote(self.auth))
        host = scraper_utils.get_direct_hostname(self, stream_url)
        # Prefer the reported width; fall back to the release-name height.
        quality = None
        if 'width' in item:
            try:
                width = int(item['width'])
            except:
                width = 0
            if width:
                quality = scraper_utils.width_get_quality(width)
        if quality is None:
            if video.video_type == VIDEO_TYPES.MOVIE:
                meta = scraper_utils.parse_movie_link(post_title)
            else:
                meta = scraper_utils.parse_episode_link(post_title)
            quality = scraper_utils.height_get_quality(meta['height'])
        if self.max_bytes:
            match = re.search('([\d.]+)\s+(.*)', size)
            if match:
                size_bytes = scraper_utils.to_bytes(*match.groups())
                if size_bytes > self.max_bytes:
                    logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%s GB)' % (post_title, size_bytes, size, self.max_bytes, self.max_gb))
                    continue
        hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
        if any(i for i in ['X265', 'HEVC'] if i in post_title.upper()):
            hoster['format'] = 'x265'
        if size:
            hoster['size'] = size
        if post_title:
            hoster['extra'] = post_title
        hosters.append(hoster)
    return hosters
def get_sources(self, video):
    """Collect sources from the 'enlaces' link list plus any embedded
    player iframes, then emit a hoster dict for each unique URL.
    """
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH:
        return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=.5)
    sources = {}
    # Pass 1: plain hoster links from the "enlaces" list.
    for _attrs, fragment in dom_parser2.parse_dom(html, 'ul', {'class': 'enlaces'}):
        for attrs, _content in dom_parser2.parse_dom(fragment, 'a', req='href'):
            stream_url = attrs['href']
            if video.video_type == VIDEO_TYPES.MOVIE:
                meta = scraper_utils.parse_movie_link(stream_url)
            else:
                meta = scraper_utils.parse_episode_link(stream_url)
            sources.update({
                stream_url: {
                    'quality': scraper_utils.height_get_quality(meta['height']),
                    'direct': False
                }
            })
    # Pass 2: player divs holding iframes (eager or lazy-loaded).
    for _attrs, fragment in dom_parser2.parse_dom(
            html, 'div', {'class': 'movieplay'}) + dom_parser2.parse_dom(
            html, 'div', {'id': re.compile('player\d+')}):
        for attrs, _content in dom_parser2.parse_dom(
                fragment, 'iframe', req='src') + dom_parser2.parse_dom(
                fragment, 'iframe', req='data-lazy-src'):
            # Prefer src; fall back to the lazy-load attribute.
            iframe_url = attrs.get('src', '')
            if not iframe_url.startswith('http'):
                iframe_url = attrs.get('data-lazy-src', '')
                if not iframe_url.startswith('http'):
                    continue
            if '//player' in iframe_url:
                # Hosted player page: fetch it and parse its sources list.
                # NOTE: `html` is deliberately reused/overwritten here.
                html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.5)
                sources.update(scraper_utils.parse_sources_list(self, html))
            else:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(iframe_url)
                else:
                    meta = scraper_utils.parse_episode_link(iframe_url)
                sources.update({
                    iframe_url: {
                        'quality': scraper_utils.height_get_quality(meta['height']),
                        'direct': False
                    }
                })
    # Pass 3: turn the accumulated map into hoster dicts.
    for stream_url, values in sources.iteritems():
        direct = values['direct']
        quality = values['quality']
        if direct:
            host = scraper_utils.get_direct_hostname(self, stream_url)
            # Direct streams carry the UA as an appended header.
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        else:
            stream_url = stream_url  # no-op; kept byte-identical
            host = urlparse.urlparse(stream_url).hostname
        hoster = {
            'multi-part': False, 'url': stream_url, 'class': self,
            'quality': quality, 'host': host, 'rating': None,
            'views': None, 'direct': direct
        }
        hosters.append(hoster)
    return hosters