def __get_gk_links(self, link, iframe_url):
    sources = {}
    data = {'link': link}
    headers = dict(XHR)  # copy so the shared XHR constant isn't mutated
    headers.update({'Referer': iframe_url, 'User-Agent': USER_AGENT})
    html = self._http_get(GK_URL, data=data, headers=headers, cache_limit=.25)
    js_data = scraper_utils.parse_json(html, GK_URL)
    if 'link' in js_data:
        if isinstance(js_data['link'], basestring):
            stream_url = js_data['link']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                for source in scraper_utils.parse_google(self, stream_url):
                    sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
            else:
                sources[stream_url] = {'quality': QUALITIES.HIGH, 'direct': False}
        else:
            for link in js_data['link']:
                stream_url = link['link']
                if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in link:
                    quality = scraper_utils.height_get_quality(link['label'])
                else:
                    quality = QUALITIES.HIGH
                sources[stream_url] = {'quality': quality, 'direct': True}
    return sources

def get_sources(self, video):
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=8)
    fragment = dom_parser2.parse_dom(html, 'div', {'class': 'playex'})
    if fragment: html = fragment[0].content
    iframe_url = dom_parser2.parse_dom(html, 'iframe', req='src')
    if not iframe_url: return hosters
    iframe_url = iframe_url[0].attrs['src']
    if iframe_url.startswith('/'): iframe_url = scraper_utils.urljoin(self.base_url, iframe_url)
    html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.5)
    obj = dom_parser2.parse_dom(html, 'object', req='data')
    if obj:
        streams = dict((stream_url, {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True})
                       for stream_url in scraper_utils.parse_google(self, obj[0].attrs['data']))
    else:
        streams = scraper_utils.parse_sources_list(self, html)
    for stream_url, values in streams.iteritems():
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = values['quality']
        stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
        source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'direct': True}
        hosters.append(source)
    return hosters

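# NOTE: several scrapers in this file lean on scraper_utils.parse_sources_list(self, html)
# to turn a jwplayer-style "sources: [{file: ..., label: ...}]" block into a
# {stream_url: {'quality': ..., 'direct': ...}} dict. A rough, hypothetical sketch of that
# contract (the real helper lives in scraper_utils and may differ, e.g. in how it copes
# with unquoted JS keys):
def _parse_sources_list_sketch(html):
    import json
    import re
    sources = {}
    match = re.search('sources\s*:\s*(\[.*?\])', html, re.DOTALL)
    if match:
        try:
            for source in json.loads(match.group(1)):
                stream_url = source.get('file')
                if stream_url:
                    # callers re-derive gvideo quality themselves, so the label is
                    # only kept as a fallback quality hint
                    sources[stream_url] = {'quality': source.get('label', ''), 'direct': True}
        except ValueError:
            pass  # not strict JSON; the real helper presumably tolerates this
    return sources
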
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        match = re.search('var\s*video_id="([^"]+)', html)
        if match:
            video_id = match.group(1)
            data = {'v': video_id}
            headers = {'Referer': page_url}
            headers.update(XHR)
            html = self._http_get(self.info_url, data=data, headers=headers, cache_limit=0)
            sources = scraper_utils.parse_json(html, self.info_url)
            for source in sources:
                match = re.search('url=(.*)', sources[source])
                if match:
                    stream_url = urllib.unquote(match.group(1))
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        quality = scraper_utils.height_get_quality(source)
                    stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                              'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                    hosters.append(hoster)
    return hosters

def __get_links_from_xml(self, xml, video):
    sources = {}
    try:
        root = ET.fromstring(xml)
        for item in root.findall('.//item'):
            title = item.find('title').text
            for source in item.findall('{http://rss.jwpcdn.com/}source'):
                stream_url = source.get('file')
                label = source.get('label')
                if self._get_direct_hostname(stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif label:
                    quality = scraper_utils.height_get_quality(label)
                else:
                    quality = scraper_utils.blog_get_quality(video, title, '')
                sources[stream_url] = {'quality': quality, 'direct': True}
                log_utils.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
    except Exception as e:
        log_utils.log('Exception during 123Movies XML Parse: %s' % (e), log_utils.LOGWARNING)
    return sources

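# For reference, __get_links_from_xml above expects a jwplayer RSS playlist shaped
# roughly like this hypothetical sample (structure inferred from its ElementTree
# lookups: './/item', a <title> child, and namespaced <source file=... label=...>):
SAMPLE_PLAYLIST_XML = '''<rss xmlns:jwplayer="http://rss.jwpcdn.com/">
  <channel>
    <item>
      <title>Example.Movie.2016.720p</title>
      <jwplayer:source file="http://example.com/video.mp4" label="720p" />
    </item>
  </channel>
</rss>'''
# ET.fromstring(SAMPLE_PLAYLIST_XML).findall('.//item') yields the one item, and
# item.findall('{http://rss.jwpcdn.com/}source') yields its single source element.
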
def __get_gk_links(self, html, page_url):
    sources = {}
    match = re.search('{link\s*:\s*"([^"]+)', html)
    if match:
        data = {'link': match.group(1)}
        url = urlparse.urljoin(self.base_url, LINK_URL)
        headers = {'Referer': page_url}
        html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
        js_data = scraper_utils.parse_json(html, url)
        if 'link' in js_data:
            for link in js_data['link']:
                if 'type' in link and link['type'] == 'mp4' and 'link' in link:
                    if self._get_direct_hostname(link['link']) == 'gvideo':
                        quality = scraper_utils.gv_get_quality(link['link'])
                    elif 'label' in link:
                        quality = scraper_utils.height_get_quality(link['label'])
                    else:
                        quality = QUALITIES.HIGH
                    sources[link['link']] = quality
    return sources

def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    sources = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=1)
    iframes = dom_parser2.parse_dom(html, 'iframe', req='src')
    # follow nested iframes: appending to the list while iterating it makes the
    # loop pick up each newly discovered frame until a google docs link is found
    for attrs, _content in iframes:
        iframe_url = attrs['src']
        if 'docs.google.com' in iframe_url:
            sources = scraper_utils.parse_google(self, iframe_url)
            break
        else:
            iframe_url = scraper_utils.urljoin(self.base_url, iframe_url)
            html = self._http_get(iframe_url, cache_limit=1)
            iframes += dom_parser2.parse_dom(html, 'iframe', req='src')
    for source in sources:
        host = scraper_utils.get_direct_hostname(self, source)
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': scraper_utils.gv_get_quality(source),
                  'views': None, 'rating': None, 'url': source, 'direct': True}
        hosters.append(hoster)
    return hosters

def __grab_links(self, grab_url, query, referer):
    sources = {}  # defined outside the try so the return below can't hit a NameError
    try:
        query['mobile'] = '0'
        query.update(self.__get_token(query))
        grab_url = grab_url + '?' + urllib.urlencode(query)
        headers = dict(XHR)  # copy so the shared XHR constant isn't mutated
        headers['Referer'] = referer
        html = self._http_get(grab_url, headers=headers, cache_limit=.5)
        js_data = scraper_utils.parse_json(html, grab_url)
        if 'data' in js_data:
            for link in js_data['data']:
                stream_url = link['file']
                if self._get_direct_hostname(stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in link:
                    quality = scraper_utils.height_get_quality(link['label'])
                else:
                    quality = QUALITIES.HIGH
                sources[stream_url] = {'direct': False, 'quality': quality}
    except Exception as e:
        log_utils.log('9Movies Link Parse Error: %s' % (e), log_utils.LOGWARNING)
    return sources

def __get_gk_links(self, html):
    sources = {}
    match = re.search('{link\s*:\s*"([^"]+)', html)
    if match:
        iframe_url = match.group(1)
        data = {'link': iframe_url}
        headers = {'Referer': iframe_url}
        html = self._http_get(self.gk_url, data=data, headers=headers, cache_limit=.5)
        js_data = scraper_utils.parse_json(html, self.gk_url)
        links = js_data.get('link', [])
        if isinstance(links, basestring): links = [{'link': links}]
        for link in links:
            stream_url = link['link']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
                direct = True
            elif 'label' in link:
                quality = scraper_utils.height_get_quality(link['label'])
                direct = True
            else:
                quality = QUALITIES.HIGH
                direct = False
            sources[stream_url] = {'quality': quality, 'direct': direct}
    return sources

def __add_sources(self, sources, video, quality=QUALITIES.HIGH):
    hosters = []
    for source in sources:
        host = self._get_direct_hostname(source)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(source)
            stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
            direct = True
        else:
            host = urlparse.urlparse(source).hostname
            quality = scraper_utils.get_quality(video, host, quality)
            stream_url = source
            direct = False
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
        hosters.append(hoster)
    return hosters

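# __add_sources above appends the Kodi-style '|User-Agent=...' suffix by hand, while
# most other scrapers here call scraper_utils.append_headers for the same job. A
# minimal sketch of that helper, assuming it simply URL-encodes each header after a
# pipe (the real implementation may differ):
def _append_headers_sketch(headers):
    import urllib
    return '|%s' % ('&'.join(['%s=%s' % (key, urllib.quote_plus(str(value)))
                              for key, value in headers.items()]))
# e.g. _append_headers_sketch({'User-Agent': 'Mozilla/5.0'}) -> '|User-Agent=Mozilla%2F5.0'
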
def __get_json_links(self, html, sub):
    hosters = []
    js_data = scraper_utils.parse_json(html)
    if 'sources' in js_data:
        for source in js_data.get('sources', []):
            stream_url = source.get('file')
            if stream_url is None: continue
            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in source:
                quality = scraper_utils.height_get_quality(source['label'])
            else:
                quality = QUALITIES.HIGH
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                      'views': None, 'rating': None, 'url': stream_url, 'direct': True, 'subs': sub}
            hosters.append(hoster)
    return hosters

def __get_links_from_xml(self, url, video, page_url, cookies):
    sources = {}
    try:
        headers = {'Referer': page_url}
        xml = self._http_get(url, cookies=cookies, headers=headers, cache_limit=.5)
        root = ET.fromstring(xml)
        for item in root.findall('.//item'):
            title = item.find('title').text
            if title and title.upper() == 'OOPS!': continue
            # jwplayer RSS namespace, as in the parallel 123Movies XML parser above
            for source in item.findall('{http://rss.jwpcdn.com/}source'):
                stream_url = source.get('file')
                label = source.get('label')
                if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif label:
                    quality = scraper_utils.height_get_quality(label)
                elif title:
                    quality = scraper_utils.blog_get_quality(video, title, '')
                else:
                    quality = scraper_utils.blog_get_quality(video, stream_url, '')
                sources[stream_url] = {'quality': quality, 'direct': True}
                logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
    except Exception as e:
        logger.log('Exception during 123Movies XML Parse: %s' % (e), log_utils.LOGWARNING)
    return sources

def __get_links_from_json2(self, url, page_url, video_type):
    sources = {}
    headers = {'Referer': page_url}
    headers.update(XHR)
    html = self._http_get(url, headers=headers, cache_limit=0)
    js_data = scraper_utils.parse_json(html, url)
    try:
        playlist = js_data.get('playlist', [])
        for source in playlist[0].get('sources', []):
            stream_url = source['file']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in source:
                quality = scraper_utils.height_get_quality(source['label'])
            else:
                if video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(stream_url)
                else:
                    meta = scraper_utils.parse_episode_link(stream_url)
                quality = scraper_utils.height_get_quality(meta['height'])
            sources[stream_url] = {'quality': quality, 'direct': True}
            logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
    except Exception as e:
        logger.log('Exception during ymovies extract: %s' % (e), log_utils.LOGDEBUG)
    return sources

def __create_source(self, stream_url, height, page_url, subs=False, direct=True):
    if direct:
        stream_url = stream_url.replace('\\/', '/')
        if self.get_name().lower() in stream_url:
            headers = {'Referer': page_url}
            redir_url = self._http_get(stream_url, headers=headers, method='HEAD', allow_redirect=False, cache_limit=.25)
            if redir_url.startswith('http'):
                stream_url = redir_url
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            else:
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(),
                                                            'Referer': page_url,
                                                            'Cookie': self._get_stream_cookies()})
        else:
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
        host = scraper_utils.get_direct_hostname(self, stream_url)
    else:
        host = urlparse.urlparse(stream_url).hostname
    if host == 'gvideo':
        quality = scraper_utils.gv_get_quality(stream_url)
    else:
        quality = scraper_utils.height_get_quality(height)
    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
              'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
    if subs: hoster['subs'] = 'Turkish Subtitles'
    return hoster

def get_sources(self, video):
    hosters = []
    sources = {}
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=.5)
    match = re.search("load_player\('([^']+)", html)
    if not match: return hosters
    headers = {'Referer': page_url, 'Server': 'cloudflare-nginx', 'Accept': 'text/html, */*; q=0.01',
               'Accept-Language': 'en-US,en;q=0.5', 'Accept-Formating': 'application/json, text/javascript',
               'Accept-Encoding': 'gzip, deflate'}
    headers.update(XHR)
    params = {'id': match.group(1)}
    player_url = scraper_utils.urljoin(self.base_url, PLAYER_URL)
    html = self._http_get(player_url, params=params, headers=headers, cache_limit=1)
    js_data = scraper_utils.parse_json(html, player_url)
    pl_url = js_data.get('value') or js_data.get('download')
    if not pl_url: return hosters
    headers = {'Referer': page_url}
    if pl_url.startswith('//'): pl_url = 'https:' + pl_url
    html = self._http_get(pl_url, headers=headers, allow_redirect=False, cache_limit=0)
    if html.startswith('http'):
        streams = [(html, '')]
    else:
        js_data = scraper_utils.parse_json(html, pl_url)
        try:
            streams = [(source['file'], source.get('label', '')) for source in js_data['playlist'][0]['sources']]
        except:
            streams = []
    for stream in streams:
        stream_url, label = stream
        if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
            sources[stream_url] = {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True}
        else:
            if label:
                quality = scraper_utils.height_get_quality(label)
            else:
                quality = QUALITIES.HIGH
            sources[stream_url] = {'quality': quality, 'direct': False}
    for source, value in sources.iteritems():
        direct = value['direct']
        quality = value['quality']
        if direct:
            host = scraper_utils.get_direct_hostname(self, source)
        else:
            host = urlparse.urlparse(source).hostname
        stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
        hosters.append(hoster)
    return hosters

def __get_sources(self, html, label):
    sources = {}
    for attrs, _label in dom_parser2.parse_dom(html, 'source', req='src'):
        if scraper_utils.get_direct_hostname(self, attrs['src']) == 'gvideo':
            quality = scraper_utils.gv_get_quality(attrs['src'])
        else:
            quality = Q_MAP.get(label.upper(), QUALITIES.HIGH)
        sources[attrs['src']] = {'direct': False, 'quality': quality}
    return sources

def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=10)
    hosts = [r.content for r in dom_parser2.parse_dom(html, 'p', {'class': 'server_servername'})]
    links = [r.content for r in dom_parser2.parse_dom(html, 'p', {'class': 'server_play'})]
    for host, link_frag in zip(hosts, links):
        stream_url = dom_parser2.parse_dom(link_frag, 'a', req='href')
        if not stream_url: continue
        stream_url = stream_url[0].attrs['href']
        host = re.sub('^Server\s*', '', host, flags=re.I)  # re.I must be passed as flags; positionally this slot is count
        host = re.sub('\s*Link\s+\d+', '', host)
        if host.lower() == 'google':
            sources = self.__get_gvideo_links(stream_url)
        else:
            sources = [{'host': host, 'link': stream_url}]
        for source in sources:
            host = scraper_utils.get_direct_hostname(self, source['link'])
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source['link'])
                stream_url = source['link'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                direct = True
            else:
                stream_url = scraper_utils.pathify_url(source['link'])
                host = HOST_SUB.get(source['host'].lower(), source['host'])
                quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
                direct = False
            hoster = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality,
                      'views': None, 'rating': None, 'direct': direct}
            hosters.append(hoster)
    return hosters

def __get_gk_links2(self, html):
    sources = {}
    match = re.search('base64\.decode\("([^"]+)', html, re.I)
    if match:
        match = re.search('proxy\.link=tunemovie\*([^&]+)', base64.b64decode(match.group(1)))
        if match:
            picasa_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY, match.group(1))
            g_links = self._parse_google(picasa_url)
            for link in g_links:
                sources[link] = scraper_utils.gv_get_quality(link)
    return sources

def get_sources(self, video):
    source_url = self.get_url(video)
    sources = []
    if not source_url or source_url == FORCE_NO_MATCH: return sources
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=8)
    for attrs, _content in dom_parser2.parse_dom(html, 'img', req=['data-id', 'data-name']):
        film_id, data_name = attrs['data-id'], attrs['data-name']
        data = {'id': film_id, 'n': data_name}
        server_url = scraper_utils.urljoin(self.base_url, SERVER_URL)
        server_url = server_url % (film_id)
        headers = {'Referer': page_url}
        headers.update(XHR)
        html = self._http_get(server_url, data=data, headers=headers, cache_limit=.5)
        for attrs, _content in dom_parser2.parse_dom(html, 'a', req='data-id'):
            data = {'epid': attrs['data-id']}
            ep_url = scraper_utils.urljoin(self.base_url, EP_URL)
            ep_url = ep_url % (attrs['data-id'])
            headers = {'Referer': page_url}
            headers.update(XHR)
            html = self._http_get(ep_url, data=data, headers=headers, cache_limit=.5)
            js_data = scraper_utils.parse_json(html, ep_url)
            try:
                links = [r.attrs['src'] for r in dom_parser2.parse_dom(js_data['link']['embed'], 'iframe', req='src')]
            except:
                try: links = js_data['link']['l']
                except: links = []
            try: heights = js_data['link']['q']
            except: heights = []
            # map(None, ...) is the Python 2 zip that pads the shorter list with None
            for stream_url, height in map(None, links, heights):
                match = re.search('movie_url=(.*)', stream_url)
                if match: stream_url = match.group(1)
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                    stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
                    direct = True
                else:
                    host = urlparse.urlparse(stream_url).hostname
                    if height:
                        quality = scraper_utils.height_get_quality(height)
                    else:
                        quality = QUALITIES.HD720
                    direct = False
                source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality,
                          'views': None, 'rating': None, 'direct': direct}
                sources.append(source)
    return sources

def get_sources(self, video):
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
    fragment = dom_parser2.parse_dom(html, 'table', {'class': 'links-table'})
    if not fragment: return hosters
    for _attrs, row in dom_parser2.parse_dom(fragment[0].content, 'tr'):
        match = re.search("playVideo\.bind\(.*?'([^']+)(?:[^>]*>){2}(.*?)</td>", row, re.DOTALL)
        if not match: continue
        stream_url, release = match.groups()
        if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
            sources = scraper_utils.parse_google(self, stream_url)
        else:
            sources = [stream_url]
        for source in sources:
            host = scraper_utils.get_direct_hostname(self, source)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source)
                direct = True
            else:
                host = urlparse.urlparse(source).hostname
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(release)
                else:
                    meta = scraper_utils.parse_episode_link(release)
                base_quality = scraper_utils.height_get_quality(meta['height'])
                quality = scraper_utils.get_quality(video, host, base_quality)
                direct = False
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                      'views': None, 'rating': None, 'url': source, 'direct': direct}
            hosters.append(hoster)
    return hosters

def __get_gk_links2(self, html):
    sources = {}
    match = re.search('proxy\.link=([^"&]+)', html)
    if match:
        proxy_link = match.group(1)
        proxy_link = proxy_link.split('*', 1)[-1]
        if len(proxy_link) <= 224:
            vid_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY1, proxy_link)
        else:
            vid_url = scraper_utils.gk_decrypt(self.get_name(), GK_KEY2, proxy_link)
        if scraper_utils.get_direct_hostname(self, vid_url) == 'gvideo':
            for source in self._parse_gdocs(vid_url):
                sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
    return sources

def __get_sources(self, html):
    sources = scraper_utils.parse_sources_list(self, html)
    for source in dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req='src') + dom_parser2.parse_dom(html, 'iframe', req='src'):
        source = source.attrs['src']
        if scraper_utils.get_direct_hostname(self, source) == 'gvideo':
            quality = scraper_utils.gv_get_quality(source)
            direct = True
        else:
            quality = QUALITIES.HD720
            direct = False
        sources[source] = {'quality': quality, 'direct': direct}
    return self.__proc_sources(sources)

def get_sources(self, video):
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=.5)
    iframe_url = dom_parser2.parse_dom(html, 'iframe', {'id': 'myiframe'}, req='src', exclude_comments=True)
    if not iframe_url: return hosters
    iframe_url = iframe_url[0].attrs['src']
    html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.5)
    for source in dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req=['src', 'data-res']):
        stream_url = source.attrs['src']
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        else:
            quality = scraper_utils.height_get_quality(source.attrs['data-res'])
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
        source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'direct': True}
        hosters.append(source)
    return hosters

def __get_gk_links(self, html, page_url):
    sources = {}
    for link in dom_parser.parse_dom(html, 'div', {'class': '[^"]*server_line[^"]*'}):
        film_id = dom_parser.parse_dom(link, 'a', ret='data-film')
        name_id = dom_parser.parse_dom(link, 'a', ret='data-name')
        server_id = dom_parser.parse_dom(link, 'a', ret='data-server')
        if film_id and name_id and server_id:
            data = {'ipplugins': 1, 'ip_film': film_id[0], 'ip_server': server_id[0], 'ip_name': name_id[0]}
            headers = dict(XHR)  # copy so the shared XHR constant isn't mutated
            headers['Referer'] = page_url
            url = urlparse.urljoin(self.base_url, LINK_URL)
            html = self._http_get(url, data=data, headers=headers, cache_limit=.25)
            js_data = scraper_utils.parse_json(html, url)
            if 's' in js_data:
                url = urlparse.urljoin(self.base_url, LINK_URL2)
                params = {'u': js_data['s'], 'w': '100%', 'h': 420}
                html = self._http_get(url, params=params, data=data, headers=headers, cache_limit=.25)
                js_data = scraper_utils.parse_json(html, url)
                if 'data' in js_data and js_data['data']:
                    if isinstance(js_data['data'], basestring):
                        sources[js_data['data']] = QUALITIES.HIGH
                    else:
                        for link in js_data['data']:
                            stream_url = link['files']
                            if self._get_direct_hostname(stream_url) == 'gvideo':
                                quality = scraper_utils.gv_get_quality(stream_url)
                            elif 'quality' in link:
                                quality = scraper_utils.height_get_quality(link['quality'])
                            else:
                                quality = QUALITIES.HIGH
                            sources[stream_url] = quality
    return sources

def get_sources(self, video):
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(page_url, cache_limit=0)
    match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
    if not match: return hosters
    video_id = match.group(1)
    headers = {'Referer': page_url}
    headers.update(XHR)
    _html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
    vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
    html = self._http_get(vid_url, data={'v': video_id}, headers=headers, cache_limit=0)
    for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
        match = re.search('url=(.*)', value)
        if not match: continue
        stream_url = urllib.unquote(match.group(1))
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(source)
        stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'url': stream_url, 'direct': True}
        hosters.append(hoster)
    return hosters

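# This scraper and the earlier video_id-based one both expect each JSON value to bury
# the real stream behind a 'url=' fragment. A tiny self-contained illustration of the
# extraction, with a hypothetical payload:
def _extract_stream_url_sketch(value):
    import re
    import urllib
    match = re.search('url=(.*)', value)
    return urllib.unquote(match.group(1)) if match else None
# _extract_stream_url_sketch('r=1&url=http%3A%2F%2Fexample.com%2Fv.mp4')
# returns 'http://example.com/v.mp4'
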
def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    sources = {}
    if source_url and source_url != FORCE_NO_MATCH:
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        html = self.__get_players(html, page_url)
        players = list(set(re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", html)))
        player_url = urlparse.urljoin(self.base_url, PLAYER_URL)
        for link_id, height in players:
            params = {'id': link_id, 'quality': height, '_': int(time.time() * 1000)}
            player_url2 = player_url + '?' + urllib.urlencode(params)
            headers = {'Referer': page_url, 'Accept-Encoding': 'gzip, deflate', 'Server': 'cloudflare-nginx',
                       'Accept-Formating': 'application/json, text/javascript'}
            headers.update(XHR)
            html = self._http_get(player_url2, headers=headers, cache_limit=0)
            js_data = scraper_utils.parse_json(html, player_url)
            if 'link' in js_data and js_data['link']:
                link_url = js_data['link']
                if 'player_v2.php' in link_url:
                    headers = {'Referer': page_url}
                    html = self._http_get(link_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=.25)
                    if html.startswith('http'):
                        if self._get_direct_hostname(html) == 'gvideo':
                            quality = scraper_utils.gv_get_quality(html)
                            sources[html] = {'quality': quality, 'direct': True}
                        else:
                            if height != '0':
                                quality = scraper_utils.height_get_quality(height)
                            else:
                                quality = QUALITIES.HIGH
                            sources[html] = {'quality': quality, 'direct': False}
                        if not kodi.get_setting('scraper_url') and Q_ORDER[quality] >= Q_ORDER[QUALITIES.HD720]: break
        for source in sources:
            direct = sources[source]['direct']
            quality = sources[source]['quality']
            if direct:
                host = self._get_direct_hostname(source)
            else:
                host = urlparse.urlparse(source).hostname
            stream_url = source + '|User-Agent=%s' % (scraper_utils.get_ua())
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                      'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            hosters.append(hoster)
    return hosters

def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    page_url = scraper_utils.urljoin(self.base_url, source_url)
    if video.video_type == VIDEO_TYPES.EPISODE:
        html = self.__episode_match(video, source_url)
        sources = [r.attrs['data-click'] for r in dom_parser2.parse_dom(html, 'div', req='data-click') + dom_parser2.parse_dom(html, 'li', req='data-click')]
    else:
        sources = self.__get_movie_sources(page_url)
    sources = [source.strip() for source in sources if source]
    headers = {'Referer': page_url}
    for source in sources:
        if source.startswith('http'):
            direct = False
            quality = QUALITIES.HD720
            host = urlparse.urlparse(source).hostname
        else:
            source = self.__get_linked_source(source, headers)
            if source is None: continue
            direct = True
            host = scraper_utils.get_direct_hostname(self, source)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source)
            else:
                quality = QUALITIES.HIGH  # default for non-gvideo direct links
        hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'url': source, 'direct': direct}
        hosters.append(hoster)
    return hosters

def get_sources(self, video):
    hosters = []
    source_url = self.get_url(video)
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    url = scraper_utils.urljoin(self.base_url, source_url)
    html = self._http_get(url, cache_limit=8)
    fragment = dom_parser2.parse_dom(html, 'div', {'class': 'playex'})
    if fragment: html = fragment[0].content
    links = scraper_utils.parse_sources_list(self, html)
    for link in links:
        stream_url = link
        if self.base_url in link:
            redir_url = self._http_get(link, headers={'Referer': url}, allow_redirect=False, method='HEAD')
            if redir_url.startswith('http'):
                stream_url = redir_url
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = links[link]['quality']
        stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})
        source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality,
                  'views': None, 'rating': None, 'direct': True}
        hosters.append(source)
    return hosters

def get_sources(self, video):
    source_url = self.get_url(video)
    hosters = []
    if not source_url or source_url == FORCE_NO_MATCH: return hosters
    js_url = scraper_utils.urljoin(self.base_url, '/javascript/movies.js')
    html = self._http_get(js_url, cache_limit=48)
    if source_url.startswith('/'):
        source_url = source_url[1:]
    pattern = '''getElementById\(\s*"%s".*?play\(\s*'([^']+)''' % (source_url)
    match = re.search(pattern, html, re.I)
    if match:
        stream_url = match.group(1)
        if 'drive.google' in stream_url or 'docs.google' in stream_url:
            sources = scraper_utils.parse_google(self, stream_url)
        else:
            sources = [stream_url]
        for source in sources:
            stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            host = scraper_utils.get_direct_hostname(self, source)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(source)
                direct = True
            elif 'youtube' in stream_url:
                quality = QUALITIES.HD720
                direct = False
                host = 'youtube.com'
            else:
                quality = QUALITIES.HIGH
                direct = True
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                      'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            hosters.append(hoster)
    return hosters

def __get_king_links(self, iframe_url):
    hosters = []
    match = re.search('v=(.*)', iframe_url)
    if match:
        data = {'ID': match.group(1)}
        headers = {'Referer': iframe_url}
        headers.update(XHR)
        xhr_url = iframe_url.split('?')[0]
        html = self._http_get(xhr_url, params={'p': 'GetVideoSources'}, data=data, headers=headers, cache_limit=.5)
        js_data = scraper_utils.parse_json(html, xhr_url)
        try:
            for source in js_data['VideoSources']:
                stream_url = source['file'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                host = scraper_utils.get_direct_hostname(self, source['file'])
                label = source.get('label', '')
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(source['file'])
                elif re.search('\d+p?', label):
                    quality = scraper_utils.height_get_quality(label)
                else:
                    quality = QUALITY_MAP.get(label, QUALITIES.HIGH)
                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality,
                          'views': None, 'rating': None, 'url': stream_url, 'direct': True,
                          'subs': 'Turkish Subtitles'}
                hosters.append(hoster)
        except:
            pass
    return hosters

def __get_ajax(self, html, page_url):
    sources = {}
    pattern = '\$\.ajax\(\s*"([^"]+)'
    match = re.search(pattern, html)
    if not match: return sources
    post_url = match.group(1)
    headers = {'Referer': page_url}
    html = self._http_get(post_url, headers=headers, cache_limit=.5)
    js_result = scraper_utils.parse_json(html, post_url)
    for key in js_result:
        stream_url = js_result[key]
        host = scraper_utils.get_direct_hostname(self, stream_url)
        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(key)
        sources[stream_url] = quality
    return sources

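# The __get_* helpers in this file return plain {stream_url: quality} or
# {stream_url: {'quality': ..., 'direct': ...}} maps that a caller then folds into
# hoster dicts. A hypothetical caller-side sketch mirroring the get_sources pattern
# used throughout this file, for a simple {url: quality} map of non-direct links:
def _sources_to_hosters_sketch(scraper, sources):
    import urlparse
    hosters = []
    for stream_url, quality in sources.iteritems():
        hoster = {'multi-part': False, 'host': urlparse.urlparse(stream_url).hostname,
                  'class': scraper, 'quality': quality, 'views': None, 'rating': None,
                  'url': stream_url, 'direct': False}
        hosters.append(hoster)
    return hosters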