def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        # Build the ajax request from the id at the end of the page url.
        ref = urlparse.urljoin(self.base_link, url)
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

        headers = {'Referer': ref, 'User-Agent': client.randomagent()}

        result = client.request(url, headers=headers, post='')
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])

        if isinstance(result, basestring):
            # Single HLS embed: fetch the master playlist and expose each variant.
            result = result.replace('embed.html', 'index.m3u8')

            base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result)

            r = client.request(result, headers=headers)
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]

            for i in r:
                sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})

        elif result:
            # List of direct mp4 links hosted on Google Video.
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]

            for i in result:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []

    try:
        if not url:
            return sources

        # Build the ajax request from the id at the end of the page url.
        ref = urlparse.urljoin(self.base_link, url)
        url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

        headers = {'Referer': ref, 'User-Agent': client.randomagent()}

        result = client.request(url, headers=headers, post='')
        result = base64.decodestring(result)
        result = json.loads(result).get('playinfo', [])

        if isinstance(result, basestring):
            # Single HLS embed: fetch the master playlist and expose each variant.
            result = result.replace('embed.html', 'index.m3u8')

            base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)

            r = client.request(result, headers=headers)
            r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
            r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
            r = [{'quality': i[0], 'url': base_url + i[1]} for i in r]

            for i in r:
                sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})

        elif result:
            # List of direct mp4 links hosted on Google Video.
            result = [i.get('link_mp4') for i in result]
            result = [i for i in result if i]

            for i in result:
                try:
                    sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass

        return sources
    except:
        return sources
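# The '#EXT-X-STREAM-INF' pattern above pairs each variant's vertical resolution
# with the playlist entry on the following line. A minimal, self-contained sketch
# of that behaviour on a made-up master playlist (playlist content and token are
# placeholders, and the example assumes RESOLUTION is followed by further
# attributes, as it is in typical master playlists):
import re

playlist = (
    '#EXTM3U\n'
    '#EXT-X-STREAM-INF:BANDWIDTH=1400000,RESOLUTION=1280x720,CODECS="avc1.4d401f"\n'
    '720/index.m3u8?token=abc123\n'
    '#EXT-X-STREAM-INF:BANDWIDTH=2800000,RESOLUTION=1920x1080,CODECS="avc1.640028"\n'
    '1080/index.m3u8?token=abc123\n'
)

print(re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', playlist, re.DOTALL))
# [('720', '720/index.m3u8?token=abc123'), ('1080', '1080/index.m3u8?token=abc123')]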
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over the site's sources and returns a list of dictionaries with the
    corresponding file locker sources and stream information.

    Keyword arguments:
    url -- string - url params

    Returns:
    sources -- list - a list of dictionaries of source information
    '''
    sources = []

    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        data['sources'] = ast.literal_eval(data['sources'])

        for i in data['sources']:
            try:
                # Query the info endpoint for this (id, server) pair.
                token = str(self.__token({'id': i[0], 'update': '0', 'ts': data['ts'], 'server': i[1]}))

                query = (self.info_path % (data['ts'], token, i[0], i[1]))
                url = urlparse.urljoin(self.base_link, query)

                info_response = client.request(url, headers={'Referer': self.base_link}, XHR=True)
                info_dict = json.loads(info_response)

                if info_dict['type'] == 'direct':
                    # Direct links: resolve them through the grabber endpoint.
                    token64 = info_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i[0], self.__decode_shift(token64, -18)))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    grabber_dict = json.loads(response)

                    if grabber_dict['error'] is not None:
                        continue

                    sources_list = grabber_dict['data']

                    for j in sources_list:
                        try:
                            quality = source_utils.label_to_quality(j['label'])
                            link = j['file']

                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)

                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
                        except Exception:
                            pass

                elif info_dict['type'] == 'iframe':
                    # External hoster embed.
                    # embed = self.__decode_shift(info_dict['target'], -18)
                    embed = info_dict['target']

                    valid, hoster = source_utils.is_host_valid(embed, hostDict)
                    if not valid:
                        continue

                    headers = {'Referer': self.base_link}
                    embed = embed + source_utils.append_headers(headers)

                    sources.append({
                        'source': hoster,
                        'quality': '720p',  # need a better way of identifying quality
                        'language': 'en',
                        'url': embed,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass

        return sources
    except Exception:
        return sources
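# The url parameter arrives as a query string whose 'sources' field is a
# stringified Python list of [id, server] pairs, which ast.literal_eval turns
# back into a list. A small illustration with made-up values (ids, server
# numbers and ts are hypothetical; urlparse is the Python 2 module used by the
# surrounding code):
import ast
import urlparse

url = "ts=1508236800&sources=[[u'x2mzw4', 28], [u'r8kpz9', 40]]"

data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
data['sources'] = ast.literal_eval(data['sources'])

print(data['ts'])       # 1508236800
print(data['sources'])  # [[u'x2mzw4', 28], [u'r8kpz9', 40]]
# Each entry supplies i[0] (the source id) and i[1] (the server) used above.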
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over the site's sources and returns a list of dictionaries with the
    corresponding file locker sources and stream information.

    Keyword arguments:
    url -- string - url params

    Returns:
    sources -- list - a list of dictionaries of source information
    '''
    sources = []

    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

        for i in data['sources']:
            # Query the info endpoint for this source id.
            token = str(self.__token({'id': i, 'update': 0, 'ts': data['ts']}))

            query = (self.info_path % (data['ts'], token, i))
            url = urlparse.urljoin(self.base_link, query)

            info_response = client.request(url, XHR=True)
            grabber_dict = json.loads(info_response)

            try:
                if grabber_dict['type'] == 'direct':
                    # Direct links: resolve them through the grabber endpoint.
                    token64 = grabber_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i, token64))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    sources_list = json.loads(response)['data']

                    for j in sources_list:
                        quality = j['label'] if not j['label'] == '' else 'SD'
                        quality = source_utils.label_to_quality(quality)

                        if 'googleapis' in j['file']:
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                            continue

                        valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                        urls, host, direct = source_utils.check_directstreams(j['file'], hoster)

                        for x in urls:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': True, 'debridonly': False})

                elif not grabber_dict['target'] == '':
                    # External hoster embed.
                    url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']

                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue

                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                    url = urls[0]['url']

                    if 'cloud.to' in host:
                        headers = {'Referer': self.base_link}
                        url = url + source_utils.append_headers(headers)

                    sources.append({'source': hoster, 'quality': urls[0]['quality'], 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            except:
                pass

        return sources
    except Exception:
        return sources
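# In this variant 'sources' is a stringified list of bare ids, and the
# character-class regex simply strips the list punctuation and the u string
# prefixes. A quick illustration with placeholder ids (note the class also
# excludes the letter 'u' itself, so ids containing 'u' would be split):
import re

raw = "[u'x2mzw4', u'r8kpz9']"
print(re.findall("[^', u\]\[]+", raw))  # ['x2mzw4', 'r8kpz9']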
def sources(self, url, hostDict, hostprDict):
    '''
    Loops over the site's sources and returns a list of dictionaries with the
    corresponding file locker sources and stream information.

    Keyword arguments:
    url -- string - url params

    Returns:
    sources -- list - a list of dictionaries of source information
    '''
    sources = []

    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        data['sources'] = ast.literal_eval(data['sources'])

        for i in data['sources']:
            try:
                # Query the info endpoint for this (id, server) pair.
                token = str(self.__token({'id': i[0], 'update': '0', 'ts': data['ts'], 'server': i[1]}))

                query = (self.info_path % (data['ts'], token, i[0], i[1]))
                url = urlparse.urljoin(self.base_link, query)

                info_response = client.request(url, headers={'Referer': self.base_link}, XHR=True)
                info_dict = json.loads(info_response)

                if info_dict['type'] == 'direct':
                    # Direct links: resolve them through the grabber endpoint.
                    token64 = info_dict['params']['token']
                    query = (self.grabber_path % (data['ts'], i[0], self.__decode_shift(token64, -18)))
                    url = urlparse.urljoin(self.base_link, query)

                    response = client.request(url, XHR=True)
                    grabber_dict = json.loads(response)

                    if grabber_dict['error'] is not None:
                        continue

                    sources_list = grabber_dict['data']

                    for j in sources_list:
                        try:
                            quality = source_utils.label_to_quality(j['label'])
                            link = j['file']

                            if 'lh3.googleusercontent' in link:
                                link = directstream.googleproxy(link)

                            # This variant reports every direct link as SD even
                            # though the label is parsed above.
                            sources.append({'source': 'gvideo', 'quality': 'SD', 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
                        except Exception:
                            pass

                elif info_dict['type'] == 'iframe':
                    # External hoster embed.
                    # embed = self.__decode_shift(info_dict['target'], -18)
                    embed = info_dict['target']

                    valid, hoster = source_utils.is_host_valid(embed, hostDict)
                    if not valid:
                        continue

                    headers = {'Referer': self.base_link}
                    embed = embed + source_utils.append_headers(headers)

                    sources.append({
                        'source': hoster,
                        'quality': '720p',  # need a better way of identifying quality
                        'language': 'en',
                        'url': embed,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass

        return sources
    except Exception:
        return sources
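# Both variants hand Kodi a playable URL with the required request headers
# appended after a '|' separator via source_utils.append_headers. The helper's
# real implementation lives in source_utils; the sketch below is an assumption
# about that behaviour (a urlencoded header string behind a pipe), not the
# module's verified code:
import urllib

def append_headers(headers):
    # Assumed Kodi-style 'url|Header=value&Header2=value2' suffix.
    return '|%s' % urllib.urlencode(headers)

headers = {'Referer': 'https://example.host/'}
print('https://example.host/embed/abc123' + append_headers(headers))
# https://example.host/embed/abc123|Referer=https%3A%2F%2Fexample.host%2F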