def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Build a list of playable source dicts from pre-scraped entries.

    `url` is an iterable of dicts carrying 'file', 'page', 'poster',
    'label' and 'srt'; the site label is mapped to a quality/rip-type
    pair and each entry is resolved through resolvers.createMeta.
    Returns the collected source dicts (possibly empty).
    """
    sources = []
    try:
        # Honour the per-provider enable/disable switch from settings.
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources

        # Site labels -> quality / rip type; anything unknown falls back
        # to 480p / BRRIP.
        TYPES_QUAL = {'SD':'480p', '3D SD':'480p', '3D FullHD':'1080p'}
        TYPES_RIP = {'SD':'BRRIP', '3D SD':'3D-BRRIP', '3D FullHD':'3D-BRRIP'}

        collected = []
        for entry in url:
            try:
                src_file = entry['file']
                page = entry['page']
                poster = entry['poster']
                label = entry['label']
                srt = entry['srt']
                qual = TYPES_QUAL.get(label, '480p')
                riptype = TYPES_RIP.get(label, 'BRRIP')
                req_headers = {'Referer': page, 'User-Agent': self.user_agent}
                try:
                    metas = resolvers.createMeta(src_file, self.name, self.logo, qual, [], key, riptype, testing=testing, sub_url=srt, headers=req_headers, poster=poster)
                    collected.extend([m for m in metas if m != None and 'key' in m.keys()])
                except:
                    pass
                # In test mode a single working link is enough.
                if testing == True and len(collected) > 0:
                    break
            except:
                pass

        sources = [m for m in collected if m != None and 'key' in m.keys()]

        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Resolve pre-scraped link dicts into playable source metadata.

    Each entry in `url` is expected to carry 'link', 'qual', 'rip' and
    'poster'; entries are fed one by one to resolvers.createMeta, which
    accumulates results in a running list.
    """
    sources = []
    try:
        # Respect the user's per-provider toggle.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources

        metas = []
        for entry in url:
            try:
                metas = resolvers.createMeta(entry['link'], self.name, self.logo, entry['qual'], metas, key, poster=entry['poster'], riptype=entry['rip'], testing=testing)
                # In test mode stop at the first working link.
                if testing == True and len(metas) > 0:
                    break
            except:
                pass

        sources.extend(metas)

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Turn alluc search results into resolved source metadata.

    `url` is a list of dicts with 'url', 'title' and 'lang'.  Split
    archives (".partN" links) are skipped, a quality tag is guessed
    from the link title/url, and each candidate is handed to
    resolvers.createMeta.  Returns the accumulated links list.
    """
    links = []
    try:
        if url == None:
            return links
        for link in url:
            # Skip multi-part archives and anything without a scheme separator.
            if re.match('((?!\.part[0-9]).)*$', link['url'], flags=re.IGNORECASE) and '://' in link['url']:
                # Kept even though unused: the index [0] acts as a sanity
                # check on the netloc — a malformed host aborts via the
                # outer except, as in the original.
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link['url'].strip().lower()).netloc)[0].split('.')[0]
                scheme = urlparse.urlparse(link['url']).scheme
                if scheme:
                    # FIX: the original tested link['url'] twice for '1080';
                    # mirror the 720 branch and inspect the title as well.
                    if '1080' in link['title'] or '1080' in link['url']:
                        quality = "1080p"
                    elif '720' in link['title'] or '720' in link['url']:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    links = resolvers.createMeta(link['url'], self.name, self.logo, quality, links, key, lang=link['lang'])
                    if testing and len(links) > 0:
                        break
        return links
    except Exception as e:
        control.log('ERROR ALLUC %s' % e)
        return links
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Resolve sources via the embedded `self.flix` scraper.

    Delegates the scrape to self.flix.get_sources(url) and resolves each
    returned item (dicts with 'url', 'quality', 'poster') through
    resolvers.createMeta, skipping hosts listed in self.avoidHosts.
    """
    try:
        sources = []
        # Per-provider enable/disable switch.
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        links_m = []
        # The actual scraping is done by the wrapped flix scraper.
        data = self.flix.get_sources(url)
        for d in data:
            vidurl = d['url']
            quality = d['quality']
            poster = d['poster']
            headers = {'Referer':url}
            try:
                # Skip blacklisted hosts entirely.
                if client.geturlhost(vidurl) not in self.avoidHosts:
                    l = resolvers.createMeta(vidurl, self.name, self.logo, quality, [], key, poster=poster, testing=testing, headers=headers)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                # NOTE(review): in test mode this stops after the first
                # candidate regardless of whether a link was found —
                # confirm that is intended (other providers gate on
                # len(links_m) > 0).
                if testing == True:
                    break
            except Exception as e:
                log('ERROR', 'get_sources-0', '%s' % e, dolog=not testing)
        # Keep only well-formed source dicts.
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape a title page, unpack its obfuscated player scripts and
    resolve the embedded stream URLs.

    Fetches the page at `url`, rewrites the URL for TV episodes, pulls
    trailers (when not testing), then for each link row unpacks the
    eval'd JavaScript with jsunpack to reconstruct host+loc stream URLs
    which are passed to resolvers.createMeta.
    """
    try:
        sources = []
        # Per-provider enable/disable switch.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        log('INFO', 'get_sources-1A', url, dolog=False)
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        result = cleantitle.asciiOnly(result)
        # Best-effort poster extraction; protocol-relative URLs get 'http:'.
        try:
            poster1 = client.parseDOM(result, 'div', attrs={'class': 'movie_thumb'})[0]
            poster = client.parseDOM(poster1, 'img', ret='src')[0]
            if 'www' not in poster:
                poster = 'http:%s' % poster
        except:
            poster = None
        # testjs (project helper) derives the real watch URL from the page.
        loc = url.replace(self.base_link + '/', '')
        url = testjs(result, self.base_link, loc)
        vidtype = 'Movie'
        if 'season' in url:
            vidtype = 'Show'
        # Rewrite season/episode path segments into query parameters.
        url = url.replace('=tv-', '=watch-').replace('/season', '&season')
        url = url.replace('season-', 'season=').replace('-episode-', '&episode=')
        log('INFO', 'get_sources-1B', url, dolog=False)
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        result = cleantitle.asciiOnly(result)
        links_m = []
        trailers = []
        # Trailer scraping is skipped in test mode.
        if testing == False:
            try:
                matches = re.findall(r'\"(//www.youtube.*?)\"', result)
                for match in matches:
                    try:
                        if 'youtube.com' in match and '"' not in match:
                            match = match.replace('embed/', 'watch?v=')
                            if 'http' not in match:
                                match = 'http:%s' % match
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            try:
                l = resolvers.createMeta(trailer, self.name, self.logo, '720p', [], key, vidtype='Trailer', testing=testing)
                for ll in l:
                    if ll != None and 'key' in ll.keys():
                        links_m.append(ll)
            except:
                pass
        links = client.parseDOM(result, 'tbody')
        # The site's warning banner doubles as the rip-type hint.
        try:
            riptypex = client.parseDOM(result, 'div', attrs={'class': 'warning_message'})[0]
        except:
            riptypex = 'BRRIP'
        c = 0
        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                url = urlparse.urljoin(self.base_link, url)
                r = client.request(url)
                r = cleantitle.asciiOnly(r)
                links = client.parseDOM(r, 'script')
                p = False
                urls_p = []
                for l in links:
                    if 'eval' in l:
                        # Packed twice by the site; unpack twice.
                        unpacked_code = jsunpack.unpack(l)
                        unpacked_code = jsunpack.unpack(unpacked_code)
                        # Rebuild stream URLs from the host/hosted/loc vars
                        # embedded in the unpacked script.
                        host = re.findall(r"var host=\\'(.*?)\\'", unpacked_code)
                        hosted = re.findall(r"var hosted=\\'(.*?)\\'", unpacked_code)[0]
                        loc = re.findall(r"var loc=\\'(.*?)\\'", unpacked_code)[0]
                        if loc != None and len(loc) > 4:
                            for h in host:
                                if hosted in h:
                                    url = h + loc
                                    urls_p.append(url)
                            p = True
                            break
                if p == False:
                    # No usable script found in this row: skip it.
                    raise
                c = c + 1
                for url in urls_p:
                    log('INFO', 'get_sources-2A-%s: %s' % (c, url), dolog=False)
                    if 'http' not in url:
                        raise Exception()
                    for u in AVOID_DOMAINS:
                        if u in url:
                            raise Exception()
                    # The span's CSS class encodes the quality.
                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    if quality == 'quality_cam' or quality == 'quality_ts':
                        quality = '480p'
                        riptype = 'CAM'
                    elif quality == 'quality_dvd':
                        quality = '720p'
                        riptype = 'BRRIP'
                    else:
                        riptype = riptypex
                        quality = '480p'
                    try:
                        l = resolvers.createMeta(url, self.name, self.logo, quality, [], key, vidtype=vidtype, poster=poster, riptype=riptype, testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        pass
            except:
                pass
        # Keep only well-formed source dicts.
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Search the site, match the title/year (or season/episode) against
    aliases, then harvest streamdor embeds into resolved sources.

    Step 1 locates the watch page; Step 2 either decodes the JuicyCodes
    payload (legacy path, disabled via Juicy=False) or walks the
    per-quality episode pages and collects embed URLs.
    """
    try:
        sources = []
        # Per-provider enable/disable switch.
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        # NOTE(review): eval on a query-string field — trusted only because
        # the url is built by this add-on itself; confirm.
        aliases = eval(data['aliases'])
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        log(type='INFO', method='get_sources', err='Searching - %s' % query, dolog=False, logToControl=False, doPrint=True)
        result = client.request(query)
        links_m = []
        # --- Step 1: find the watch-page URL for this title ---
        try:
            if 'episode' in data:
                # TV path: match "<name> - Season <n>" entries, then pick
                # the episode link off the season's watch page.
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]
                url = '%swatch' % url
                result = client.request(url)
                url = re.findall('a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d' % int(data['episode']), result)[0]
            else:
                # Movie path: match alias and (via the poster filename) year.
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'img', ret='data-original'))
                results = [(i[0], i[1], re.findall(r'images/(.*?)-', i[2])) for i in r]
                try:
                    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
                except Exception as e:
                    print e
                    url = None
                    pass
                if (url == None):
                    # Fall back to alias-only match when the year check fails.
                    url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
                url = urlparse.urljoin(url, 'watch')
            if url == None:
                raise Exception()
        except Exception as e:
            raise Exception('Step 1 Failed: %s > %s' % (url,e))
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        log(type='INFO', method='get_sources', err='Match found - %s' % url, dolog=False, logToControl=False, doPrint=True)
        result = client.request(url)
        try:
            poster = client.parseDOM(result, 'img', attrs={'itemprop':'image'}, ret='src')[0]
        except:
            poster = None
        # Juicy=False permanently disables the legacy JuicyCodes branch below.
        Juicy = False
        ss = []
        riptype = 'BRRIP'
        # Trailers are only collected outside test mode.
        if testing == False:
            trailer_res = client.parseDOM(result, 'div', attrs={'class':'block-trailer'})[0]
            trailer_res = client.parseDOM(trailer_res, 'a', ret='href')[0]
            trailer_res = client.request(trailer_res)
            trailers = []
            try:
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(trailer_res)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/','watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
            for trailer in trailers:
                try:
                    l = resolvers.createMeta(trailer, self.name, self.logo, '720p', [], key, poster=poster, vidtype='Trailer', testing=testing)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                except:
                    pass
        # --- Step 2: collect embed/stream candidates into ss ---
        if 'streamdor' in result and Juicy == True:
            # Legacy path: decode the base64 JuicyCodes payload, fetch a
            # session token, and read the episode JSON out of the script.
            src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"', result)[0]
            if src.startswith('//'):
                src = 'http:'+src
            episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
            p = client.request(src, referer=url)
            try:
                p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                p = re.sub(r'\"\s*\+\s*\"','', p)
                p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)
                p = base64.b64decode(p)
                p = jsunpack.unpack(p)
                p = unicode(p, 'utf-8')
                post = client.encodePostData({'id': episodeId})
                p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True, timeout=60)
                js = json.loads(p2)
                tok = js['token']
                quali = 'SD'
                try:
                    quali = re.findall(r'label:"(.*?)"',p)[0]
                except:
                    pass
                p = re.findall(r'var\s+episode=({[^}]+});',p)[0]
                js = json.loads(p)
                # eName looks like a rip-type tag unless it is actually a
                # quality marker (480p/sd/hd), in which case keep BRRIP.
                try:
                    rtype = js['eName']
                    if '0p' in rtype.lower() or 'sd' in rtype.lower() or 'hd' in rtype.lower():
                        raise
                    riptype = rtype
                except:
                    pass
                if 'fileEmbed' in js and js['fileEmbed'] != '':
                    ss.append([js['fileEmbed'], quali, riptype])
                if 'filePlaylist' in js and js['filePlaylist'] != '':
                    js_data = client.request('https://embed.streamdor.co/play/sources?hash=%s&token=%s'%(js['filePlaylist'],tok), referer=src, XHR=True)
                    js = json.loads(js_data)
                    m_srcs = js['playlist'][0]['sources']
                    if 'error' not in m_srcs:
                        for m_src in m_srcs:
                            ss.append([m_src['file'], m_src['label'], riptype])
                if 'fileHLS' in js and js['fileHLS'] != '':
                    ss.append(['https://hls.streamdor.co/%s%s'%(tok, js['fileHLS']), quali, riptype])
            except Exception as e:
                raise Exception('Step 2 Failed: %s > %s' % (url,e))
        else:
            # Current path: iterate the per-quality episode pages and pull
            # embed URLs from the streamdor API.
            div_s = client.parseDOM(result, 'div', attrs={'id': 'list-eps'})[0]
            pages = client.parseDOM(div_s, 'a', ret='href')
            quals = re.findall(r'>(.*?)</a>',div_s)
            c=0
            for p in pages:
                try:
                    p1 = client.request(p, referer=url)
                    file_id = re.findall(r'load_player\.html\?e=(.*?)\"',p1)[0]
                    file_loc = 'https://api.streamdor.co/episode/embed/%s' % file_id
                    js_data = client.request(file_loc, referer=p)
                    js = json.loads(js_data)
                    m_srcs = js['embed']
                    # NOTE(review): if the label is NOT a quality marker the
                    # rip type becomes 'CAM' — confirm that inversion is
                    # intended.
                    try:
                        rtype = quals[c]
                        if '0p' in rtype.lower() or 'sd' in rtype.lower() or 'hd' in rtype.lower():
                            raise
                        riptype = 'CAM'
                    except:
                        pass
                    ss.append([m_srcs, file_quality(quals[c]), riptype])
                    c=c+1
                except:
                    pass
        # --- Resolve every collected candidate ---
        for link in ss:
            try:
                if 'google' in url:
                    xs = client.googletag(url)
                    for x in xs:
                        try:
                            l = resolvers.createMeta(x['url'], self.name, self.logo, x['quality'], [], key, riptype, poster=poster, testing=testing)
                            for ll in l:
                                if ll != None and 'key' in ll.keys():
                                    links_m.append(ll)
                            if testing == True and len(links_m) > 0:
                                break
                        except:
                            pass
                else:
                    try:
                        l = resolvers.createMeta(link[0], self.name, self.logo, link[1], [], key, link[2], poster=poster, testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                        if testing == True and len(links_m) > 0:
                            break
                    except:
                        pass
            except:
                pass
        # Keep only well-formed source dicts.
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Search the site for the title (or season/episode), collect
    candidate watch pages, then resolve every embedded server iframe.

    If `url` is not already an http URL it is treated as an urlencoded
    metadata query string and a site search is performed first.
    """
    try:
        sources = []
        # Per-provider enable/disable switch.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        urls = []
        vidtype = 'Movie'
        # Non-http url => metadata query string; run the site search.
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'year' in data:
                    year = data['year']
                if 'season' in data:
                    query = {'keyword': '%s %s %s' % (title, 'season', data['season'])}
                else:
                    query = {'keyword': title}
                search_url = urlparse.urljoin(self.base_link, '/search.html')
                search_url = search_url + '?' + urllib.urlencode(query)
                result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                r = client.parseDOM(result, 'div', attrs={'class': 'wrapper'})
                # The second 'wrapper' div holds the result grid.
                try:
                    r = r[1]
                except:
                    raise Exception()
                # Collect (href, title) pairs from each result figure.
                r1 = client.parseDOM(r, 'figure')
                r2 = []
                for res in r1:
                    l = client.parseDOM(res, 'a', ret='href')[0]
                    t = client.parseDOM(res, 'div', attrs={'class': 'title'})[0]
                    r = (l, t)
                    r2.append(r)
                r = r2
                if 'season' in data:
                    # TV path: match "<name> <season>" titles, then scan the
                    # show page's server list for the episode link.
                    vidtype = 'Show'
                    episode = int(data['episode'])
                    r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                    url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    ep_url = []
                    for i in url:
                        result = proxies.request(urlparse.urljoin(self.base_link, i[0]), headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                        t = client.parseDOM(result, 'div', attrs={'class': 'eps'})
                        for tt in t:
                            if 'watch' in tt:
                                tt = client.parseDOM(tt, 'div', attrs={'class': 'server'})[0]
                                section_links = client.parseDOM(tt, 'a', ret='href')
                                for a_link in section_links:
                                    # Episode slug is zero-padded to 2 or 3 digits.
                                    if episode < 100:
                                        f_key = '-episode-%02d-' % episode
                                    else:
                                        f_key = '-episode-%03d-' % episode
                                    if f_key in a_link:
                                        log('INFO', 'get_sources', 'episode url = %s' % a_link)
                                        ep_url.append(a_link)
                                        break
                    for i in ep_url:
                        urls.append(urlparse.urljoin(self.base_link, i))
                else:
                    # Movie path: any result whose cleaned title contains ours.
                    for i in r:
                        if cleantitle.get(title) in cleantitle.get(i[1]):
                            urls.append(urlparse.urljoin(self.base_link, i[0]))
            except:
                # NOTE(review): '==' is a no-op comparison — almost certainly
                # meant to be an assignment (urls = [self.base_link]) so the
                # scrape falls back to the base link; as written, urls stays
                # empty on search failure.
                urls == [self.base_link]
        links_m = []
        page = None
        for url in urls:
            try:
                log('INFO', 'get_sources', 'url == %s' % url, dolog=False, doPrint=True)
                page_url = url
                page = result = proxies.request(url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                quality = '480p'
                type = 'BRRIP'
                atr = ''
                qtr = ''
                # The site misspells the class as 'quanlity' on some pages.
                try:
                    qtr = client.parseDOM(result, 'span', attrs={'class': 'quanlity'})[0]
                except:
                    try:
                        qtr = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0]
                    except:
                        pass
                try:
                    quality = source_utils.check_sd_url(qtr)
                    type = source_utils.check_sd_url_rip(qtr)
                except Exception as e:
                    quality = '480p'
                    type = 'BRRIP'
                try:
                    atr = client.parseDOM(result, 'span', attrs={'class': 'year'})[0]
                except:
                    atr = ''
                try:
                    atr_release = client.parseDOM(result, 'div', attrs={'class': 'meta'})[1]
                except:
                    atr_release = ''
                # NOTE(review): `data` is only defined on the search path —
                # for an http `url` input this raises NameError, which the
                # enclosing except swallows; confirm intended.
                if 'season' in data:
                    vidtype = 'Show'
                    pass
                else:
                    # Movies must match the expected year in either field.
                    vidtype = 'Movie'
                    resultx = result if str(int(year)) in atr else None
                    if resultx == None:
                        resultx = result if str(int(year)) in atr_release else None
                    if resultx == None:
                        raise Exception()
                try:
                    poster = client.parseDOM(page, 'div', attrs={'class': 'detail-l'})[0]
                    poster = client.parseDOM(poster, 'img', ret='src')[0]
                    if 'http' not in poster:
                        poster = 'http:' + poster
                except:
                    poster = None
                # Pass 1: follow each server URL and resolve its iframe src.
                try:
                    servers = re.findall(r'link_server_.*\"(.*)\";', page)
                    servers = list(set(servers))
                    for server in servers:
                        try:
                            if 'http' not in server:
                                server = 'http:' + server
                            result = proxies.request(server, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                            server = client.parseDOM(result, 'iframe', ret='src')[0]
                            if len(server) > 0:
                                if 'http' not in server:
                                    server = 'http:' + server
                                l = resolvers.createMeta(server, self.name, self.logo, quality, [], key, poster=poster, riptype=type, vidtype=vidtype, testing=testing, page_url=page_url)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                        except Exception as e:
                            pass
                        if testing and len(links_m) > 0:
                            break
                except Exception as e:
                    pass
                # Pass 2: also try the raw server URLs directly.
                try:
                    servers = re.findall(r'link_server_.*\"(.*)\";', page)
                    servers = list(set(servers))
                    for server in servers:
                        if server != None:
                            if 'http' not in server:
                                server = 'http:' + server
                            try:
                                l = resolvers.createMeta(server, self.name, self.logo, quality, [], key, poster=poster, riptype=type, vidtype=vidtype, testing=testing, page_url=page_url)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                            except:
                                pass
                except:
                    pass
                # Only the first successfully processed page is used.
                break
            except:
                pass
        # Keep only well-formed source dicts.
        for link in links_m:
            if link != None and 'key' in link.keys():
                sources.append(link)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """fmovies-style provider: search, match title/year, then drive the
    site's token + grabber API to obtain stream URLs per server.

    Falls back to a PhantomJS-based decoder when the API path yields no
    video URL and PhantomJS is enabled in settings.
    """
    try:
        sources = []
        # Per-provider enable/disable switch.
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        # Fallback timestamp rounded to the hour; replaced by the page's
        # data-ts when available.
        myts = str(((int(time.time())/3600)*3600))
        log('INFO','get_sources-1', 'url: %s' % url, dolog=False)
        token_error = False
        urls = []
        sub_url = None
        page_url = None
        # Non-http url => metadata query string; run the site search.
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                try:
                    year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
                except:
                    try:
                        year = data['year']
                    except:
                        year = None
                try:
                    episode = data['episode']
                except:
                    pass
                query = {'keyword': title}
                search_url = urlparse.urljoin(self.base_link, '/search')
                search_url = search_url + '?' + urllib.urlencode(query)
                result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                log('INFO','get_sources-2', '%s' % search_url, dolog=False)
                rs = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
                r = client.parseDOM(rs, 'div', attrs = {'class': 'item'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]
                if 'season' in data:
                    # TV path: prefer "<name> <season>" entries; exact-title
                    # hits are injected as pseudo season-1 candidates.
                    r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                    possible_hits = []
                    for i in r:
                        if cleantitle.get(title).lower() == cleantitle.get(i[1]).lower():
                            possible_hits.append((i[0], [[i[1], u'1']]))
                    url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
                    for i in possible_hits:
                        url.append(i)
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    if len(url) == 0:
                        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                    if len(url) == 0:
                        # NOTE(review): `season` is not assigned until the
                        # per-url loop below — this fallback raises NameError
                        # (swallowed by the enclosing try); confirm.
                        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]+str(season))]
                else:
                    url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])]
                if len(url) == 0:
                    log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
                    return sources
                for urli in url:
                    url = urli[0]
                    url = urlparse.urljoin(self.base_link, url)
                    urls.append(url)
            except Exception as e:
                raise Exception(e)
        vidtype = 'Movie'
        # Pick the first candidate page whose <title> year matches.
        for url in urls:
            try:
                try:
                    url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
                except:
                    pass
                log('INFO','get_sources-3', url, dolog=False)
                referer = url
                page_url = url
                result = resultT = proxies.request(url, headers=self.headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                if 'data-type="series"' not in result:
                    raise Exception('Not a TV-Series')
                vidtype = 'Show'
                alina = client.parseDOM(result, 'title')[0]
                atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]
                if 'season' in data:
                    try:
                        season = data['season']
                    except:
                        pass
                    # Shows may list the premiere year of any season; accept
                    # a window of plausible years.
                    years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '%s' % str(int(year) + int(season)), '%s' % str(int(year) - int(season))]
                    mychk = False
                    for y in years:
                        if y in atr:
                            mychk = True
                    result = result if mychk == True else None
                    if mychk == True:
                        break
                else:
                    result = result if year in atr else None
                    if result != None:
                        break
            except Exception as e:
                log('FAIL','get_sources-3', '%s : %s' % (url,e), dolog=False)
        if result == None:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        # Prefer the page's own timestamp over the generated one.
        try:
            myts = re.findall(r'data-ts="(.*?)"', result)[0]
        except:
            log('INFO','get_sources-3', 'could not parse ts ! will use generated one : %s' % myts, dolog=False)
        trailers = []
        links_m = []
        # Trailers only outside test mode.
        if testing == False:
            try:
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/','watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
            for trailer in trailers:
                links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
        # Page-level quality tag -> default quality/rip type.
        riptype = None
        try:
            quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
        except:
            quality = 'hd'
        if quality == 'cam' or quality == 'ts':
            quality = '480p'
            riptype = 'CAM'
        elif quality == 'hd' or 'hd ' in quality:
            quality = '720p'
            riptype = 'BRRIP'
        else:
            quality = '480p'
            riptype = 'BRRIP'
        # Enumerate the server list (data-id, label) pairs.
        result_servers = self.get_servers(url, proxy_options=proxy_options)
        result_servers = client.parseDOM(result_servers, 'ul', attrs = {'data-range-id':"0"})
        result_servers = zip(client.parseDOM(result_servers, 'a', ret='data-id'), client.parseDOM(result_servers, 'a'))
        result_servers = [(i[0], re.findall('(\d+)', i[1])) for i in result_servers]
        servers = [(i[0], ''.join(i[1][:1])) for i in result_servers]
        # For episodes, keep only the matching episode number.
        try:
            servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
        except:
            pass
        for s in servers[:len(servers)]:
            try:
                video_url = None
                # Per-server quality override from the label.
                if '1080' in s[1]:
                    quality = '1080p'
                elif '720' in s[1] or 'hd' in s[1].lower():
                    quality = '720p'
                elif '480' in s[1]:
                    quality = '480p'
                elif 'cam' in s[1].lower() or 'ts' in s[1].lower():
                    quality = '480p'
                else:
                    quality = '480p'
                if video_url == None:
                    # Token handshake against the hash endpoint; on an error
                    # response retry once with the error-mode token.
                    headers = {'X-Requested-With': 'XMLHttpRequest'}
                    hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                    query = {'ts': myts, 'id': s[0], 'update': '0', 'server':'36'}
                    query.update(self.__get_token(query))
                    hash_url = hash_url + '?' + urllib.urlencode(query)
                    headers['Referer'] = urlparse.urljoin(url, s[0])
                    headers['Cookie'] = self.headers['Cookie']
                    log('INFO','get_sources-4.b', '%s' % hash_url, dolog=False)
                    result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                    result = json.loads(result)
                    if 'error' in result and result['error'] == True:
                        token_error = True
                        query.update(self.__get_token(query, token_error=token_error))
                        hash_url = hash_url + '?' + urllib.urlencode(query)
                        result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                        result = json.loads(result)
                        query = {'id': s[0], 'update': '0'}
                        query.update(self.__get_token(query, token_error=token_error))
                    else:
                        token_error = False
                        queryx = {'id': s[0], 'update': '0'}
                        query.update(self.__get_token(queryx))
                    # NOTE(review): `url` grows on every loop iteration here
                    # (query string appended repeatedly) — looks unintended.
                    url = url + '?' + urllib.urlencode(query)
                    log('INFO','get_sources-5', result, dolog=False)
                    if result['target'] != "":
                        pass
                    else:
                        # No direct target: go through the grabber API with a
                        # decoded token/options pair.
                        grabber = result['grabber']
                        grab_data = grabber
                        grabber_url = urlparse.urljoin(self.base_link, self.grabber_api)
                        if '?' in grabber:
                            grab_data = grab_data.split('?')
                            grabber_url = grab_data[0]
                            grab_data = grab_data[1]
                        grab_server = str(urlparse.parse_qs(grab_data)['server'][0])
                        b, resp = self.decode_t(result['params']['token'], -18)
                        if b == False:
                            raise Exception(resp)
                        token = resp
                        b, resp = self.decode_t(result['params']['options'], -18)
                        if b == False:
                            raise Exception(resp)
                        options = resp
                        # NOTE(review): grabber_url is used as a dict KEY
                        # here — verify against the site's token algorithm.
                        grab_query = {'ts':myts, grabber_url:'','id':result['params']['id'],'server':grab_server,'mobile':'0','token':token,'options':options}
                        tk = self.__get_token(grab_query, token_error)
                        if tk == None:
                            raise Exception('video token algo')
                        grab_info = {'token':token,'options':options}
                        del query['server']
                        query.update(grab_info)
                        query.update(tk)
                        sub_url = result['subtitle']
                        if sub_url==None or len(sub_url) == 0:
                            sub_url = None
                        if '?' in grabber:
                            grabber += '&' + urllib.urlencode(query)
                        else:
                            grabber += '?' + urllib.urlencode(query)
                        if grabber!=None and not grabber.startswith('http'):
                            grabber = 'http:'+grabber
                        log('INFO','get_sources-6', grabber, dolog=False)
                        result = proxies.request(grabber, headers=headers, referer=url, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                        result = json.loads(result)
                    if 'data' in result.keys():
                        # Grabber returned a file list.
                        result = [i['file'] for i in result['data'] if 'file' in i]
                        for i in result:
                            video_url = i
                            links_m = resolvers.createMeta(i, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
                    else:
                        # Direct (encoded) target URL.
                        target = result['target']
                        b, resp = self.decode_t(target, -18)
                        if b == False:
                            raise Exception(resp)
                        target = resp
                        sub_url = result['subtitle']
                        if sub_url==None or len(sub_url) == 0:
                            sub_url = None
                        if target!=None and not target.startswith('http'):
                            target = 'http:' + target
                        video_url = target
                        links_m = resolvers.createMeta(target, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
            except Exception as e:
                log('FAIL', 'get_sources-7','%s' % e, dolog=False)
            # PhantomJS fallback when the API path produced nothing.
            try:
                if video_url == None and USE_PHANTOMJS == True and control.setting('use_phantomjs') != control.phantomjs_choices[0]:
                    vx_url = '%s/%s' % (page_url,s[0])
                    log(type='INFO',method='get_sources-4.a.1', err=u'trying phantomjs method: %s' % vx_url)
                    try:
                        v_url, bool = phantomjs.decode(vx_url, js='fmovies.js')
                        if bool == False:
                            ret_error = v_url
                            raise Exception(ret_error)
                        else:
                            video_url = v_url
                            ret_error = ''
                            log(type='SUCCESS',method='get_sources-4.a.2', err=u'*PhantomJS* method is working: %s' % vx_url)
                            links_m = resolvers.createMeta(video_url, self.name, self.logo, quality, links_m, key, riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
                    except:
                        raise Exception('phantomjs not working')
                else:
                    raise Exception('phantomjs is disabled')
            except Exception as e:
                log(type='FAIL',method='get_sources-4.a.3', err=u'%s' % e)
        sources += [l for l in links_m]
        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Build playable source entries from pre-scraped link dicts.

    `url` is expected to be a list of dicts (keys: file, src_file, page,
    label, srt, poster) produced by an earlier search step.  Each entry's
    label is mapped to a quality tag, the dict is re-encoded as a query
    string and handed to resolvers.createMeta, which accumulates results
    into links_m.  Returns a (possibly empty) list of source dicts; never
    raises — the outer handler logs and returns whatever was collected.
    """
    try:
        sources = []
        # Provider can be toggled off in addon settings.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources

        links_m = []
        # Site label -> playback quality tag.
        TYPES_QUAL = {'SD': '480p', '3D SD': '480p', 'HD': '1080p', '3D FullHD': '1080p'}
        #TYPES_RIP = {'SD':'BRRIP', '3D SD':'3D-BRRIP', 'HD':'3D-BRRIP', '3D FullHD':'3D-BRRIP'}

        for data_j in url:
            try:
                file = data_j['file']
                src_file = data_j['src_file']
                page = data_j['page']
                label = data_j['label']
                sub_url = data_j['srt']
                poster = data_j['poster']
                # Defaults used when the label is not in the mapping table.
                qual = '480p'
                riptype = '3D-BRRIP'
                # Make the file path absolute before re-encoding the payload.
                data_j['file'] = urlparse.urljoin(self.base_link, file)
                if label in TYPES_QUAL.keys():
                    qual = TYPES_QUAL[label]
                # Normalize the label to the resolved quality tag before encoding.
                data_j['label'] = qual
                file_data = urllib.urlencode(data_j)
                # createMeta appends validated entries to links_m and returns it.
                links_m = resolvers.createMeta(file_data, self.name, self.logo, qual, links_m, key, riptype, testing=testing, sub_url=sub_url, urlhost=client.geturlhost(self.base_link), poster=poster)
                # In test mode one working link is enough.
                if testing == True and len(links_m) > 0:
                    break
            except:
                pass

        for l in links_m:
            sources.append(l)

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources

        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False): try: sources = [] if url == None: return sources urls = [] if not str(url).startswith('http'): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) #print data title = data[ 'tvshowtitle'] if 'tvshowtitle' in data else data[ 'title'] if 'year' in data: year = data['year'] try: episode = data['episode'] except: pass query = {'keyword': title} search_url = urlparse.urljoin(self.base_link, '/search.html') search_url = search_url + '?' + urllib.urlencode(query) #print search_url result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) #self.log('GRABBER','get_sources-2', '%s' % search_url, dolog=False) r = client.parseDOM(result, 'div', attrs={'class': 'wrapper'}) try: r = r[1] except: raise Exception() #print r r1 = client.parseDOM(r, 'figure') r2 = [] for res in r1: l = client.parseDOM(res, 'a', ret='href')[0] t = client.parseDOM(res, 'div', attrs={'class': 'title'})[0] r = (l, t) r2.append(r) r = r2 #print data if 'season' in data: r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r] #print r #title += '%01d' % int(data['season']) url = [(i[0], re.findall('(.+?) 
(\d+)$', i[1])) for i in r] url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0] url = [ i for i in url if cleantitle.get(title) in cleantitle.get(i[1]) ] #for i in url: # print i[2],i[0],i[1] # print '%01d' % int(data['season']) == '%01d' % int(i[2]) url = [ i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2]) ] for i in url: urls.append(urlparse.urljoin(self.base_link, i[0])) else: for i in r: if cleantitle.get(title) in cleantitle.get(i[1]): urls.append( urlparse.urljoin(self.base_link, i[0])) except: urls == [self.base_link] links_m = [] #print urls page = None for url in urls: try: page = result = proxies.request( url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) quality = '480p' type = 'BRRIP' try: atr = client.parseDOM(result, 'span', attrs={'class': 'quanlity'})[0] q, t = cleantitle.getQuality(atr) if q != None: quality = q type = t except: pass try: atr = client.parseDOM(result, 'span', attrs={'class': 'year'})[0] except: atr = '' #print atr try: atr_release = client.parseDOM(result, 'div', attrs={'class': 'meta'})[1] except: atr_release = '' #print atr_release if 'season' in data: pass else: resultx = result if str(int(year)) in atr or str( int(year) + 1) in atr or str(int(year) - 1) in atr else None if resultx == None: resultx = result if str( int(year)) in atr_release or str( int(year) + 1) in atr_release or str( int(year) - 1) in atr_release else None if resultx == None: raise Exception() #print result r = client.parseDOM(result, 'article', attrs={'class': 'player current'})[0] result = client.parseDOM(r, 'iframe', ret='src')[0] result = result.split('?') #print result data = urlparse.parse_qs(result[1]) var_id = data['id'][0] var_x = data['x'][0] try: server_f1 = self.link_server_f1 % var_id result = proxies.request( server_f1, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) resultd = client.parseDOM(result, 
'iframe', ret='src')[0] if 'http' not in resultd: resultd = 'http:' + resultd result = proxies.request( resultd, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) ss = client.parseDOM(result, 'source', ret='src') for s in ss: #print s if testing and len(links_m) > 0: break links_m = resolvers.createMeta( s, self.name, self.logo, quality, links_m, key) if testing and len(links_m) > 0: break except: pass try: server_f2 = self.link_server_f2 % var_id result = proxies.request( server_f2, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) resultd = client.parseDOM(result, 'iframe', ret='src')[0] if 'http' not in resultd: resultd = 'http:' + resultd result = proxies.request( resultd, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True) ss = client.parseDOM(result, 'source', ret='src') for s in ss: #print s if testing and len(links_m) > 0: break links_m = resolvers.createMeta( s, self.name, self.logo, quality, links_m, key) if testing and len(links_m) > 0: break except: pass try: all_links = re.findall(r'link_server_.*\"(.*)\";', page) for a_link in all_links: if a_link != None and 'vidnode.net' not in a_link and 'fmovie.io' not in a_link: if 'http' not in a_link: a_link = 'http:' + a_link print a_link try: if testing and len(links_m) > 0: break links_m = resolvers.createMeta( a_link, self.name, self.logo, quality, links_m, key) except: pass except: pass except: pass #print links_m for link in links_m: sources.append(link) self.log('SUCCESS', 'get_sources', 'links : %s' % len(sources), dolog=testing) return sources except Exception as e: self.log('ERROR', 'get_sources', '%s' % e, dolog=testing) return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Two-step scrape: (1) search the site and match the title against its
    aliases to find the watch page; (2) extract the streamdor embed, decode
    the packed JuicyCodes payload, fetch a session token, and collect file
    entries (embed / playlist / HLS) which are resolved via createMeta.

    Returns a (possibly empty) list of source dicts; step failures raise
    'Step 1/2 Failed' which the outer handler logs before returning.
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = data['year']
        # NOTE(review): eval on a query-string field — assumed to be a
        # repr'd list built by this addon's own search code, not user input.
        aliases = eval(data['aliases'])
        #cookie = '; approve_search=yes'
        query = self.search_link % (urllib.quote_plus(title))
        query = urlparse.urljoin(self.base_link, query)
        log(type='INFO', method='get_sources', err='Searching - %s' % query, dolog=False, logToControl=False, doPrint=True)
        result = client.request(query)  #, cookie=cookie)
        links_m = []
        # --- Step 1: locate the watch-page URL for this title ---
        try:
            if 'episode' in data:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
                # Title strings look like "<name> - Season <n>".
                r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == data['season']][0]
                url = '%swatch' % url
                result = client.request(url)
                url = re.findall('a href=\"(.+?)\" class=\"btn-eps first-ep \">Episode %02d' % int(data['episode']), result)[0]
            else:
                r = client.parseDOM(result, 'div', attrs={'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'img', ret='data-original'))
                # Year is embedded in the poster image filename.
                results = [(i[0], i[1], re.findall(r'images/(.*?)-', i[2])) for i in r]
                try:
                    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
                except Exception as e:
                    print e
                    url = None
                    pass
                # Fallback: alias match without the year check.
                if (url == None):
                    url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
                url = urlparse.urljoin(url, 'watch')
                #url = client.request(url, output='geturl')
            if url == None: raise Exception()
        except Exception as e:
            raise Exception('Step 1 Failed: %s > %s' % (url, e))
        url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
        result = client.request(url)
        try:
            poster = client.parseDOM(result, 'img', attrs={'itemprop': 'image'}, ret='src')[0]
        except:
            poster = None
        # --- Step 2: decode the streamdor embed ---
        src = re.findall('src\s*=\s*"(.*streamdor.co\/video\/\d+)"', result)[0]
        if src.startswith('//'):
            src = 'http:' + src
        episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
        p = client.request(src, referer=url)
        riptype = 'BRRIP'
        try:
            # The page wraps its player config in JuicyCodes.Run("base64+"...);
            # strip concatenation and junk chars, then b64-decode and unpack.
            p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
            p = re.sub(r'\"\s*\+\s*\"', '', p)
            p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
            p = base64.b64decode(p)
            p = jsunpack.unpack(p)
            p = unicode(p, 'utf-8')
            post = client.encodePostData({'id': episodeId})
            p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src, XHR=True, timeout=60)
            js = json.loads(p2)
            tok = js['token']
            quali = 'SD'
            try:
                quali = re.findall(r'label:"(.*?)"', p)[0]
            except:
                pass
            p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
            js = json.loads(p)
            ss = []
            # eName carries the rip type unless it is really a quality label.
            try:
                rtype = js['eName']
                if '0p' in rtype.lower() or 'sd' in rtype.lower() or 'hd' in rtype.lower():
                    raise
                riptype = rtype
            except:
                pass
            if 'fileEmbed' in js and js['fileEmbed'] != '':
                ss.append([js['fileEmbed'], quali])
            if 'filePlaylist' in js and js['filePlaylist'] != '':
                js_data = client.request('https://embed.streamdor.co/play/sources?hash=%s&token=%s' % (js['filePlaylist'], tok), referer=src, XHR=True)
                js = json.loads(js_data)
                m_srcs = js['playlist'][0]['sources']
                if 'error' not in m_srcs:
                    for m_src in m_srcs:
                        ss.append([m_src['file'], m_src['label']])
            if 'fileHLS' in js and js['fileHLS'] != '':
                ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
        except Exception as e:
            raise Exception('Step 2 Failed: %s > %s' % (url, e))
        # Resolve every [file, label] pair collected above.
        for link in ss:
            try:
                if 'google' in url:
                    xs = client.googletag(url)
                    for x in xs:
                        try:
                            links_m = resolvers.createMeta(x['url'], self.name, self.logo, x['quality'], links_m, key, riptype, poster=poster, testing=testing)
                            if testing == True and len(links_m) > 0:
                                break
                        except:
                            pass
                else:
                    try:
                        links_m = resolvers.createMeta(link[0], self.name, self.logo, link[1], links_m, key, riptype, poster=poster, testing=testing)
                        if testing == True and len(links_m) > 0:
                            break
                    except:
                        pass
            except:
                pass
        for l in links_m:
            sources.append(l)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Search-based provider: tries "<title> <season|year>" then bare title,
    picks the matching result page, determines rip/quality from the page,
    finds the movie link or the requested episode among child_episode items,
    then resolves trailers (non-testing only) and the anime_muti_link
    data-video players.  Returns a (possibly empty) list of source dicts.
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        year = None
        episode = None
        season = None
        log('INFO', 'get_sources-1', 'data-items: %s' % url, dolog=False)
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        # For shows, take the year from the premiere date; fall back to 'year'.
        try:
            year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
        except:
            try:
                year = data['year']
            except:
                year = None
        try:
            season = data['season']
        except:
            pass
        try:
            episode = data['episode']
        except:
            pass
        # Two search attempts: qualified keyword first, bare title second.
        queries = []
        if season != None:
            queries = [{'keyword': '%s %s' % (title, season)}, {'keyword': title}]
        else:
            queries = [{'keyword': '%s %s' % (title, year)}, {'keyword': title}]
        rs = []
        for query in queries:
            search_url = urlparse.urljoin(self.base_link, '/search.html')
            search_url = search_url + '?' + urllib.urlencode(query)
            log('INFO', 'get_sources-2', 'search-url: %s' % search_url, dolog=False)
            result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
            rs = client.parseDOM(result, 'ul', attrs={'class': 'listing items'})
            if len(rs) > 0 and len(rs[0].strip()) > 4:
                break
        r = [(urlparse.urljoin(self.base_link, client.parseDOM(i, 'a', ret='href')[0]), client.parseDOM(i, 'div', attrs={'class': 'name'})) for i in rs]
        # Visit candidates until one matches (any for shows; year-match for movies).
        ux = None
        for s in r:
            ux = s[0]
            result = proxies.request(ux, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
            rs = client.parseDOM(result, 'div', attrs={'class': 'watch infonation'})[0]
            rs = client.parseDOM(rs, 'ul', attrs={'class': 'three'})[0]
            if season != None:
                break
            if year != None and year in rs:
                break
        log('INFO', 'get_sources-3', 'match-page-url: %s' % ux, dolog=False)
        links_m = []
        trailers = []
        poster = None
        vidtype = 'Movie'
        if season != None:
            vidtype = 'Show'
        riptype = 'BRRIP'
        quality = '720p'
        sub_url = None
        try:
            poster1 = client.parseDOM(result, 'div', attrs={'class': 'picture'})
            poster = client.parseDOM(poster1, 'img', ret='src')[0]
        except:
            pass
        links = client.parseDOM(result, 'li', attrs={'class': 'child_episode'})
        # Movies only: infer rip type / quality from the info box and the
        # first episode-link title.
        try:
            if season == None:
                rip_qual = client.parseDOM(result, 'div', attrs={'id': 'info_movies'})[0]
                rip_qual = client.parseDOM(rip_qual, 'div', attrs={'class': 'right'})[0]
                rip_qual = client.parseDOM(rip_qual, 'a')[0].strip()
                rip_qual2 = ep_title = client.parseDOM(links[0], 'a', ret='title')[0]
                if 'HD' not in rip_qual and 'HD' not in rip_qual2:
                    riptype = 'CAM'
                elif 'CAM' in rip_qual or 'CAM' in rip_qual2:
                    riptype = 'CAM'
                if riptype == 'CAM':
                    quality = '480p'
                if '720p' in rip_qual or '720p' in rip_qual2:
                    quality = '720p'
                elif '1080p' in rip_qual or '1080p' in rip_qual2:
                    quality = '1080p'
        except:
            pass
        # Pick the movie link, or the entry whose "Episode N" matches.
        mov_url = None
        for l in links:
            try:
                mov_urlx = urlparse.urljoin(self.base_link, client.parseDOM(l, 'a', ret='href')[0])
                ep_title = client.parseDOM(l, 'a', ret='title')[0]
                if season == None:
                    mov_url = mov_urlx
                else:
                    # Episode number may be delimited by space, '-', ':' or end-of-string.
                    try:
                        ep_nr = re.findall(r'Episode (.*?) ', ep_title)[0]
                    except:
                        try:
                            ep_nr = re.findall(r'Episode (.*?)-', ep_title)[0]
                        except:
                            try:
                                ep_nr = re.findall(r'Episode (.*?):', ep_title)[0]
                            except:
                                ep_nr = re.findall(r'Episode (.*)', ep_title)[0]
                    ep_nr = ep_nr.replace('-', '').replace(':', '').replace(' ', '')
                    ep_nr = filter(lambda x: x.isdigit(), ep_nr)
                    if int(episode) == int(ep_nr):
                        mov_url = mov_urlx
            except Exception as e:
                log('FAIL', 'get_sources-4-A', '%s: %s' % (title, e), dolog=False)
        if mov_url == None:
            raise Exception('No match found !')
        if season == None:
            log('INFO', 'get_sources-4', 'movie-page-url: %s' % mov_url, dolog=False)
        else:
            log('INFO', 'get_sources-4', 'show-episode-url: %s' % mov_url, dolog=False)
        page_url = mov_url
        result = proxies.request(mov_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
        try:
            sub_url = re.findall(r'\"(.*vtt)\"', result)[0]
        except:
            pass
        # Trailers are skipped in test mode to keep the run fast.
        if testing == False:
            try:
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result)
                for match in matches:
                    try:
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            try:
                l = resolvers.createMeta(trailer, self.name, self.logo, '720p', [], key, poster=poster, vidtype='Trailer', testing=testing)
                for ll in l:
                    if ll != None and 'key' in ll.keys():
                        links_m.append(ll)
            except:
                pass
        # The actual players live in data-video attributes.
        links = client.parseDOM(result, 'div', attrs={'class': 'anime_muti_link'})
        links = client.parseDOM(links, 'li', ret='data-video')
        video_urls = []
        for l in links:
            if 'http' not in l:
                l = 'http:' + l
            video_urls.append(l)
        for video_url in video_urls:
            try:
                l = resolvers.createMeta(video_url, self.name, self.logo, quality, [], key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing, page_url=page_url)
                for ll in l:
                    if ll != None and 'key' in ll.keys():
                        links_m.append(ll)
            except:
                pass
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape a listing page for <li><a href rel="nofollow"> entries and
    resolve each href via resolvers.createMeta with fixed 720p/BRRIP/Show
    metadata.  Returns a (possibly empty) list of source dicts.

    Fix: the scrape `except` branch did a bare `return`, which made the
    method return None on any parse failure — every other provider (and
    the tail of this one) returns the `sources` list.  Changed to `pass`
    so the normal logging/return path runs.
    """
    try:
        sources = []
        # Provider can be toggled off in addon settings.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        links_m = []
        page_url = url
        r = client.request(url)
        try:
            match = re.compile('<li><a href="(.+?)" rel="nofollow">(.+?)<').findall(r)
            for url, check in match:
                # Fixed metadata: this site exposes no per-link quality info.
                quality = '720p'
                poster = None
                riptype = 'BRRIP'
                vidtype = 'Show'
                sub_url = None
                l = resolvers.createMeta(url, self.name, self.logo, quality, [], key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing, page_url=page_url)
                for ll in l:
                    if ll != None and 'key' in ll.keys():
                        links_m.append(ll)
        except:
            # BUGFIX: was `return` (returned None, violating the list contract);
            # fall through so collected links are still reported and returned.
            pass
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Collect player URLs from the page's iframes and data-video attributes,
    expand vidstreaming embeds into their <source> tags, and resolve each
    candidate via resolvers.createMeta.  Returns a list of source dicts.
    """
    try:
        sources = []
        # Honor the per-provider enable/disable setting.
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        url = urlparse.urljoin(self.base_link, url)
        #r = client.request(url)
        req = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)

        # Candidates = every iframe src plus every data-video attribute.
        candidates = client.parseDOM(req, 'iframe', ret='src')
        try:
            candidates.extend(re.findall('data-video=\"(.*?)\"', req))
        except:
            pass

        links = []
        for cand in candidates:
            try:
                if 'http' not in cand:
                    cand = 'http:' + cand
                if cand.startswith('http') == True:
                    if 'vidstreaming' in cand:
                        # Expand the embed page into its direct <source> URLs.
                        #url = client.request(cand)
                        embed_page = proxies.request(cand, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                        stream_urls = client.parseDOM(embed_page, 'source', ret='src')
                    else:
                        stream_urls = [cand]
                    for stream in stream_urls:
                        # Probe for a google-hosted quality tag; default to 720p.
                        try:
                            qualityt = client.googletag(stream)[0]['quality']
                        except:
                            qualityt = u'720p'
                        try:
                            links = resolvers.createMeta(stream, self.name, self.logo, qualityt, links, key, vidtype='Show', testing=testing)
                        except:
                            pass
            except:
                pass

        for entry in links:
            sources.append(entry)

        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources

        log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Primewire-style provider: collects YouTube trailer links (skipped in
    test mode), then walks each <tbody> link row, unwraps the redirect
    query parameters (u / q / url), base64-decodes the target, filters
    avoided domains, maps the row's quality CSS class to a quality tag,
    and resolves via resolvers.createMeta.  Returns a list of source dicts.
    """
    try:
        sources = []
        #print "PRIMEWIRE get_sources %s" % url
        if url == None: return sources
        url = urlparse.urljoin(self.base_link, url)
        #result = proxies.request(url, 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        links_m = []
        trailers = []
        if testing == False:
            try:
                matches = re.findall(r'\"(http[s]?://www.youtube.*?)\"', result)
                for match in matches:
                    try:
                        if 'youtube.com' in match and '"' not in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer')
        links = client.parseDOM(result, 'tbody')
        for i in links:
            try:
                url = client.parseDOM(i, 'a', ret='href')[0]
                # Unwrap up to two levels of redirect parameters before 'url'.
                try:
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except:
                    pass
                try:
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except:
                    pass
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if 'http' not in url:
                    raise Exception()
                for u in AVOID_DOMAINS:
                    if u in url:
                        raise Exception()
                # CSS class encodes the rip quality; unknown classes are skipped.
                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts':
                    quality = 'CAM'
                elif quality == 'quality_dvd':
                    quality = 'SD'
                else:
                    raise Exception()
                #print "%s --- %s" % (self.name,url)
                links_m = resolvers.createMeta(url, self.name, self.logo, quality, links_m, key)
                # NOTE(review): links_m accumulates across rows, so appending
                # the whole list each iteration looks like it duplicates
                # earlier entries in `sources` — confirm against createMeta.
                sources += [l for l in links_m]
                if testing and len(sources) > 0:
                    break
            except:
                pass
        self.log('SUCCESS', 'get_sources', 'links : %s' % len(sources), dolog=testing)
        return sources
    except Exception as e:
        self.log('ERROR', 'get_sources', '%s' % e, dolog=testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Slug-guessing provider: derives several candidate URL slugs from the
    title (with/without year, with/without the part after ':'; shows get
    /s<season>/e<episode>), then for each slug fetches the watch page,
    checks the year for movies, detects quality from the <title>, and
    resolves links from three places: the '1strow'/'n1strow' buttons,
    self.returnFinalLink, and g2g/ytid iframes (which may split into a
    two-part video).  Returns a (possibly empty) list of source dicts.
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        url_arr = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # Build slug candidates: punctuation stripped, spaces -> dashes.
        if 'episode' in data and 'season' in data:
            url0 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'], data['episode'])
            url_arr.append(url0)
        else:
            url1 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
            url2 = (data['title'].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "-%s" % (data['year'])
            url_arr.append(url1)
            url_arr.append(url2)
            try:
                title = data['title']
                title = title.split(':')
                title = title[0]
                url3 = (title.translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
                url_arr.append(url3)
            except:
                pass
        # Same candidates again using only the part before ':' in the title.
        if 'episode' in data and 'season' in data:
            try:
                url1 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'], data['episode'])
                url_arr.append(url1)
            except:
                pass
        else:
            try:
                url4 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower()
                url5 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,.')).replace(' ', '-').replace('--', '-').lower() + "-%s" % (data['year'])
                url_arr.append(url4)
                url_arr.append(url5)
            except:
                pass
        # Collapse double dashes and dedupe.
        url_arr_t = []
        for u in url_arr:
            u = u.replace('--', '-')
            url_arr_t.append(u)
        url_arr = list(set(url_arr_t))
        links_m = []
        for url in url_arr:
            try:
                url = urlparse.urljoin(self.base_link, self.watch_link % url)
                # HEAD-style probe first: skip slugs that do not resolve.
                r = proxies.request(url, output='geturl', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                if r == None: raise Exception()
                r = result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                quality = '720p'
                # Strip non-ASCII so the regexes below behave.
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                if 'episode' not in data or 'season' not in data:
                    # Movies: reject pages whose Date year does not match.
                    y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
                    y = y[0] if len(y) > 0 else None
                    if ('year' in data and y != None and data['year'] != y):
                        raise Exception()
                q = client.parseDOM(r, 'title')
                q = q[0] if len(q) > 0 else None
                quality = '1080p' if ' 1080' in q else '720p'
                sub_url = None
                try:
                    sub_url = urlparse.urljoin(self.base_link, re.findall('\"(\/subs.*?.srt)\"', result)[0])
                except:
                    pass
                try:
                    poster = urlparse.urljoin(self.base_link, client.parseDOM(result, 'img', ret='src', attrs={'id': 'nameimage'})[0])
                except:
                    poster = None
                r_orig = r
                # Source 1: the '1strow' download button (id dm1).
                try:
                    r = client.parseDOM(r_orig, 'div', attrs={'id': '1strow'})[0]
                    r = client.parseDOM(r, 'a', ret='href', attrs={'id': 'dm1'})[0]
                    l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                except Exception as e:
                    log('FAIL', 'get_sources-1A', e, dolog=False)
                # Source 2: the 'n1strow' mega button.
                try:
                    r = client.parseDOM(r_orig, 'div', attrs={'id': 'n1strow'})[0]
                    r = client.parseDOM(r, 'a', ret='href', attrs={'id': 'mega'})[0]
                    l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
                    for ll in l:
                        if ll != None and 'key' in ll.keys():
                            links_m.append(ll)
                except Exception as e:
                    log('FAIL', 'get_sources-1B', e, dolog=False)
                # Source 3: provider-specific final-link helper.
                try:
                    r = self.returnFinalLink(url)
                    if r != None:
                        l = resolvers.createMeta(r, self.name, self.logo, quality, [], key, poster=poster, vidtype='Movie', sub_url=sub_url, testing=testing)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                except Exception as e:
                    log('FAIL', 'get_sources-2', e, dolog=False)
                # Source 4: g2g / ytid iframes, possibly split in two parts.
                try:
                    r = client.parseDOM(result, 'iframe', ret='src')
                    r2 = [i for i in r if 'g2g' in i or 'ytid' in i]
                    for r in r2:
                        try:
                            if 'http' not in r and self.urlhost in r:
                                r = 'http:' + r
                            elif 'http' not in r:
                                r = self.base_link + r
                            r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                            r = client.parseDOM(r, 'iframe', ret='src')[0]
                            part2 = False
                            if '.php' in r:
                                # A '2.php' sibling page holds part 2 of the video.
                                r = self.base_link + r
                                rx = r.replace('.php', '2.php')
                                r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                                r = client.parseDOM(r, 'iframe', ret='src')[0]
                                try:
                                    rx = proxies.request(rx, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                                    rx = re.sub(r'[^\x00-\x7F]+', ' ', rx)
                                    rx = client.parseDOM(rx, 'iframe', ret='src')[0]
                                    if 'http' not in rx:
                                        rx = 'http:' + rx
                                    part2 = True
                                except:
                                    pass
                            if 'http' not in r:
                                r = 'http:' + r
                            if 'youtube' in r:
                                vidtype = 'Trailer'
                                qualityt = '720p'
                                r = r.replace('?showinfo=0', '')
                            else:
                                vidtype = 'Movie'
                                qualityt = quality
                            if part2:
                                # Two-part video: emit Part-1 and Part-2 entries.
                                l = resolvers.createMeta(r, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, txt='Part-1', sub_url=sub_url, testing=testing)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                                l = resolvers.createMeta(rx, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, txt='Part-2', sub_url=sub_url, testing=testing)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                            else:
                                l = resolvers.createMeta(r, self.name, self.logo, qualityt, [], key, poster=poster, vidtype=vidtype, sub_url=sub_url, testing=testing)
                                for ll in l:
                                    if ll != None and 'key' in ll.keys():
                                        links_m.append(ll)
                        except:
                            pass
                except Exception as e:
                    log('FAIL', 'get_sources-3', e, dolog=False)
            except Exception as e:
                log('FAIL', 'get_sources-3.1', e, dolog=False)
        for l in links_m:
            if l != None and 'key' in l.keys():
                sources.append(l)
        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Search the site for the title, validate candidate pages by quality
    span and year, then resolve every `link_server_*` script URL (both
    double- and single-quoted variants) via resolvers.createMeta.

    `url` is either a direct page URL or an urlencoded query string with
    title/year/season/episode.  Returns a (possibly empty) list of source
    dicts; never raises — the outer handler logs and returns `sources`.

    Fix: the search-failure fallback used `urls == [self.base_link]`
    (a no-op comparison), so on any search error `urls` stayed empty and
    the base-link fallback was never tried.  Changed to an assignment.
    """
    try:
        sources = []
        if url == None:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        urls = []
        if not str(url).startswith('http'):
            try:
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'year' in data:
                    year = data['year']
                try:
                    episode = data['episode']
                except:
                    pass
                query = {'keyword': title}
                search_url = urlparse.urljoin(self.base_link, '/search.html')
                search_url = search_url + '?' + urllib.urlencode(query)
                result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                r = client.parseDOM(result, 'div', attrs={'class': 'wrapper'})
                try:
                    r = r[1]
                except:
                    raise Exception()
                # Each <figure> is one search hit: (href, title).
                r1 = client.parseDOM(r, 'figure')
                r2 = []
                for res in r1:
                    l = client.parseDOM(res, 'a', ret='href')[0]
                    t = client.parseDOM(res, 'div', attrs={'class': 'title'})[0]
                    r = (l, t)
                    r2.append(r)
                r = r2
                if 'season' in data:
                    # Strip parenthesised suffixes, then split "<name> <season>".
                    r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
                    url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
                    url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                    url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
                    url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    for i in url:
                        urls.append(urlparse.urljoin(self.base_link, i[0]))
                else:
                    for i in r:
                        if cleantitle.get(title) in cleantitle.get(i[1]):
                            urls.append(urlparse.urljoin(self.base_link, i[0]))
            except:
                # BUGFIX: was `urls == [self.base_link]` — a comparison whose
                # result was discarded, leaving urls empty on search failure.
                urls = [self.base_link]
        links_m = []
        page = None
        for url in urls:
            try:
                page = result = proxies.request(url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                quality = '480p'
                type = 'BRRIP'
                # The site uses both the misspelled and correct class names.
                try:
                    atr = client.parseDOM(result, 'span', attrs={'class': 'quanlity'})[0]
                    q, t = cleantitle.getQuality(atr)
                    if q != None:
                        quality = q
                        type = t
                except:
                    try:
                        atr = client.parseDOM(result, 'span', attrs={'class': 'quality'})[0]
                        q, t = cleantitle.getQuality(atr)
                        if q != None:
                            quality = q
                            type = t
                    except:
                        pass
                try:
                    atr = client.parseDOM(result, 'span', attrs={'class': 'year'})[0]
                except:
                    atr = ''
                try:
                    atr_release = client.parseDOM(result, 'div', attrs={'class': 'meta'})[1]
                except:
                    atr_release = ''
                if 'season' in data:
                    pass
                else:
                    # Accept the page only if the year (+/- 1) appears in it.
                    resultx = result if str(int(year)) in atr or str(int(year) + 1) in atr or str(int(year) - 1) in atr else None
                    if resultx == None:
                        resultx = result if str(int(year)) in atr_release or str(int(year) + 1) in atr_release or str(int(year) - 1) in atr_release else None
                    if resultx == None: raise Exception()
                #print result
                #r = client.parseDOM(result, 'article', attrs = {'class': 'player current'})[0]
                #r = client.parseDOM(r, 'iframe', ret='src')[0]
                #r = r.split('?')
                # Double-quoted `link_server_* = "...";` script variables.
                try:
                    servers = re.findall(r'link_server_.*\"(.*)\";', page)
                    for server in servers:
                        try:
                            if 'http' not in server:
                                server = 'http:' + server
                            result = proxies.request(server, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
                            server = client.parseDOM(result, 'iframe', ret='src')[0]
                            if 'http' not in server:
                                server = 'http:' + server
                            links_m = resolvers.createMeta(server, self.name, self.logo, quality, links_m, key, testing=testing)
                        except Exception as e:
                            pass
                        if testing and len(links_m) > 0:
                            break
                except Exception as e:
                    pass
                # Single-quoted variant, resolved directly without a fetch.
                try:
                    servers = re.findall(r'link_server_.*\'(.*)\';', page)
                    for server in servers:
                        if server != None:
                            if 'http' not in server:
                                server = 'http:' + server
                            try:
                                links_m = resolvers.createMeta(server, self.name, self.logo, quality, links_m, key, testing=testing)
                            except:
                                pass
                except:
                    pass
            except:
                pass
        for link in links_m:
            sources.append(link)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape playable sources from a title page whose links are base64-encoded
    in `url=` query parameters inside each `tbody` row.

    :param url: provider-relative title page path (joined onto self.base_link)
    :param key: opaque title key used by cleantitle.title_from_key for logging
    :param testing: when True, suppresses some logging and trailer collection
    :return: list of source metadata dicts accumulated via resolvers.createMeta
    """
    try:
        sources = []
        # respect the per-provider enable/disable setting
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        url = urlparse.urljoin(self.base_link, url)
        #result = proxies.request(url, 'choose_tabs', proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        links_m = []
        trailers = []
        # collect YouTube trailer links only outside test runs
        if testing == False:
            try:
                matches = re.findall(r'\"(http[s]?://www.youtube.*?)\"', result)
                for match in matches:
                    try:
                        #print match
                        if 'youtube.com' in match and '"' not in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
        links = client.parseDOM(result, 'tbody')
        # NOTE(review): riptypex is set to the raw warning_message text when present,
        # which does not look like a rip-type token — confirm intended.
        try:
            riptypex = client.parseDOM(result, 'div', attrs={'class': 'warning_message'})[0]
        except:
            riptypex = 'BRRIP'
        for i in links:
            try:
                # NOTE(review): loop rebinds `url` (shadows the parameter) — intentional?
                url = client.parseDOM(i, 'a', ret='href')[0]
                # unwrap possible redirector layers (u=, q=), then the final url= param
                try:
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                except:
                    pass
                try:
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                except:
                    pass
                url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                # the target link is base64-encoded in the query string
                url = base64.b64decode(url)
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')
                if 'http' not in url:
                    raise Exception()
                # skip known-bad hosting domains
                for u in AVOID_DOMAINS:
                    if u in url:
                        raise Exception()
                # the row's span class encodes the advertised quality
                quality = client.parseDOM(i, 'span', ret='class')[0]
                if quality == 'quality_cam' or quality == 'quality_ts':
                    # quality_ts
                    quality = '480p'
                    riptype = 'CAM'
                elif quality == 'quality_dvd':
                    quality = '720p'
                    riptype = 'BRRIP'
                else:
                    riptype = riptypex
                    quality = '480p'
                links_m = resolvers.createMeta(url, self.name, self.logo, quality, links_m, key, riptype=riptype, testing=testing)
            except:
                pass
        for l in links_m:
            sources.append(l)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Convert a pre-scraped list of link dicts into source metadata.

    :param url: list of dicts with keys 'url', 'title', 'ext', 'src', 'lang'
                (schema inferred from usage — confirm against the caller)
    :param key: opaque title key used for logging
    :return: list of source metadata dicts built via resolvers.createMeta
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        processed = []
        for link in url:
            # skip multi-part files (.partN), non-URLs, and duplicates
            if re.match('((?!\.part[0-9]).)*$', link['url'], flags=re.IGNORECASE) and '://' in link['url'] and link['url'] not in processed:
                host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link['url'].strip().lower()).netloc)[0].split('.')[0]
                scheme = urlparse.urlparse(link['url']).scheme
                #if host in hostDict and scheme:
                if scheme:
                    # infer quality from markers in title or URL
                    if '1080' in link['title'] or '1080' in link['url']:
                        quality = '1080p'
                    elif '720' in link['title'] or '720' in link['url']:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    file_ext = '.mp4'
                    # NOTE(review): the first two branches build the same txt even when
                    # 'src' is empty — looks like a copy/paste remnant; confirm intent.
                    if len(link['ext']) > 0 and len(link['ext']) < 4 and len(link['src']) > 0:
                        txt = '%s (.%s)' % (link['src'], link['ext'])
                        file_ext = '.%s' % link['ext']
                    elif len(link['ext']) > 0 and len(link['ext']) < 4 and len(link['src']) == 0:
                        txt = '%s (.%s)' % (link['src'], link['ext'])
                        file_ext = '.%s' % link['ext']
                    elif (len(link['ext']) == 0 or len(link['ext']) > 3) and len(link['src']) > 0:
                        txt = '%s' % link['src']
                    else:
                        txt = ''
                    if 'trailer' in link['title'].lower():
                        sources = resolvers.createMeta(link['url'], self.name, self.logo, quality, sources, key, lang=link['lang'], txt=txt, file_ext=file_ext, vidtype='Trailer', testing=testing)
                    else:
                        sources = resolvers.createMeta(link['url'], self.name, self.logo, quality, sources, key, lang=link['lang'], txt=txt, file_ext=file_ext, testing=testing)
                    processed.append(link['url'])
        if self.fetchedtoday > 0:
            self.msg = 'Fetched today: %s' % str(self.fetchedtoday)
            log('INFO', 'get_sources', self.msg, dolog=not testing)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def createMeta(self, url, provider, logo, quality, links, key, riptype, vidtype='Movie', lang='en', sub_url=None, txt='', file_ext='.mp4', testing=False, poster=None, headers=None, page_url=None):
    """Resolve a vidcloud.icu host link into playable source metadata dicts.

    Two paths: a 'vidcloud.icu/download' page is scraped for direct .mp4
    links; any other page is scraped for its server list and each entry is
    re-dispatched through resolvers.createMeta.

    :param url: host page url to resolve
    :param links: accumulator list — resolved entries are appended to it
    :param riptype: rip type label propagated into each entry
    :return: `links` with any newly resolved entries appended

    Fixes vs. previous revision:
    - the exception-fallback dict referenced an undefined name `urlhost`
      (NameError inside the handler); it now uses self.name like the
      primary dict.
    - `.replace('&', '&')` was a no-op; it now unescapes '&amp;' -> '&'.
    """
    files_ret = []
    orig_url = url
    # in test mode just record the raw url and return
    if testing == True:
        links.append(url)
        return links
    if control.setting('Host-%s' % name) == False:
        log('INFO', 'createMeta', 'Host Disabled by User')
        return links
    try:
        if 'vidcloud.icu/load' in url:
            raise Exception('No mp4 Video found')
        elif 'vidcloud.icu/download' in url:
            headersx = {'Referer': url, 'User-Agent': client.agent()}
            page_data, head, ret, cookie = client.request(url, output='extended', headers=headersx)
            try:
                cookie = re.findall(r'Set-Cookie:(.*)', str(ret), re.MULTILINE)[0].strip()
            except:
                pass
            headersx['Cookie'] = cookie
            mp4_vids = re.findall(r'\"(http.*?.mp4.*?)\"', page_data)
            items = []
            for u in mp4_vids:
                # sanitize the scraped url (FIX: unescape &amp; — the old
                # replace('&', '&') was a no-op)
                u = u.strip().replace(' ', '%20').replace('&amp;', '&')
                fs = client.getFileSize(u, headers=headersx)
                q = qual_based_on_fs(quality, fs)
                online = check(u, headers=headersx)
                urldata = client.b64encode(json.dumps('', encoding='utf-8'))
                params = client.b64encode(json.dumps('', encoding='utf-8'))
                # NOTE(review): this serializes the `headers` parameter, not
                # headersx (which includes the cookie) — confirm intended.
                if headersx != None:
                    paramsx = {'headers': headers}
                    params = client.b64encode(json.dumps(paramsx, encoding='utf-8'))
                items.append({'quality': q, 'riptype': riptype, 'src': u, 'fs': fs, 'online': online, 'params': params, 'urldata': urldata, 'allowsStreaming': False})
            seq = 0
            for item in items:
                durl = url
                vidurl = item['src']
                allowsStreaming = item['allowsStreaming']
                quality = item['quality']
                riptype = item['riptype']
                fs = item['fs']
                online = item['online']
                params = item['params']
                urldata = item['urldata']
                try:
                    log(type='INFO', method='createMeta', err=u'durl:%s ; res:%s; fs:%s' % (durl, quality, fs))
                    files_ret.append({
                        'source': self.name, 'maininfo': txt, 'titleinfo': '', 'quality': quality,
                        'vidtype': vidtype, 'rip': riptype, 'provider': provider, 'orig_url': orig_url,
                        'url': vidurl, 'durl': durl, 'urldata': urldata, 'params': params,
                        'logo': logo, 'online': online, 'allowsDownload': self.allowsDownload,
                        'resumeDownload': self.resumeDownload, 'allowsStreaming': allowsStreaming,
                        'key': key, 'enabled': True, 'fs': int(fs), 'file_ext': file_ext,
                        'ts': time.time(), 'lang': lang, 'sub_url': sub_url, 'poster': poster,
                        'subdomain': client.geturlhost(url), 'page_url': page_url,
                        'misc': {'player': 'iplayer', 'gp': True}, 'seq': seq})
                except Exception as e:
                    log(type='ERROR', method='createMeta', err=u'%s' % e)
                    # fallback entry with unknown rip type
                    # (FIX: 'source' previously used undefined name `urlhost`)
                    files_ret.append({
                        'source': self.name, 'maininfo': txt, 'titleinfo': '', 'quality': quality,
                        'vidtype': vidtype, 'rip': 'Unknown', 'provider': provider, 'orig_url': orig_url,
                        'url': vidurl, 'durl': durl, 'urldata': urldata, 'params': params,
                        'logo': logo, 'online': online, 'allowsDownload': self.allowsDownload,
                        'resumeDownload': self.resumeDownload, 'allowsStreaming': allowsStreaming,
                        'key': key, 'enabled': True, 'fs': int(fs), 'file_ext': file_ext,
                        'ts': time.time(), 'lang': lang, 'sub_url': sub_url, 'poster': poster,
                        'subdomain': client.geturlhost(url), 'page_url': page_url,
                        'misc': {'player': 'iplayer', 'gp': True}, 'seq': seq})
                seq += 1
        elif url != None:
            online = True
            result = client.request(orig_url, httpsskip=True)
            if 'Sorry, this video reuploading' in result:
                online = False
            if online == True:
                # scrape the embedded server list and dispatch each entry
                vids = client.parseDOM(result, 'ul', attrs={'class': 'list-server-items'})[0]
                vids = client.parseDOM(vids, 'li', attrs={'class': 'linkserver'}, ret='data-video')
                vids = list(set(vids))
                for video_url in vids:
                    video_urlx = video_url
                    if 'http' not in video_urlx:
                        video_urlx = 'http:' + video_urlx
                    if video_urlx != None and 'vidcloud.icu/load' not in video_urlx:
                        log(type='INFO', method='createMeta', err=u'url:%s requires additional processing' % video_urlx)
                        # follow redirects to the real host url first
                        video_url1 = '%s' % client.request(video_urlx, followredirect=True, httpsskip=True, output='geturl')
                        if video_url1 != None and 'http' in video_url1 and 'vidcloud.icu' not in video_url1:
                            try:
                                files_ret = resolvers.createMeta(video_url1, provider, logo, quality, files_ret, key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing, headers=headers, page_url=page_url)
                            except Exception as e:
                                log(type='ERROR', method='createMeta', err=u'%s' % e)
                    elif video_urlx != None and 'vidcloud.icu/load' in video_urlx:
                        log(type='INFO', method='createMeta', err=u'url:%s requires additional processing' % video_urlx)
                        # convert the /load embed into its /download page
                        id = re.findall(r'id=(.*?)&', video_urlx)[0]
                        u = 'https://vidcloud.icu/download?id=%s' % id
                        res = client.request(u)
                        mp4_vids = re.findall(r'http.*?mp4', res)
                        if len(mp4_vids) > 0:
                            try:
                                files_ret = resolvers.createMeta(u, provider, logo, quality, files_ret, key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing, headers=headers, page_url=page_url, urlhost='vidcloud.icu')
                            except Exception as e:
                                log(type='ERROR', method='createMeta', err=u'%s' % e)
                        elif len(mp4_vids) == 0 and video_url == vids[len(vids) - 1] and len(files_ret) == 0:
                            raise Exception('No mp4 Video found')
    except Exception as e:
        log('FAIL', 'createMeta', '%s' % e)
    # register every valid entry as a partial source and hand it to the caller
    for fr in files_ret:
        if fr != None and 'key' in fr.keys():
            control.setPartialSource(fr, self.name)
            links.append(fr)
    if len(files_ret) > 0:
        log('SUCCESS', 'createMeta', 'Successfully processed %s link >>> %s' % (provider, orig_url), dolog=self.init)
    else:
        log('FAIL', 'createMeta', 'Failed in processing %s link >>> %s' % (provider, orig_url), dolog=self.init)
    log('INFO', 'createMeta', 'Completed', dolog=self.init)
    return links
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape IMDb video-gallery pages for trailers/clips/featurettes and keep
    only the largest file per (title, quality) pair.

    :param url: IMDb item page url
    :param key: opaque title key used for logging
    :return: list of source metadata dicts
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        # get IMDb item page
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
        r = client.parseDOM(result, 'div', attrs={'class': 'aux-content-widget-3'})[1]
        # get types of videos available
        types = {'content_type-trailer': 'Trailer', 'content_type-clip': 'Clip', 'content_type-interview': 'Interviews', 'content_type-other': 'Misc.', 'content_type-featurette': 'Featurette'}
        re_map_types = {'Featurette': 'Featurette', 'Clip': 'Trailer', 'Trailer': 'Trailer', 'Interviews': 'Interviews', 'Misc.': 'Misc.'}
        r1 = client.parseDOM(r, 'a', ret='href')
        # map each gallery category page to its list of video page urls
        types_map = {}
        for r1_url in r1:
            type = 'Trailer'
            for t in types.keys():
                if t in r1_url:
                    type = types[t]
                    break
            if type not in types_map.keys():
                types_map[type] = []
            result_r1 = proxies.request(urlparse.urljoin(self.base_link, r1_url), proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
            r2 = client.parseDOM(result_r1, 'div', attrs={'class': 'search-results'})[0]
            r2a = client.parseDOM(r2, 'a', ret='href')
            for r2a1 in r2a:
                if 'ref_' in r2a1:
                    types_map[type].append(urlparse.urljoin(self.base_link, r2a1))
        links = []
        quality = u'720p'
        # selection_map[title][quality] -> list of {'fs': size, 'src': meta}
        selection_map = {}
        for vidtype in types_map.keys():
            page_links = types_map[vidtype]
            for page_link in page_links:
                try:
                    res = proxies.request(page_link, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                    # the encodings JSON blob embedded in the page lists the video urls
                    vidurls = re.findall(r'encodings\":(.*?\])', res)[0]
                    poster = client.parseDOM(res, 'meta', attrs={'itemprop': 'image'}, ret='content')[0]
                    vidurls_json = json.loads(vidurls)
                    txt = re.findall(r'<title>(.*?)</title>', res)[0]
                    txt = txt.replace('"', '')
                    for viddata in vidurls_json:
                        try:
                            vidurl = viddata['videoUrl']
                            if '.mp4' in vidurl:
                                if txt not in selection_map.keys():
                                    selection_map[txt] = {}
                                quality = viddata['definition']
                                vidtype = re_map_types[vidtype]
                                try:
                                    l = resolvers.createMeta(vidurl, self.name, self.logo, quality, [], key, vidtype=vidtype, testing=testing, txt=txt, poster=poster)
                                    l = l[0]
                                    if l['quality'] in selection_map[txt].keys():
                                        selection_map[txt][l['quality']].append({'fs': int(l['fs']), 'src': l})
                                    else:
                                        selection_map[txt][l['quality']] = [{'fs': int(l['fs']), 'src': l}]
                                    if testing == True:
                                        links.append(l)
                                        break
                                except Exception as e:
                                    log('ERROR', 'get_sources-0', '%s' % e, dolog=not testing)
                        except Exception as e:
                            log('ERROR', 'get_sources-1', '%s' % e, dolog=not testing)
                except Exception as e:
                    log('ERROR', 'get_sources-2', '%s' % e, dolog=not testing)
                if testing == True and len(links) > 0:
                    break
            if testing == True and len(links) > 0:
                break
        #print selection_map
        # keep only the largest file for each (title, quality) combination
        for sel_titles in selection_map.keys():
            for sel in selection_map[sel_titles].keys():
                qls = selection_map[sel_titles][sel]
                files = sorted(qls, key=lambda k: k['fs'], reverse=True)
                file = files[0]
                links.append(file['src'])
        for link in links:
            if link != None and 'key' in link.keys():
                sources.append(link)
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False): try: sources = [] if url == None or self.testparser == False: return sources myts = str(((int(time.time())/3600)*3600)) #self.log('GRABBER','get_sources-1', '%s' % url, dolog=False) if not str(url).startswith('http'): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title'] year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year'] try: episode = data['episode'] except: pass query = {'keyword': title} search_url = urlparse.urljoin(self.base_link, '/search') search_url = search_url + '?' + urllib.urlencode(query) result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired) #self.log('GRABBER','get_sources-2', '%s' % search_url, dolog=False) #print result r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0] r = client.parseDOM(r, 'div', attrs = {'class': 'item'}) r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r] r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0] r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r] if 'season' in data: r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r] #title += '%01d' % int(data['season']) url = [(i[0], re.findall('(.+?) 
(\d+)$', i[1])) for i in r] url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0] url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])] #for i in url: #print i[2],i[0],i[1] #print '%01d' % int(data['season']) == '%01d' % int(i[2]) url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])] else: url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])] #print("r1", cleantitle.get(title),url,r) url = url[0][0] url = urlparse.urljoin(self.base_link, url) r2 = url.split('.')[-1] except: raise Exception() try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0] except: pass #self.log('GRABBER','get_sources-3', '%s' % url, dolog=False) referer = url #result = client.request(url, limit='0') result = proxies.request(url, headers=self.headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired) try: myts = re.findall(r'data-ts="(.*?)"', result)[0] #print myts #myts = result.xpath(".//body[@class='watching']//@data-ts")[0] except: print "could not parse ts ! will use generated one." print myts trailers = [] links_m = [] if testing == False: try: matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result) for match in matches: try: #print match if 'youtube.com' in match: match = match.replace('embed/','watch?v=') trailers.append(match) except: pass except Exception as e: pass for trailer in trailers: links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer') #self.log('GRABBER','get_sources-3.1', '%s' % url, dolog=False) #hash_url = urlparse.urljoin(self.base_link, '/user/ajax/menu-bar') # int(time.time()) #query = {'ts': myts} #query.update(self.__get_token(query)) #hash_url = hash_url + '?' 
+ urllib.urlencode(query) #r1, headers, content, cookie2 = proxies.request(hash_url, headers=self.headers, limit='0', output='extended', cookie=cookie1, proxy_options=proxy_options, use_web_proxy=self.proxyrequired) #print "%s" % cookie1 #print "%s" % cookie2 #self.log('GRABBER','get_sources-3.2', '%s' % hash_url, dolog=False) alina = client.parseDOM(result, 'title')[0] atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1] if 'season' in data: years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)] mychk = False for y in years: if y in atr: mychk = True result = result if mychk ==True else None else: result = result if year in atr else None #print("r3",result) try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower() except: quality = 'hd' if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd' or 'hd ' in quality: quality = 'HD' else: quality = 'SD' result = client.parseDOM(result, 'ul', attrs = {'data-range-id':"0"}) #print result servers = [] #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'}) servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a')) servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers] servers = [(i[0], ''.join(i[1][:1])) for i in servers] #print("r3",servers) try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)] except: pass for s in servers[:4]: try: headers = {'X-Requested-With': 'XMLHttpRequest'} hash_url = urlparse.urljoin(self.base_link, self.hash_link) query = {'ts': myts, 'id': s[0], 'update': '0'} query.update(self.__get_token(query)) hash_url = hash_url + '?' 
+ urllib.urlencode(query) headers['Referer'] = urlparse.urljoin(url, s[0]) headers['Cookie'] = self.headers['Cookie'] #self.log('GRABBER','get_sources-3.9', '%s' % hash_url, dolog=False) result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired) #self.log('GRABBER','get_sources-4', '%s' % result, dolog=False) result = json.loads(result) if 'error' in result and result['error'] == True: query.update(self.__get_token(query, token_error=True)) hash_url = hash_url + '?' + urllib.urlencode(query) result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired) result = json.loads(result) query = {'id': s[0], 'update': '0'} query.update(self.__get_token(query, token_error=True)) else: query = {'id': s[0], 'update': '0'} query.update(self.__get_token(query)) url = url + '?' + urllib.urlencode(query) #result = client2.http_get(url, headers=headers) quality = 'SD' if s[1] == '1080': quality = '1080p' if s[1] == '720': quality = 'HD' if s[1] == 'CAM': quality == 'CAM' if result['target'] != "": pass else: query = {'id':result['params']['id'], 'token':result['params']['token']} grabber = result['grabber'] if '?' in grabber: grabber += '&' + urllib.urlencode(query) else: grabber += '?' 
+ urllib.urlencode(query) if grabber!=None and not grabber.startswith('http'): grabber = 'http:'+grabber #self.log('GRABBER','url', '%s' % grabber, dolog=False) result = proxies.request(grabber, headers=headers, referer=url, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired) result = json.loads(result) #print result if 'data' in result.keys(): result = [i['file'] for i in result['data'] if 'file' in i] for i in result: links_m = resolvers.createMeta(i, self.name, self.logo, quality, links_m, key) else: target = result['target'] if target!=None and not target.startswith('http'): target = 'http:' + target links_m = resolvers.createMeta(target, self.name, self.logo, quality, links_m, key) if testing and len(links_m) > 0: break except Exception as e: print e pass sources += [l for l in links_m] self.log('SUCCESS', 'get_sources','links : %s' % len(sources), dolog=testing) return sources except Exception as e: self.log('ERROR', 'get_sources','%s' % e, dolog=testing) return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Resolve sources via the site's server-list AJAX endpoint and its
    obfuscated token scripts (uncensored1/2/3 variants).

    :param url: either a (page_url, episode) pair or a plain page url string
    :return: list of source metadata dicts
    """
    #try:
    try:
        sources = []
        #print url
        if url is None:
            return sources
        base_link = self.base_link
        # `url` may be a (page, episode) tuple or a bare string; extract the
        # numeric media id (mid) from the page slug either way
        try:
            if url[0].startswith('http'):
                base_link = url[0]
            mid = re.findall('-(\d+)', url[0])[-1]
        except:
            if url.startswith('http'):
                base_link = url
            mid = re.findall('-(\d+)', url)[-1]
        try:
            if len(url[1]) > 0:
                episode = url[1]
            else:
                episode = None
        except:
            episode = None
        #print mid
        links_m = []
        trailers = []
        headers = {'Referer': self.base_link}
        # collect YouTube trailers from the page only outside test runs
        if testing == False:
            try:
                u = urlparse.urljoin(self.base_link, url[0])
                print u
                r = client.request(u, headers=headers, IPv4=True)
                #regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
                #matches = re.finditer(regex, r, re.MULTILINE)
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        #print match
                        if 'youtube.com' in match:
                            match = match.replace('embed/','watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer')
        try:
            # fetch the server list for this media id via the XHR endpoint
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            #print u
            r = client.request(u, headers=headers, XHR=True, IPv4=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                #print r
                try:
                    try:
                        ep = re.findall('episode.*?(\d+):.*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode is None) or (int(ep) == int(episode)):
                        # fetch the token script and decode it with whichever
                        # obfuscation variant the page uses
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url, IPv4=True)
                        #print script
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script and '_y=' in script:
                            params = self.uncensored3(script)
                        else:
                            raise Exception()
                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        r = client.request(u, IPv4=True)
                        url = json.loads(r)['playlist'][0]['sources']
                        url = [i['file'] for i in url if 'file' in i]
                        url = [client.googletag(i) for i in url]
                        url = [i[0] for i in url if i]
                        for s in url:
                            links_m = resolvers.createMeta(s['url'], self.name, self.logo, '720p', links_m, key, vidtype='Movie')
                        if testing and len(links_m) > 0:
                            break
                except:
                    pass
        except:
            pass
        sources += [l for l in links_m]
        return sources
    except Exception as e:
        control.log('Error %s > get_sources %s' % (self.name, e))
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Fetch TrailerAddict-style extras (trailers, featurettes, etc.) for a
    title, decoding each page's obfuscated embed data via unwise2.

    :param url: tadata api url returning JSON with the TA page url and poster
    :return: list of source metadata dicts
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO','get_sources','Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL','get_sources','url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources
        UA = client.agent()
        # get TA JSON data from tadata api
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
        resultx = json.loads(str(result))
        ta_url = resultx['url']
        poster = resultx['image'] if 'image' in resultx else None
        #print ta_url
        result = proxies.request(ta_url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
        # get types of videos available
        # NOTE(review): 'interview' is assigned as an extra_type below but has
        # no entry in this dict, so types[extra_type] raises for interviews
        # (swallowed by the per-video except) — confirm intended.
        types = {'trailer':'Trailer', 'feature_trailer':'Trailer', 'theatrical_trailer':'Trailer', 'behind_the_scenes':'Behind the scenes', 'deleted_scene':'Deleted Scenes', 'featurette':'Featurette', 'featured_box':'Featurette', 'music_video':'Music Video', 'misc_scene':'Misc.'}
        quality_maps = {'4k':'4K','2k':'2K','1080p':'1080p', 'HD':'720p', 'M':'480p', 'S':'360p'}
        extras = []
        items = client.parseDOM(result, 'div', attrs = {'id':'featured_c'})[0]
        m_title = client.parseDOM(items, 'div', attrs = {'class':'movie_info'})
        #print m_title
        fail_bool = False
        for video in m_title:
            try:
                # throttle requests slightly; strip whitespace-escape residue
                time.sleep(0.1)
                video = video.replace('rttttttttttt','')
                video = video.replace('rtttttttttt','')
                video = video.replace('\r','')
                video = video.replace('\t','')
                video = video.replace('\n','')
                title = client.parseDOM(video, 'a', attrs = {'class':'m_title'})[0]
                ta_tage_url = client.parseDOM(video, 'a', ret = 'href')[0]
                if 'http' not in ta_tage_url:
                    ta_tage_url = urlparse.urljoin(self.base_link, ta_tage_url)
                try:
                    vid_date = client.parseDOM(video, 'span', attrs = {'class':'m_date'})[0]
                    vid_date = vid_date.replace(',','')
                except:
                    vid_date = ''
                # classify the extra from its title text
                # Trailers
                if title.lower() == 'trailer':
                    extra_type = 'trailer'
                elif title.lower() == 'feature trailer':
                    extra_type = 'feature_trailer'
                elif title.lower() == 'theatrical trailer':
                    extra_type = 'theatrical_trailer'
                # Behind the scenes
                elif 'behind the scenes' in title.lower():
                    extra_type = 'behind_the_scenes'
                # Featurette
                elif 'featurette' in title.lower():
                    extra_type = 'featurette'
                # Music Video
                elif 'music video' in title.lower():
                    extra_type = 'music_video'
                # Interview
                elif 'interview' in title.lower():
                    extra_type = 'interview'
                    if title.lower().startswith('interview') or title.lower().startswith('generic interview'):
                        title = title.split('nterview - ')[-1].split('nterview- ')[-1]
                # Deleted scene
                elif 'deleted scene' in title.lower():
                    extra_type = 'deleted_scene'
                # Trailers
                elif 'trailer' in title.lower():
                    extra_type = 'trailer'
                else:
                    extra_type = 'misc_scene'
                # process ta_tage_url
                #print ta_tage_url
                result = proxies.request(ta_tage_url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                data = None
                # prefer the inline packed script; otherwise reconstruct the
                # helper-service url from page fragments
                js = re.findall(r'eval\(function\(w,i,s,e\).*;', result)
                if len(js) > 0:
                    data = js[0]
                else:
                    try:
                        jsd = re.findall(r'src="/util/client.js?c=(.*?)"><', result)[0].strip()
                    except:
                        try:
                            jsd = re.findall(r'</style>rttr<!-- (.*?) -->rrttrtt<div id=\"embed_box\">', result)[0].strip()
                        except:
                            jsd = re.findall(r'</style>.*<!-- (.*?) -->.*<div id=\"embed_box\">', result, flags=re.DOTALL)[0].strip()
                    jsd_url = tau % (urllib.quote_plus(jsd), client.b64encode(str(int(time.time()))), client.b64encode(ta_tage_url), client.b64encode(UA), control.setting('ver'), client.b64encode(control.setting('ca')))
                    data = proxies.request(jsd_url)
                    if data == None:
                        log('ERROR', 'get_sources-1', '%s' % jsd_url, dolog=True)
                if data != None:
                    # '423' is the helper service's "unavailable" marker
                    if str(data) == '423':
                        fail_bool = True
                        raise Exception("Helper site is currently unavailable !")
                    try:
                        data = unwise2.unwise_process(data)
                    except:
                        raise Exception("unwise2 could not process data")
                else:
                    raise Exception("URL Post Data Unavailable")
                files = re.findall(r'source src="([^"]+)"', data)
                quals = re.findall(r'res=\"(.*?)\"', data)
                processed = []
                # one entry per distinct quality for this extra
                for i in range(0, len(files)):
                    v_file = files[i]
                    if quals[i] in quality_maps.keys():
                        quality = quality_maps[quals[i]]
                    else:
                        quality = '720p'
                    #print extra_type
                    if quality not in processed:
                        #print v_file
                        processed.append(quality)
                        extras.append( {'etype': extra_type, 'date': vid_date, 'type': types[extra_type], 'url' : v_file, 'quality': quality, 'title': title, 'thumb': poster} )
                if testing == True and len(extras) > 0:
                    break
            except Exception as e:
                log('ERROR', 'get_sources-2', '%s' % e, dolog=True)
                if fail_bool == True:
                    raise Exception("%s" % e)
        links = []
        #print extras
        for extra in extras:
            links = resolvers.createMeta(extra['url'], self.name, self.logo, extra['quality'], links, key, vidtype=extra['type'], testing=testing, txt=extra['title'], poster=extra['thumb'])
            if testing == True and len(links) > 0:
                break
        for i in links:
            sources.append(i)
        if len(sources) == 0:
            log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape `player-data` anchors from a title page (optionally filtered by
    episode) into playable sources.

    :param url: (page_url, episode) pair; episode may be None for movies
    :return: list of source metadata dicts
    """
    #try:
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources
        links_m = []
        trailers = []
        headers = self.headers
        headers = {'Referer': self.base_link}
        sub_url = None
        u = url[0]
        ep = url[1]
        #r = client.request(u, headers=headers IPv4=True)
        r = proxies.request(u, headers=self.headers, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
        # collect YouTube trailer links only outside test runs
        if testing == False:
            try:
                #regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
                #matches = re.finditer(regex, r, re.MULTILINE)
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        #print match
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass
        for trailer in trailers:
            links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
        try:
            # anchors carry the host link in their player-data attribute;
            # filter by episode-data when an episode is requested
            if ep == None:
                srcs = client.parseDOM(r, 'a', ret='player-data')
            else:
                srcs = client.parseDOM(r, 'a', ret='player-data', attrs={'episode-data': str(ep)})
            try:
                elem = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
                qual = source_utils.check_sd_url(elem)
                riptype = source_utils.check_sd_url_rip(elem)
            except Exception as e:
                qual = '480p'
                riptype = 'BRRIP'
            try:
                poster = client.parseDOM(r, 'div', attrs={'class': 'dm-thumb'})[0]
                poster = client.parseDOM(poster, 'img', ret='src')[0]
            except:
                poster = None
            for s in srcs:
                try:
                    # protocol-relative links get an explicit https scheme
                    if s.startswith('//'):
                        s = 'https:%s' % s
                    links_m = resolvers.createMeta(s, self.name, self.logo, qual, links_m, key, poster=poster, riptype=riptype, vidtype='Movie', sub_url=sub_url, testing=testing)
                    if testing == True and len(links_m) > 0:
                        break
                except:
                    pass
        except:
            pass
        sources += [l for l in links_m]
        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources
        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Fetch extras (trailers/featurettes/clips) from a JSON "TA data" endpoint.

    url: API url returning JSON with a 'video' list of extras
         (each entry has 'title', 'url', 'thumb').
    key: title key used for logging/metadata lookups.
    testing: when True, stop after the first resolved link.
    Returns a list of source dicts produced by resolvers.createMeta.
    """
    try:
        sources = []
        # NOTE(review): control.setting returns a string in Kodi, so `== False`
        # may never match — kept as-is to preserve existing behavior.
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url is None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources

        # get TA JSON data from tadata api
        result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
        resultx = json.loads(str(result))
        extras = resultx['video']

        # Map substrings of the extra's title to a display video type;
        # anything unrecognized is filed under 'Misc.'.
        types = {'trailer': 'Trailer', 'featurette': 'Featurette'}
        quality = '720p'
        links = []

        for extra in extras:
            vidtype_e = extra['title']
            vidtype = 'Misc.'
            for t, mapped in types.items():
                if t in vidtype_e.lower():
                    vidtype = mapped
                    break
            links = resolvers.createMeta(extra['url'], self.name, self.logo, quality, links, key, vidtype=vidtype, testing=testing, txt=extra['title'], poster=extra['thumb'])
            # In testing mode one working link is enough.
            if testing and links:
                break

        sources.extend(links)

        if not sources:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Scrape streams via the site's tokenized server-list API.

    url: page path (movies) or (page_path, episode) pair (shows); the numeric
         media id is the last '-NNN' group in the path.
    key: title key used for logging/metadata lookups.
    testing: when True, skip trailer harvesting.
    Returns a list of source dicts produced by resolvers.createMeta.
    """
    #try:
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            return sources

        # url may be a (path, episode) pair or a bare path string; the tuple
        # indexing raises for strings, so the except branch handles that case.
        base_link = self.base_link
        try:
            if url[0].startswith('http'):
                base_link = url[0]
            mid = re.findall('-(\d+)', url[0])[-1]
        except:
            if url.startswith('http'):
                base_link = url
            mid = re.findall('-(\d+)', url)[-1]

        try:
            if len(url[1]) > 0:
                episode = url[1]
            else:
                episode = None
        except:
            episode = None

        #print mid

        links_m = []
        trailers = []
        headers = {'Referer': self.base_link}

        u = urlparse.urljoin(self.base_link, url[0])
        #print u
        #r = client.request(u, headers=headers, IPv4=True)
        r = proxies.request(u, headers=headers, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

        # Quality / rip type from the page badge; default to SD.
        try:
            elem = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            qual = source_utils.check_sd_url(elem)
            riptype = source_utils.check_sd_url_rip(elem)
        except Exception as e:
            qual = '480p'
            riptype = 'BRRIP'

        try:
            poster = client.parseDOM(r, 'div', attrs={'class': 'dm-thumb'})[0]
            poster = client.parseDOM(poster, 'img', ret='src')[0]
        except:
            poster = None

        if testing == False:
            # Harvest YouTube links on the page as trailers (skipped in testing mode).
            try:
                #regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
                #matches = re.finditer(regex, r, re.MULTILINE)
                matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(r)
                for match in matches:
                    try:
                        #print match
                        if 'youtube.com' in match:
                            match = match.replace('embed/', 'watch?v=')
                            trailers.append(match)
                    except:
                        pass
            except Exception as e:
                pass

            for trailer in trailers:
                links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)

        try:
            # Server list is returned as JSON-wrapped HTML keyed by 'html'.
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            #print u
            #r = client.request(u, headers=headers, XHR=True, IPv4=True)
            r = proxies.request(u, headers=headers, XHR=True, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)

            for eid in r:
                #print r
                try:
                    sub_url = None
                    # Episode number from the label; 0 means "not an episode".
                    try:
                        ep = re.findall('episode.*?(\d+):.*?', eid[2].lower())[0]
                    except:
                        ep = 0

                    if (episode is None) or (int(ep) == int(episode)):
                        # Fetch the token script and decode it with whichever
                        # obfuscation scheme it matches (uncensored1/2/3).
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        #script = client.request(url, IPv4=True)
                        script = proxies.request(url, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)
                        #print script

                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script and '_y=' in script:
                            params = self.uncensored3(script)
                        else:
                            raise Exception()

                        # Primary source endpoint with decoded x/y tokens;
                        # fall back to the plain embed endpoint when empty.
                        u = urlparse.urljoin(self.base_link, self.sourcelink % (eid[0], params['x'], params['y']))
                        #print u
                        #r = client.request(u, IPv4=True)
                        r = proxies.request(u, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

                        if r == None or len(r) == 0:
                            u = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                            #print u
                            #r = client.request(u, IPv4=True)
                            r = proxies.request(u, IPv4=True, proxy_options=proxy_options, use_web_proxy=self.proxyrequired)

                        # Response may be a JW-style playlist or a bare {'src': ...}.
                        try:
                            url = json.loads(r)['playlist'][0]['sources']
                        except:
                            url = [{'file': json.loads(r)['src']}]

                        try:
                            url = [i['file'] for i in url]
                        except:
                            url = [url['file']]

                        # Optional subtitle track from the playlist.
                        try:
                            sub_url = json.loads(r)['playlist'][0]['tracks'][0]['file']
                        except:
                            pass

                        vidtype = 'Movie'
                        if int(ep) > 0:
                            vidtype = 'Show'

                        for s in url:
                            links_m = resolvers.createMeta(s, self.name, self.logo, qual, links_m, key, poster=poster, riptype=riptype, vidtype=vidtype, sub_url=sub_url, testing=testing)
                except:
                    pass
        except:
            pass

        sources += [l for l in links_m]

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
            return sources

        log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Collect direct gvideo streams from iframes embedded on the page.

    url: relative page path joined onto self.base_link.
    key: title key passed through to resolvers.createMeta.
    testing: when True, stop after the first resolved link.
    Returns a list of source dicts (raw gvideo entries plus resolver links).
    """
    try:
        sources = []
        if url == None:
            return sources

        page = urlparse.urljoin(self.base_link, url)
        #r = client.request(url)
        html = proxies.request(page, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
        frames = client.parseDOM(html, 'iframe', ret='src')

        links = []
        for frame in frames:
            try:
                # Only absolute urls or vidstreaming embeds are usable.
                if not (frame.startswith('http') or 'vidstreaming' in frame):
                    raise Exception()
                #url = client.request(u)
                embed_html = proxies.request(frame, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                streams = client.parseDOM(embed_html, 'source', ret='src')
                for stream in streams:
                    # Raw gvideo entry; if googletag raises here, the
                    # remaining streams of this frame are skipped (as before).
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(stream)[0]['quality'],
                        'language': 'en',
                        'url': stream,
                        'direct': True,
                        'debridonly': False,
                    })
                    # NOTE(review): `client.googletag` looks like it was meant
                    # to be `directstream.googletag`; the except fallback makes
                    # this always yield '720p' — left unchanged to preserve behavior.
                    try:
                        quality_tag = client.googletag(stream)[0]['quality']
                    except:
                        quality_tag = u'720p'
                    try:
                        links = resolvers.createMeta(stream, self.name, self.logo, quality_tag, links, key, vidtype='Show')
                    except:
                        pass
                    if testing and len(links) > 0:
                        break
            except:
                pass

        for entry in links:
            sources.append(entry)
        return sources
    except Exception as e:
        control.log('ERROR %s get_sources > %s' % (self.name, e))
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Find streams by probing several candidate url slugs built from the title.

    url: query string of title/year (+season/episode for shows).
    key: title key passed through to resolvers.createMeta.
    Returns a list of source dicts, including split two-part videos.
    """
    try:
        sources = []
        #print '%s ---------- %s' % (self.name,url)
        if url == None:
            return sources

        url_arr = []
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        # Build candidate slugs: full title, title+year, title before ':',
        # each sanitized of filesystem/punctuation chars.
        # NOTE: str.translate(None, chars) is Python-2-only deletion syntax.
        if 'episode' in data and 'season' in data:
            url0 = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'], data['episode'])
            url_arr.append(url0)
        else:
            url1 = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
            url2 = (data['title'].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower() + "-%s" % (data['year'])
            url_arr.append(url1)
            url_arr.append(url2)

        try:
            title = data['title']
            title = title.split(':')
            title = title[0]
            url3 = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
            url_arr.append(url3)
        except:
            pass

        if 'episode' in data and 'season' in data:
            try:
                url1 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower() + "/s%s/e%s" % (data['season'], data['episode'])
                url_arr.append(url1)
            except:
                pass
        else:
            try:
                url4 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
                url5 = (data['title'].split(':')[0].translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower() + "-%s" % (data['year'])
                url_arr.append(url4)
                url_arr.append(url5)
            except:
                pass

        # De-duplicate candidates (order is not significant here).
        url_arr = list(set(url_arr))

        links = []
        for url in url_arr:
            try:
                #print url
                url = urlparse.urljoin(self.base_link, self.watch_link % url)
                #print url
                # First a cheap 'geturl' probe to see if the slug resolves at all.
                r = proxies.request(url, output='geturl', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                #print r
                if r == None:
                    raise Exception()
                # `result` keeps the raw page; `r` gets ASCII-stripped for regexing.
                r = result = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                #print "resp ===== %s" % r
                quality = '720p'
                r = re.sub(r'[^\x00-\x7F]+', ' ', r)

                if 'episode' not in data or 'season' not in data:
                    # Movie pages: verify the release year to avoid wrong matches.
                    y = re.findall('Date\s*:\s*.+?>.+?(\d{4})', r)
                    y = y[0] if len(y) > 0 else None
                    #print y
                    if ('year' in data and y != None and data['year'] != y):
                        #print 'year not found'
                        raise Exception()

                # Guess quality from the page <title>.
                q = client.parseDOM(r, 'title')
                q = q[0] if len(q) > 0 else None
                quality = '1080p' if ' 1080' in q else '720p'
                #print quality

                #r = client.parseDOM(r, 'div', attrs = {'id': '5throw'})[0]
                #r = client.parseDOM(r, 'a', ret='href', attrs = {'rel': 'nofollow'})
                try:
                    r = client.parseDOM(result, 'iframe', ret='src')
                    # Only g2g / ytid hosted iframes carry playable content.
                    r2 = [i for i in r if 'g2g' in i or 'ytid' in i]
                    #print r2
                    for r in r2:
                        try:
                            if 'http' not in r and self.urlhost in r:
                                r = 'http:' + r
                            elif 'http' not in r:
                                r = self.base_link + r
                            #print r
                            r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                            r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                            r = client.parseDOM(r, 'iframe', ret='src')[0]
                            part2 = False
                            if '.php' in r:
                                # Some videos are split in two parts served by
                                # x.php / x2.php; try to resolve the second part too.
                                r = self.base_link + r
                                rx = r.replace('.php', '2.php')
                                r = proxies.request(r, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                                r = re.sub(r'[^\x00-\x7F]+', ' ', r)
                                r = client.parseDOM(r, 'iframe', ret='src')[0]
                                try:
                                    rx = proxies.request(rx, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
                                    rx = re.sub(r'[^\x00-\x7F]+', ' ', rx)
                                    rx = client.parseDOM(rx, 'iframe', ret='src')[0]
                                    if 'http' not in rx:
                                        rx = 'http:' + rx
                                    part2 = True
                                except:
                                    pass
                            if 'http' not in r:
                                r = 'http:' + r
                            #print r
                            if 'youtube' in r:
                                vidtype = 'Trailer'
                                qualityt = '720p'
                                r = r.replace('?showinfo=0', '')
                            else:
                                vidtype = 'Movie'
                                qualityt = quality
                            if part2:
                                #print '2-part video'
                                links = resolvers.createMeta(r, self.name, self.logo, qualityt, links, key, vidtype=vidtype, txt='Part-1')
                                links = resolvers.createMeta(rx, self.name, self.logo, qualityt, links, key, vidtype=vidtype, txt='Part-2')
                            else:
                                links = resolvers.createMeta(r, self.name, self.logo, qualityt, links, key, vidtype=vidtype)
                        except:
                            pass
                except Exception as e:
                    control.log('ERROR %s get_sources3 > %s' % (self.name, e.args))
            except:
                pass

        for i in links:
            sources.append(i)
        #print sources
        return sources
    except Exception as e:
        control.log('ERROR %s get_sources > %s' % (self.name, e))
        return sources
def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
    """Einthusan provider: search the site, then pull the main MP4/HLS streams
    plus trailers and music-video clips for the matched title.

    url: query string of title/year (+premiered/tvshowtitle for shows).
    key: title key used for logging/metadata lookups.
    testing: when True, skip trailer and music-video harvesting.
    Returns a list of source dicts produced by resolvers.createMeta.
    """
    try:
        sources = []
        if control.setting('Provider-%s' % name) == False:
            log('INFO', 'get_sources', 'Provider Disabled by User')
            log('INFO', 'get_sources', 'Completed')
            return sources
        if url == None:
            log('FAIL', 'get_sources', 'url == None. Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
            log('INFO', 'get_sources', 'Completed')
            return sources

        # Map Einthusan clip types onto the add-on's display categories.
        REMAP_TYPE = {
            'trailer': 'Trailer',
            'feature_trailer': 'Trailer',
            'theatrical_trailer': 'Trailer',
            'behind_the_scenes': 'Behind the scenes',
            'deleted_scene': 'Deleted Scenes',
            'featurette': 'Featurette',
            'featured_box': 'Featurette',
            'music-video': 'Music Video',
            'clip': 'Misc.'
        }

        year = None
        episode = None
        season = None

        log('INFO', 'get_sources-1', 'data-items: %s' % url, dolog=False)

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = cleantitle.simpletitle(title)
        # Year from 'premiered' for shows, else from 'year'; None when absent.
        try:
            year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
        except:
            try:
                year = data['year']
            except:
                year = None

        # Build progressively shorter search queries by dropping trailing
        # title words, each tried with and without the year.
        title_s = title.split(' ')
        queries = []
        for ts in range(len(title_s)):
            titles = ('+'.join(str(x) for x in title_s[:len(title_s) - ts]))
            queries.append('%s+%s' % (titles, year))
            queries.append(titles)

        rs = []
        for q in queries:
            page_count = 1
            search_url = self.base_link + '/movie/results/' + '?lang=hindi&page=' + str(page_count) + '&query=%s' % q
            log('INFO', 'get_sources-2', 'Searching: %s' % search_url)
            r, res = request_einthusan(search_url)
            try:
                movies = client.parseDOM(res, 'section', attrs={'id': 'UIMovieSummary'})[0]
                movies = client.parseDOM(movies, 'li')
                for block in movies:
                    try:
                        blocka = client.parseDOM(block, 'div', attrs={'class': 'block1'})[0]
                        loc = self.base_link + client.parseDOM(blocka, 'a', ret='href')[0]
                        poster = "http:" + client.parseDOM(blocka, 'img', ret='src')[0]
                        titlex = client.parseDOM(block, 'h3')[0]
                        yearx = client.parseDOM(block, 'div', attrs={'class': 'info'})[0]
                        yearx = client.parseDOM(yearx, 'p')[0]
                        # Accept the first result whose year matches.
                        if str(year) in str(yearx):
                            rs.append([titlex, yearx, loc, poster])
                            log('INFO', 'get_sources-3', 'match-page-url: %s | %s' % (loc, titlex))
                            break
                    except:
                        pass
                if len(rs) > 0:
                    break
            except:
                pass

        if len(rs) > 0:
            links_m = []
            vidtype = 'Movie'
            riptype = 'BRRIP'
            quality = '720p'

            # rs entries are [title, year, page_url, poster].
            for r in rs:
                video_urls = []
                trailers = []
                music_vids = []
                poster = r[3]
                page_url = r[2]

                # Main movie streams (MP4 + HLS) from the data API.
                eindata1, htm = GetEinthusanData(page_url)
                eindata1 = json.loads(json.dumps(eindata1))
                log('INFO', 'get_sources-4-A', 'GetEinthusanData: %s' % eindata1)
                video_urls.append(eindata1['MP4Link'])
                video_urls.append(eindata1['HLSLink'])

                if testing == False:
                    # Harvest YouTube trailer links from the page HTML.
                    try:
                        matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(htm)
                        matches = list(set(matches))
                        for match in matches:
                            try:
                                if 'youtube.com' in match:
                                    match = match.replace('embed/', 'watch?v=')
                                    trailers.append(match)
                                    log('INFO', 'get_sources-4-B', 'trailers: %s' % match)
                            except:
                                pass
                    except Exception as e:
                        log('FAIL', 'get_sources-4', '%s' % e)

                if testing == False:
                    # Collect music-video / clip entries, each with its own
                    # MP4 + HLS links and remapped display type.
                    try:
                        musicblock = client.parseDOM(htm, 'section', attrs={'id': 'UICompactMovieClipList'})[0]
                        musicblock = client.parseDOM(musicblock, 'li')
                        music_vids = []
                        locx = None
                        for block in musicblock:
                            try:
                                music_vids_s = []
                                locx = self.base_link + client.parseDOM(block, 'a', attrs={'class': 'title'}, ret='href')[0]
                                thumbx = "http:" + client.parseDOM(block, 'img', ret='src')[0]
                                titlex = client.parseDOM(block, 'a', attrs={'class': 'title'})[0]
                                titlex = client.parseDOM(titlex, 'h5')[0]
                                eindata1, htm1 = GetEinthusanData(locx)
                                eindata1 = json.loads(json.dumps(eindata1))
                                log('INFO', 'get_sources-4-C', 'GetEinthusanData: %s' % eindata1)
                                # NOTE: shadows the builtin `type` (pre-existing).
                                type = eindata1['type']
                                if type in REMAP_TYPE.keys():
                                    type = REMAP_TYPE[type]
                                else:
                                    type = REMAP_TYPE['clip']
                                music_vids_s.append([eindata1['MP4Link'], type])
                                music_vids_s.append([eindata1['HLSLink'], type])
                                music_vids.append([titlex, thumbx, music_vids_s, locx])
                            except Exception as e:
                                log('FAIL', 'get_sources-5A', '%s : %s' % (e, locx))
                    except Exception as e:
                        log('FAIL', 'get_sources-5B', '%s' % e)

                for vid in trailers:
                    try:
                        l = resolvers.createMeta(vid, self.name, self.logo, '720p', [], key, poster=poster, vidtype='Trailer', testing=testing, page_url=page_url)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        log('FAIL', 'get_sources-6', 'Could not add: %s' % vid)

                for vid in music_vids:
                    try:
                        for v in vid[2]:
                            l = resolvers.createMeta(v[0], self.name, self.logo, '720p', [], key, poster=vid[1], vidtype=v[1], testing=testing, txt=vid[0], page_url=vid[3])
                            for ll in l:
                                if ll != None and 'key' in ll.keys():
                                    links_m.append(ll)
                    except:
                        log('FAIL', 'get_sources-7', 'Could not add: %s' % v[0])

                for vid in video_urls:
                    try:
                        l = resolvers.createMeta(vid, self.name, self.logo, quality, [], key, poster=poster, riptype=riptype, vidtype=vidtype, testing=testing, page_url=page_url)
                        for ll in l:
                            if ll != None and 'key' in ll.keys():
                                links_m.append(ll)
                    except:
                        log('FAIL', 'get_sources-8', 'Could not add: %s' % vid)

            # Keep only fully-formed resolver entries (those carrying a 'key').
            for l in links_m:
                if l != None and 'key' in l.keys():
                    sources.append(l)

        if len(sources) == 0:
            log('FAIL', 'get_sources', 'Could not find a matching title: %s' % cleantitle.title_from_key(key))
        else:
            log('SUCCESS', 'get_sources', '%s sources : %s' % (cleantitle.title_from_key(key), len(sources)))
        log('INFO', 'get_sources', 'Completed')
        return sources
    except Exception as e:
        log('ERROR', 'get_sources', '%s' % e)
        log('INFO', 'get_sources', 'Completed')
        return sources