def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        url = urlparse.urljoin(self.base_link, self.search_link_2 % (self.search_link % (clean_title, int(year))))
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        items = []
        clean_title = cleantitle.geturl(title) + '-' + year
        search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
        r = cache.get(client.request, 1, search_url)
        r = client.parseDOM(r, 'div', {'class': 'col-sm-12'})
        r = client.parseDOM(r, 'div', {'class': 'col-sm-2.+?'})
        r1 = client.parseDOM(r, 'h3')
        r1 = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in r1]
        y = [re.findall(r'</i>\s*(\d{4})</span>', i) for i in r]
        items += [(r1[i], y[i]) for i in range(len(y))]
        # keep only results whose title and year both match the request
        r = [(i[0][0], i[1][0], i[0][1]) for i in items
             if (cleantitle.get(i[0][1]) == cleantitle.get(title) and i[1][0] == year)]
        url = r[0][0]
        return url
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if url is None:
            return sources
        urldata = urlparse.parse_qs(url)
        urldata = dict((i, urldata[i][0]) for i in urldata)
        clean_title = cleantitle.geturl(urldata['title'])
        start_url = self.search_link % (self.base_link, clean_title)
        data = self.scraper.get(start_url).content
        r = dom_parser2.parse_dom(data, 'button', {'id': 'iframelink'})
        links = [i.attrs['value'] for i in r]
        for i in links:
            try:
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': '1080p',
                    'language': 'en',
                    'url': i,
                    'info': [],
                    'direct': False,
                    'debridonly': False
                })
            except:
                pass
        return sources
    except:
        return sources
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        clean_title = cleantitle.geturl(data['tvshowtitle'])
        url = urlparse.urljoin(self.base_link, self.search_link_2 % (self.search_tv % (clean_title, int(season), int(episode))))
        return url
    except:
        return
def __get_movie_url(self, data):
    clean_title = cleantitle.geturl(data['title'])
    query = self.movie_path % clean_title
    url = urlparse.urljoin(self.base_link, query)
    html = client.request(url)
    token = re.findall(r'/?watch-token=(.*?)"', html)[0]
    return url + ('?watch-token=%s' % token)
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        searchTitle = cleantitle.geturl(url)
        url = self.base_link + "/episodes/%s-%sx%s" % (searchTitle, season, episode)
        req = self.scraper.get(url)
        url = self.ajax_call(req)
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        title = data['title']
        year = data['year']
        h = {'User-Agent': client.randomagent()}
        v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)
        url = '/watch?v=%s' % v
        url = urlparse.urljoin(self.base_link, url)
        # disabled cookie handshake retained from an earlier revision of this scraper
        #c = client.request(url, headers=h, output='cookie')
        #c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')
        post = urllib.urlencode({'v': v})
        u = urlparse.urljoin(self.base_link, '/video_info/iframe')
        #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
        r = client.request(u, post=post, headers=h, XHR=True, referer=url)
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]
        for i in r:
            try:
                sources.append({
                    'source': 'gvideo',
                    'quality': directstream.googletag(i)[0]['quality'],
                    'language': 'en',
                    'url': i,
                    'direct': True,
                    'debridonly': False
                })
            except:
                pass
        return sources
    except:
        return sources
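# Illustrative only: a standalone sketch of how the /video_info/iframe response above is
# reduced to direct urls. The payload shape is an assumption inferred from the parsing
# code: a JSON object whose values end in an 'url=' query fragment. Uses only the
# Python 2 stdlib; the sample data is made up.
import json
import urllib

payload = json.dumps({'360': 'token=abc&url=https%3A%2F%2Fexample.com%2Fvideo_360.mp4'})
values = json.loads(payload).values()
# take the fragment after 'url=' and percent-decode it into a playable link
links = [urllib.unquote(v.split('url=')[-1]) for v in values]
# links == ['https://example.com/video_360.mp4']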
def searchMovie(self, title, year, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        url = self.api_link % cleantitle.geturl(title)
        r = client.request(url)
        r = json.loads(r)['content']
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
        url = [i[0] for i in r if cleantitle.get(title) == cleantitle.get(i[1])][0]
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(tvshowtitle)
        search_url = urlparse.urljoin(self.base_link, self.tv_search_link % clean_title.replace('-', '+'))
        r = client.request(search_url)
        r = json.loads(r)
        url = [(r[i]['url']) for i in r if (cleantitle.get(r[i]['title']) == cleantitle.get(tvshowtitle))]
        url = source_utils.strip_domain(url[0])
        return url
    except Exception:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = {
            'title': title,
            'year': year,
            'imdb': imdb,
            'clean_title': clean_title
        }
        return urllib.urlencode(url)
    except Exception:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        data = {
            'tvshowtitle': tvshowtitle,
            'year': year,
            'imdb': imdb,
            'clean_title': cleantitle.geturl(tvshowtitle).replace('-', '+')
        }
        return urllib.urlencode(data)
    except Exception:
        return
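# Illustrative only: a minimal, self-contained round trip between the url-encoded params
# that movie()/tvshow() variants above return and the parse_qs decoding that the
# sources()/episode() methods perform. Uses only the Python 2 stdlib; the sample title,
# year and imdb id are made up.
import urllib
import urlparse

params = urllib.urlencode({'title': 'Some Movie', 'year': '2016', 'imdb': 'tt0000000'})
# parse_qs yields {key: [value, ...]}; the scrapers flatten it back to plain strings
decoded = urlparse.parse_qs(params)
decoded = dict((k, decoded[k][0]) for k in decoded)
assert decoded['title'] == 'Some Movie'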
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        self.basetester()
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title).replace('-', '+'))
        r = client.request(url, cookie='check=2')
        m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
        m = dom_parser.parse_dom(m, 'a', req='href')
        m = [(i.attrs['href']) for i in m if i.content == title]
        url = urlparse.urljoin(self.base_link, m[0])
        return url
    except:
        return
def searchMovie(self, title, year, aliases, headers):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % clean_title)
        r = self.scraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href']) for i in r if i.content == '%s (%s)' % (title, year)]
        return r[0]
    except:
        return
def searchShow(self, title, season, year, aliases, headers):
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % ('%s+Season+%01d' % (clean_title, int(season))))
        r = self.scraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        r = [(i.attrs['href']) for i in r if '%s - Season %01d' % (title, int(season)) in i.content]
        return r[0]
    except:
        return
def searchShow(self, title, season, episode, aliases, headers):
    try:
        url = None
        # probe each alias title until a request resolves to something other than the homepage
        for alias in aliases:
            url = '%s/show/%s/season/%01d/episode/%01d' % (
                self.base_link, cleantitle.geturl(alias['title']), int(season), int(episode))
            url = client.request(url, headers=headers, output='geturl', timeout='10')
            if url is not None and url != self.base_link:
                break
        return url
    except:
        return
def __get_episode_url(self, data):
    try:
        clean_title = cleantitle.geturl(data['tvshowtitle'])
        query = self.episode_path % (clean_title, data['season'], data['episode'])
        url = urlparse.urljoin(self.base_link, query)
        html = client.request(url)
        token = re.findall(r'/?watch-token=(.*?)"', html)[0]
        return url + ('?watch-token=%s' % token)
    except Exception:
        return
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        with requests.Session() as s:
            episode_link = "http://beetv.to/" + cleantitle.geturl(url['tvshowtitle']) + "-s" + url['season'] + "-e" + url['episode']
            p = s.get(episode_link)
            soup = BeautifulSoup(p.text, 'html.parser')
            iframes = soup.findAll('iframe')
            for i in iframes:
                # iframes without a src attribute would otherwise raise and abort the loop
                src = i.get('src')
                if not src:
                    continue
                if 'thevideo' in src:
                    sources.append({'source': "thevideo.me", 'quality': 'SD', 'language': "en",
                                    'url': src, 'info': '', 'direct': False, 'debridonly': False})
                if 'openload' in src:
                    sources.append({'source': "openload.co", 'quality': 'SD', 'language': "en",
                                    'url': src, 'info': '', 'direct': False, 'debridonly': False})
                if 'vshare' in src:
                    sources.append({'source': "vshare.eu", 'quality': 'SD', 'language': "en",
                                    'url': src, 'info': '', 'direct': False, 'debridonly': False})
            print(sources)
            return sources
    except:
        print("Unexpected error in Beetv Script: source", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def searchMovie(self, title, year, aliases, headers):
    try:
        url = None
        for alias in aliases:
            url = '%s/full-movie/%s' % (self.base_link, cleantitle.geturl(alias['title']))
            url = client.request(url, headers=headers, output='geturl', timeout='10')
            if url is not None and url != self.base_link:
                break
        if url is None:
            # retry with the release year appended to each alias slug
            for alias in aliases:
                url = '%s/full-movie/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                url = client.request(url, headers=headers, output='geturl', timeout='10')
                if url is not None and url != self.base_link:
                    break
        return url
    except:
        return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    try:
        self.basetester()
        query = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(tvshowtitle).replace('-', '+'))
        result = client.request(query)
        t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
        t = [cleantitle.get(i) for i in set(t) if i]
        result = re.compile(r'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)', re.DOTALL).findall(result)
        for i in result:
            if cleantitle.get(cleantitle.normalize(i[1])) in t and year in i[1]:
                url = i[0]
                url = url.encode('utf-8')
                return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        t = cleantitle.geturl(title).replace('-', '+')
        p = urllib.urlencode({'keyword': t, 'id': 1})
        r = client.request(self.search_link, post=p, XHR=True)
        try:
            r = json.loads(r)
        except:
            r = None
        r = dom_parser.parse_dom(r['content'], 'a', attrs={'class': 'ss-title'})
        url = '%s%s-e0.html' % (self.base_link, r[0].attrs['href'].replace('serie', 'episode'))
        return url
    except:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    try:
        clean_title = cleantitle.geturl(title)
        search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
        r = client.request(search_url)
        r = client.parseDOM(r, 'div', {'class': 'result-item'})
        r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
              re.sub('<.*?>', '', re.findall(r'alt="(.*?)"', i)[0]),
              dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
        r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r
             if (cleantitle.get(i[1]) == cleantitle.get(title) and i[2][0].content == year)]
        url = r[0][0]
        return url
    except Exception:
        return
def searchShow(self, title, season, aliases, headers):
    try:
        title = cleantitle.normalize(title)
        search = '%s Season %s' % (title, season)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
        r = client.request(url)
        url = re.findall(r'<a href="(.+?/movie/%s-season-%s-.+?\.html)"' % (cleantitle.geturl(title), season), r)[0]
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(data['tvshowtitle']).replace('-', '+'))
        r = client.request(url, cookie='check=2')
        m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
        m = dom_parser.parse_dom(m, 'a', req='href')
        m = [(i.attrs['href']) for i in m if i.content == data['tvshowtitle']]
        query = '%s/season-%s/episode-%s/' % (m[0], season, episode)
        url = urlparse.urljoin(self.base_link, query)
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    try:
        if url is None:
            return
        url = urlparse.parse_qs(url)
        url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
        q = cleantitle.geturl(url['tvshowtitle']).replace('-', '+')
        t = q + '+season+%s' % season
        p = urllib.urlencode({'keyword': t, 'id': 1})
        r = client.request(self.search_link, post=p, XHR=True)
        try:
            r = json.loads(r)
        except:
            r = None
        r = dom_parser.parse_dom(r['content'], 'a', attrs={'class': 'ss-title'})
        url = '%s%s-e%s.html' % (self.base_link, r[0].attrs['href'].replace('serie', 'episode'), episode)
        return url
    except:
        return
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        if url is None:
            return sources
        year = url['year']
        h = {'User-Agent': client.randomagent()}
        title = cleantitle.geturl(url['title']).replace('-', '+')
        url = urlparse.urljoin(self.base_link, self.search_link % title)
        r = requests.get(url, headers=h)
        r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
        r = r.find('a')['href']
        r = requests.get(r, headers=h)
        r = BeautifulSoup(r.content, 'html.parser')
        quality = r.find('span', {'class': 'calidad2'}).text
        url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
        if quality not in ['1080p', '720p']:
            quality = 'SD'
        valid, host = source_utils.is_host_valid(url, hostDict)
        sources.append({
            'source': host,
            'quality': quality,
            'language': 'en',
            'url': url,
            'direct': False,
            'debridonly': False
        })
        return sources
    except:
        print("Unexpected error in sources:", sys.exc_info()[0])
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return sources
def movie(self, imdb, title, localtitle, aliases, year):
    '''
    Takes movie information and returns a set of name/value pairs, encoded
    as url params. These params include the page quality and a list of
    embed-source urls.

    Keyword arguments:
    imdb -- string - imdb movie id
    title -- string - name of the movie
    localtitle -- string - regional title of the movie
    year -- string - year the movie was released

    Returns:
    url -- string - url encoded params
    '''
    try:
        clean_title = cleantitle.geturl(title).replace('-', '+')
        query = (self.search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        r = client.parseDOM(search_response, 'div', attrs={'class': 'row movie-list'})[0]
        r = dom_parser.parse_dom(r, 'a', req='href')
        url = [(i.attrs['href']) for i in r if cleantitle.get(title) in cleantitle.get(i.content)][0]
        r = client.request(url)
        quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
        r = client.parseDOM(r, 'div', attrs={'class': 'mt row'})[0]
        sources_list = []
        try:
            if client.parseDOM(r, 'div', ret='data-streamgo')[0]:
                sources_list.append('https://streamgo.me/player/%s' % client.parseDOM(r, 'div', ret='data-streamgo')[0])
        except Exception:
            pass
        try:
            if client.parseDOM(r, 'div', ret='data-server_openload')[0]:
                sources_list.append('https://openload.co/embed/%s' % client.parseDOM(r, 'div', ret='data-server_openload')[0])
        except Exception:
            pass
        data = {
            'imdb': imdb,
            'title': title,
            'localtitle': localtitle,
            'year': year,
            'quality': quality,
            'sources': sources_list
        }
        url = urllib.urlencode(data)
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    '''
    Takes episode information, finds the matching season page and its
    embed-source urls, encodes everything as name/value pairs, and returns
    a string of url params.

    Keyword arguments:
    url -- string - url params
    imdb -- string - imdb tv show id
    tvdb -- string - tvdb tv show id
    title -- string - episode title
    premiered -- string - date the episode aired (format: year-month-day)
    season -- string - the episode's season
    episode -- string - the episode number

    Returns:
    url -- string - url encoded params
    '''
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        clean_title = cleantitle.geturl(data['tvshowtitle']).replace('-', '+')
        query = (self.search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        r = client.parseDOM(search_response, 'div', attrs={'class': 'row movie-list'})[0]
        r = dom_parser.parse_dom(r, 'a', req='href')
        url = [(i.attrs['href']) for i in r if '%s - Season %01d' % (data['tvshowtitle'], int(season)) in i.content][0]
        r = client.request(url)
        r = client.parseDOM(r, 'div', attrs={'id': 'player'})[0]
        url = client.parseDOM(r, 'a', ret='href')[0]
        film_response = client.request(url)
        servers = client.parseDOM(film_response, 'div', attrs={'id': 'servers'})[0]
        r = dom_parser.parse_dom(servers, 'a', req='title')
        url = [(i) for i in r if 'Episode %02d' % (int(episode)) in i.attrs['title']]
        sources_list = []
        for i in url:
            try:
                if i.attrs['data-streamgo']:
                    sources_list.append('https://streamgo.me/player/%s' % i.attrs['data-streamgo'])
            except Exception:
                pass
            try:
                if i.attrs['data-openload']:
                    sources_list.append('https://openload.co/embed/%s' % i.attrs['data-openload'])
            except Exception:
                pass
        quality = client.parseDOM(film_response, 'span', attrs={'class': 'quality'})[0]
        data.update({
            'title': title,
            'premiered': premiered,
            'season': season,
            'episode': episode,
            'quality': quality,
            'sources': sources_list
        })
        url = urllib.urlencode(data)
        return url
    except:
        return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
    '''
    Takes episode information, finds the ts token and the list of source ids,
    encodes them as name/value pairs, and returns a string of url params.

    Keyword arguments:
    url -- string - url params
    imdb -- string - imdb tv show id
    tvdb -- string - tvdb tv show id
    title -- string - episode title
    premiered -- string - date the episode aired (format: year-month-day)
    season -- string - the episode's season
    episode -- string - the episode number

    Returns:
    url -- string - url encoded params
    '''
    try:
        data = urlparse.parse_qs(url)
        data = dict((i, data[i][0]) for i in data)
        clean_title = cleantitle.geturl(data['tvshowtitle'])
        query = (self.movie_search_path % clean_title)
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        results_list = client.parseDOM(search_response, 'div', attrs={'class': 'items'})[0]
        film_id = []
        # try progressively looser slug patterns: "<title>-0<season>", "<title>-<season>", then bare "<title>"
        film_tries = [
            r'\/' + (clean_title + '-0' + season) + r'[^-0-9](.+?)\"',
            r'\/' + (clean_title + '-' + season) + r'[^-0-9](.+?)\"',
            r'\/' + clean_title + r'[^-0-9](.+?)\"'
        ]
        for i in range(len(film_tries)):
            if not film_id:
                film_id = re.findall(film_tries[i], results_list)
            else:
                break
        film_id = film_id[0]
        query = (self.film_path % film_id)
        url = urlparse.urljoin(self.base_link, query)
        film_response = client.request(url)
        ts = re.findall(r'(data-ts=\")(.*?)(\">)', film_response)[0][1]
        sources_dom_list = client.parseDOM(film_response, 'ul', attrs={'class': 'episodes range active'})
        # if the unpadded episode number is not in the listing, retry with a zero-padded form
        if not re.findall(r'([^\/]*)\">' + episode + '[^0-9]', sources_dom_list[0]):
            episode = '%02d' % int(episode)
        sources_list = []
        for i in sources_dom_list:
            source_id = re.findall((r'([^\/]*)\">' + episode + '[^0-9]'), i)[0]
            sources_list.append(source_id)
        data.update({
            'title': title,
            'premiered': premiered,
            'season': season,
            'episode': episode,
            'ts': ts,
            'sources': sources_list,
            'id': film_id
        })
        url = urllib.urlencode(data)
        return url
    except Exception:
        return
def movie(self, imdb, title, localtitle, aliases, year):
    '''
    Takes movie information and returns a set of name/value pairs, encoded as
    url params. These params include ts (a unique identifier, used to grab
    sources) and a list of source ids.

    Keyword arguments:
    imdb -- string - imdb movie id
    title -- string - name of the movie
    localtitle -- string - regional title of the movie
    year -- string - year the movie was released

    Returns:
    url -- string - url encoded params
    '''
    try:
        clean_title = cleantitle.geturl(title)
        query = (self.movie_search_path % (clean_title))
        url = urlparse.urljoin(self.base_link, query)
        search_response = client.request(url)
        results_list = client.parseDOM(search_response, 'div', attrs={'class': 'item'})[0]
        film_id = re.findall(r'(\/watch\/)([^\"]*)', results_list)[0][1]
        query = (self.film_path % film_id)
        url = urlparse.urljoin(self.base_link, query)
        film_response = client.request(url)
        ts = re.findall(r'(data-ts=\")(.*?)(\">)', film_response)[0][1]
        sources_dom_list = client.parseDOM(film_response, 'ul', attrs={'class': 'episodes range active'})
        sources_list = []
        for i in sources_dom_list:
            source_id = re.findall(r'([\/])(.{0,6})(\">)', i)[0][1]
            sources_list.append(source_id)
        servers_dom_list = client.parseDOM(film_response, 'div', attrs={'class': 'server row'})
        servers_list = []
        data = {
            'imdb': imdb,
            'title': title,
            'localtitle': localtitle,
            'year': year,
            'ts': ts,
            'sources': sources_list,
            'id': film_id
        }
        url = urllib.urlencode(data)
        return url
    except Exception:
        return
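# Illustrative only: a standalone demonstration of why the ts extraction above indexes
# the findall result as [0][1]. With three capture groups, re.findall returns a list of
# 3-tuples, so the token itself is the second element of the first match. The sample
# html string and token value are made up.
import re

html = '<div class="watchpage" data-ts="1514764800"></div>'
matches = re.findall(r'(data-ts=")(.*?)(">)', html)
# matches == [('data-ts="', '1514764800', '">')]
ts = matches[0][1]
assert ts == '1514764800'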
def movie(self, imdb, title, localtitle, aliases, year):
    searchTitle = cleantitle.geturl(title + "-" + year)
    url = '/movies/%s/' % searchTitle
    req = self.scraper.get(self.base_link + url)
    url = self.ajax_call(req)
    return url