def get_source(self, start_url, title, year, season, episode, start_time):
    """Scrape magnet links from the torrent listing at start_url.

    Each match yields (magnet href, listing title, size text); results are
    appended to self.sources as debrid-only torrent entries.
    """
    try:
        hits = 0
        headers = {'User-Agent': client.agent()}
        html = client.request(start_url, headers=headers)
        # Groups captured per row: magnet href, display title, size cell.
        rows = re.compile(
            'class="iaconbox center floatright".+?title="Torrent magnet link" href="(.+?)">.+?class="cellMainLink">(.+?)</a>.+?class="nobr center">(.+?)</span></td>',
            re.DOTALL).findall(html)
        for magnet, name, size in rows:
            # Unwrap the redirect service so only the raw magnet URI is kept.
            magnet = magnet.replace('https://mylink.me.uk/?url=', '')
            tag = quality_tags.check_sd_url(name)
            hits += 1
            self.sources.append({'source': 'Torrent',
                                 'quality': size + ' ' + tag,
                                 'scraper': self.name,
                                 'url': magnet,
                                 'direct': False,
                                 'debridonly': True})
        if dev_log == 'true':
            send_log(self.name, time.time() - start_time, hits, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
        return []
def get_source(self, item_url, title, year, start_time):
    """Collect hoster links from the player menu embedded in item_url.

    Follows the page's first iframe, reads every anchor in the
    'menuPlayer' list and appends one non-direct source per link.
    """
    try:
        found = 0
        headers = {'User-Agent': client.agent()}
        page = client.request(item_url, headers=headers)
        frame_src = client.parseDOM(page, 'iframe', ret='src')[0]
        frame_html = client.request(frame_src, headers=headers)
        menu = client.parseDOM(frame_html, 'ul', attrs={'class': 'menuPlayer'})[0]
        for link in client.parseDOM(menu, 'a', ret='href'):
            tag = quality_tags.check_sd_url(link)
            if tag == 'SD' and 'openload' in link:
                # Openload pages expose the release name in a meta tag;
                # use it to refine the quality estimate.
                meta = client.request(link, headers=headers)
                meta = client.parseDOM(meta, 'meta', ret='content')[0]
                tag, info = quality_tags.get_release_quality(meta, None)
            found += 1
            host = link.split('//')[1].replace('www.', '')
            host = host.split('/')[0].split('.')[0].title()
            self.sources.append({'source': host,
                                 'quality': tag,
                                 'scraper': self.name,
                                 'url': link,
                                 'direct': False})
        if dev_log == 'true':
            send_log(self.name, time.time() - start_time, found, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
#hdvix().scrape_movie('Black Panther', '2018', 'tt1825683', False)
def get_source(self, item_url, title, year, start_time):
    """Harvest player links for item_url; vidcloud embeds are resolved
    to direct file URLs (with a quoted User-Agent/Referer hint appended),
    everything else is added as a non-direct hoster link.

    Returns self.sources in all cases.
    """
    try:
        total = 0
        headers = {'User-Agent': client.agent()}
        page = client.request(item_url, headers=headers)
        anchors = dom_parser.parse_dom(page, 'a', req='player-data')
        pairs = [(a.attrs['player-data'], a.content) for a in anchors if a]
        if 'Season' in year:
            # Episode mode: keep anchors whose label matches the wanted
            # season string and force SD since no quality text is shown.
            pairs = [(href, 'SD') for href, label in pairs if label in year]
        else:
            pairs = [(href, label) for href, label in pairs]
        for href, label in pairs:
            tag = quality_tags.check_sd_url(label)
            if 'vidcloud' in href:
                if href.startswith('//'):
                    href = 'https:' + href
                embed = client.request(href, headers=headers)
                stream = re.findall(
                    '''file\s*:\s*['"](.+?)['"].+?type['"]\s*:\s*['"](.+?)['"]''',
                    embed, re.DOTALL)[0]
                host = stream[1]
                # Percent-encode the agent: it is carried inside a URL param.
                href = stream[0] + '|User-Agent=%s&Referer=https://vidcloud.icu/' % urllib.quote(client.agent())
                direct = True
            else:
                host = href.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                direct = False
            total += 1
            self.sources.append({'source': host,
                                 'quality': tag,
                                 'scraper': self.name,
                                 'url': href,
                                 'direct': direct})
        if dev_log == 'true':
            send_log(self.name, time.time() - start_time, total, title, year)
        return self.sources
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
        return self.sources
#seriesonline8().scrape_movie('Black Panther', '2018', 'tt1825683', False)
#seriesonline8().scrape_episode('Suits','2011','','8','5','','')
def get_source(self, item_url, title, year, start_time):
    """Harvest player links for item_url; vidcloud embeds are resolved
    to direct file URLs, everything else is added as a non-direct link.

    Fix: the User-Agent appended after '|' in the direct URL is now
    percent-encoded with urllib.quote. Agent strings contain spaces,
    slashes and parentheses that otherwise corrupt the header-hint
    querystring; this also matches the sibling scraper that already
    quotes it.
    """
    try:
        count = 0
        headers = {'User-Agent': client.agent()}
        OPEN = client.request(item_url, headers=headers)
        Endlinks = dom_parser.parse_dom(OPEN, 'a', req='player-data')
        Endlinks = [(i.attrs['player-data'], i.content) for i in Endlinks if i]
        if 'Season' in year:
            # Episode mode: match the season label and default to SD.
            Endlinks = [(i[0], 'SD') for i in Endlinks if i[1] in year]
        else:
            Endlinks = [(i[0], i[1]) for i in Endlinks if i]
        for link, quality in Endlinks:
            qual = quality_tags.check_sd_url(quality)
            if 'vidcloud' in link:
                link = 'https:' + link if link.startswith('//') else link
                data = client.request(link, headers=headers)
                link = re.findall(
                    r'''file\s*:\s*['"](.+?)['"].+?type['"]\s*:\s*['"](.+?)['"]''',
                    data, re.DOTALL)[0]
                host = link[1]
                # Quote the agent before embedding it in the URL parameter.
                link = link[0] + '|User-Agent=%s&Referer=https://vidcloud.icu/' % urllib.quote(client.agent())
                direct = True
            else:
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                direct = False
            count += 1
            self.sources.append({'source': host,
                                 'quality': qual,
                                 'scraper': self.name,
                                 'url': link,
                                 'direct': direct})
        if dev_log == 'true':
            end_time = time.time() - start_time
            send_log(self.name, end_time, count, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
        return []
def get_source(self, item_url, title, year, start_time):
    """Harvest player links for item_url; vidcloud embeds are resolved
    to direct file URLs, everything else is added as a non-direct link.

    Fix: the User-Agent appended after '|' in the direct URL is now
    percent-encoded with urllib.quote. Raw agent strings contain spaces,
    slashes and parentheses that corrupt the header-hint querystring;
    the sibling scraper in this file already quotes it.
    """
    try:
        print('PASSEDURL >>>>>>' + item_url)
        count = 0
        headers = {'User-Agent': client.agent()}
        OPEN = client.request(item_url, headers=headers)
        Endlinks = dom_parser.parse_dom(OPEN, 'a', req='player-data')
        Endlinks = [(i.attrs['player-data'], i.content) for i in Endlinks if i]
        if 'Season' in year:
            # Episode mode: match the season label and default to SD.
            Endlinks = [(i[0], 'SD') for i in Endlinks if i[1] in year]
        else:
            Endlinks = [(i[0], i[1]) for i in Endlinks if i]
        for link, quality in Endlinks:
            qual = quality_tags.check_sd_url(quality)
            if 'vidcloud' in link:
                link = 'https:' + link if link.startswith('//') else link
                data = client.request(link, headers=headers)
                link = re.findall(
                    r'''file\s*:\s*['"](.+?)['"].+?type['"]\s*:\s*['"](.+?)['"]''',
                    data, re.DOTALL)[0]
                host = link[1]
                # Quote the agent before embedding it in the URL parameter.
                link = link[0] + '|User-Agent=%s&Referer=https://vidcloud.icu/' % urllib.quote(client.agent())
                direct = True
            else:
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                direct = False
            count += 1
            self.sources.append({'source': host,
                                 'quality': qual,
                                 'scraper': self.name,
                                 'url': link,
                                 'direct': direct})
        if dev_log == 'true':
            end_time = time.time() - start_time
            send_log(self.name, end_time, count, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
        return []
#seriesonline8().scrape_movie('Black Panther', '2018', 'tt1825683', False)
#seriesonline8().scrape_episode('Suits','2011','','8','5','','')
def get_source(self, item_url, title, year, start_time):
    """Scrape hoster links from the player menu reached via item_url's iframe
    and append one non-direct source entry per anchor to self.sources."""
    try:
        n_links = 0
        headers = {'User-Agent': client.agent()}
        landing = client.request(item_url, headers=headers)
        iframe_url = client.parseDOM(landing, 'iframe', ret='src')[0]
        player_page = client.request(iframe_url, headers=headers)
        player_list = client.parseDOM(player_page, 'ul',
                                      attrs={'class': 'menuPlayer'})[0]
        hrefs = client.parseDOM(player_list, 'a', ret='href')
        for href in hrefs:
            quality = quality_tags.check_sd_url(href)
            if quality == 'SD' and 'openload' in href:
                # The openload page's meta content holds the release name,
                # which gives a more precise quality reading than the URL.
                content = client.request(href, headers=headers)
                content = client.parseDOM(content, 'meta', ret='content')[0]
                quality, info = quality_tags.get_release_quality(content, None)
            n_links += 1
            hostname = href.split('//')[1].replace('www.', '')
            hostname = hostname.split('/')[0].split('.')[0].title()
            self.sources.append({'source': hostname,
                                 'quality': quality,
                                 'scraper': self.name,
                                 'url': href,
                                 'direct': False})
        if dev_log == 'true':
            elapsed = time.time() - start_time
            send_log(self.name, elapsed, n_links, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
#hdvix().scrape_movie('Black Panther', '2018', 'tt1825683', False)
def get_source(self, start_url, title, year, season, episode, start_time):
    """Parse magnet rows from the torrent listing at start_url and append
    debrid-only torrent entries to self.sources."""
    try:
        matched = 0
        headers = {'User-Agent': client.agent()}
        html = client.request(start_url, headers=headers)
        # Groups captured per row: magnet href, size text, quality text.
        rows = re.compile(
            'class="imagnet icon16" href="(.+?)">.+?<font color=#004E98>(.+?)</font>.+?><b>(.+?)</b></a',
            re.DOTALL).findall(html)
        for magnet, size, label in rows:
            tag = quality_tags.check_sd_url(label)
            matched += 1
            self.sources.append({'source': 'Torrent',
                                 'quality': tag + ' ' + size,
                                 'scraper': self.name,
                                 'url': magnet,
                                 'direct': False,
                                 'debridonly': True})
        if dev_log == 'true':
            send_log(self.name, time.time() - start_time, matched, title, year)
    except Exception as argument:
        if dev_log == 'true':
            error_log(self.name, argument)
        return []