def __init__(self):
    """Set up the ororo.tv scraper: priority, language, API endpoints,
    credentials read from add-on settings, and the request headers
    (token auth plus a fixed User-Agent)."""
    # Scraper metadata.
    self.priority = 25
    self.language = ['en']
    self.domains = ['ororo.tv']
    # ororo.tv REST API endpoints (v2).
    self.base_link = 'https://ororo.tv'
    self.moviesearch_link = '/api/v2/movies'
    self.tvsearch_link = '/api/v2/shows'
    self.movie_link = '/api/v2/movies/%s'
    self.show_link = '/api/v2/shows/%s'
    self.episode_link = '/api/v2/episodes/%s'
    # Account credentials come from the add-on settings store.
    self.user = control.setting('ororo.user')
    self.password = control.setting('ororo.pass')
    # Headers sent with every API request.
    self.headers = {}
    self.headers['Authorization'] = self._get_auth()
    self.headers['User-Agent'] = 'Placenta for Kodi'
def __init__(self):
    """Start the settings-monitor service: initialize the Kodi monitor
    base class, apply undesirable-setting cleanup, mirror the
    'debug.reversed' setting into a window property, and log startup."""
    control.monitor_class.__init__(self)
    control.setUndesirables()
    # Expose the debug-reversed flag as a window property so other
    # components can read it without touching the settings store.
    debug_reversed = control.setting('debug.reversed')
    window.setProperty('fenomscrapers.debug.reversed', debug_reversed)
    startup_msg = '[ script.module.fenomscrapers ] Settings Monitor Service Starting...'
    xbmc.log(startup_msg, LOGNOTICE)
def get_api(self):
    """Return the Furk API key.

    Reads the key from the add-on settings; when it is empty, logs in with
    the stored username/password, persists the returned key back into
    settings, and returns it.

    Returns None when no key is stored and credentials are missing, and
    the stored value ('' on failed login) otherwise. Any error is routed
    through source_utils.scraper_error('FURK') and None is returned
    (original contract preserved).
    """
    try:
        user_name = control.setting('furk.user_name')
        user_pass = control.setting('furk.user_pass')
        api_key = control.setting('furk.api')
        if api_key == '':
            if user_name == '' or user_pass == '':
                return
            # NOTE(review): credentials are interpolated into the URL even
            # though this is a POST — consider moving them into the request
            # body; left as-is here to preserve the server contract.
            link = (self.base_link + self.login_link % (user_name, user_pass))
            # Fix: the Session was previously created and never closed,
            # leaking the connection pool. The context manager closes it.
            with requests.Session() as s:
                p = s.post(link)
            p = jsloads(p.text)
            if p['status'] == 'ok':
                api_key = p['api_key']
                control.setSetting('furk.api', api_key)
            # (dead `else: pass` removed — a failed login simply falls
            # through and returns the still-empty api_key, as before)
        return api_key
    except:
        source_utils.scraper_error('FURK')
def main():
    """Run the add-on background service once, then block until Kodi aborts.

    Checks the settings file, syncs accounts, optionally checks for add-on
    updates, cleans settings after a version bump, and waits on the
    settings monitor until shutdown.
    """
    # The original `while` loop ended in an unconditional `break`, so it
    # executed at most one pass — a guard clause expresses the identical
    # control flow.
    if control.monitor.abortRequested():
        return
    xbmc.log('[ script.module.fenomscrapers ] Service Started', LOGNOTICE)
    CheckSettingsFile().run()
    SyncMyAccounts().run()
    if control.setting('checkAddonUpdates') == 'true':
        AddonCheckUpdate().run()
    if control.isVersionUpdate():
        control.clean_settings()
        xbmc.log('[ script.module.fenomscrapers ] Settings file cleaned complete', LOGNOTICE)
    # Blocks here until Kodi signals abort.
    SettingsMonitor().waitForAbort()
    xbmc.log('[ script.module.fenomscrapers ] Service Stopped', LOGNOTICE)
def sources(self, url, hostDict):
    """Collect playable Google Drive links for the given search url.

    Queries the configured Cloudflare worker, optionally filters results
    by a simplified title, derives quality/size info from each file name,
    and returns a list of source dicts (empty on any failure).
    """
    found = []
    if not url:
        return found
    try:
        # A worker URL is required; without it there is nothing to query.
        if cloudflare_worker_url == '':
            return found
        results = getResults(url)
        if not results:
            return found
        # Optional title filtering, gated by an add-on setting.
        if control.setting('gdrive.title.chk') == 'true':
            found_filter = get_simple(url)
            results = filteredResults(results, found_filter)
    except:
        source_utils.scraper_error('GDRIVE')
        return found
    for entry in results:
        try:
            file_url = entry["link"]
            name = unquote(file_url.rsplit("/")[-1])
            # name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title) # needs a decent rewrite to get this
            # Normalize the title to dot-separated lowercase tokens before
            # quality detection.
            normalized = name.lower().replace('&', 'and').replace("'", "")
            normalized = re.sub(r'[^a-z0-9]+', '.', normalized)
            quality, info = source_utils.get_release_quality(normalized, file_url)
            try:
                dsize, isize = source_utils._size(str(entry["size_gb"]) + ' GB')
                info.insert(0, isize)
            except:
                source_utils.scraper_error('GDRIVE')
                dsize = 0
            found.append({
                'provider': 'gdrive', 'source': 'Google Drive',
                'quality': quality, 'name': name, 'language': 'en',
                'info': ' | '.join(info), 'url': file_url,
                'direct': True, 'debridonly': False, 'size': dsize})
        except:
            source_utils.scraper_error('GDRIVE')
    return found
def sources(self, url, hostDict):
    """Collect direct links from the FilePursuit RapidAPI search.

    Parses the query-string payload in *url* for title/year/episode data,
    searches FilePursuit, then filters and annotates each hit with
    quality/size info. Returns a list of source dicts (empty on any
    failure, missing API key, or falsy url).
    """
    sources = []
    if not url:
        return sources
    try:
        api_key = control.setting('filepursuit.api')
        if api_key == '':
            return sources
        headers = {
            "x-rapidapi-host": "filepursuit.p.rapidapi.com",
            "x-rapidapi-key": api_key}
        data = parse_qs(url)
        # Flatten parse_qs's list values to plain strings.
        data = dict((i, data[i][0]) if data[i] else (i, '') for i in data)
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
        query = '%s %s' % (title, hdlr)
        # Fix: the first alternative was written `\\\|` which matches the
        # literal two-character sequence `\|` — the intent is to strip a
        # lone backslash OR the other listed separators from the query.
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
        url = self.search_link % quote_plus(query)
        url = urljoin(self.base_link, url)
        # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
        r = client.request(url, headers=headers)
        if not r:
            return sources
        r = jsloads(r)
        if 'not_found' in r['status']:
            return sources
        results = r['files_found']
    except:
        source_utils.scraper_error('FILEPURSUIT')
        return sources
    for item in results:
        try:
            url = item['file_link']
            try:
                size = int(item['file_size_bytes'])
            except:
                size = 0
            try:
                name = item['file_name']
            except:
                name = item['file_link'].split('/')[-1]
            name = source_utils.clean_name(name)
            if not source_utils.check_title(title, aliases, name, hdlr, year):
                continue
            name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
            # Skip foreign-language releases.
            if source_utils.remove_lang(name_info):
                continue
            # link_header = client.request(url, output='headers', timeout='5') # to slow to check validity of links
            # if not any(value in str(link_header) for value in ['stream', 'video/mkv']):
            #     continue
            quality, info = source_utils.get_release_quality(name_info, url)
            try:
                dsize, isize = source_utils.convert_size(size, to='GB')
                if isize:
                    info.insert(0, isize)
            except:
                dsize = 0
            info = ' | '.join(info)
            sources.append({
                'provider': 'filepursuit', 'source': 'direct',
                'quality': quality, 'name': name, 'name_info': name_info,
                'language': "en", 'url': url, 'info': info,
                'direct': True, 'debridonly': False, 'size': dsize})
        except:
            source_utils.scraper_error('FILEPURSUIT')
    return sources
# -*- coding: utf-8 -*- # (updated 05-19-2021) ''' Fenomscrapers Project ''' import re import requests try: #Py2 from urllib import unquote, quote_plus, unquote_plus except ImportError: #Py3 from urllib.parse import unquote, quote_plus, unquote_plus from fenomscrapers.modules import control from fenomscrapers.modules import source_utils cloudflare_worker_url = control.setting('gdrive.cloudflare_url').strip() def getResults(searchTerm): url = '{}/searchjson/{}'.format(cloudflare_worker_url, searchTerm) if not url.startswith("https://"): url = "https://" + url # log_utils.log('query url = %s' % url) results = requests.get(url).json() return results def get_simple(title): title = title.lower() if "/" in title: title = title.split("/")[-1] title = unquote_plus(title)