def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        self.season = str(data['season']) if 'tvshowtitle' in data else None
        self.episode = str(data['episode']) if 'tvshowtitle' in data else None
        query_list = self.episode_query_list() if 'tvshowtitle' in data else self.year_query_list()
        # log_utils.log('query_list = %s' % query_list)
        cloud_folders = realdebrid.RealDebrid().user_torrents()
        if not cloud_folders: return sources
        cloud_folders = [i for i in cloud_folders if i['status'] == 'downloaded']
        if not cloud_folders: return sources
        ignoreM2ts = getSetting('rd_cloud.ignore.m2ts') == 'true'
        extras_filter = cloud_utils.extras_filter()
    except:
        from resources.lib.modules import log_utils
        log_utils.error('RD_CLOUD: ')
        return sources
    for folder in cloud_folders:
        is_m2ts = False
        try:
            folder_name = folder.get('filename', '')
            if not cloud_utils.cloud_check_title(title, aliases, folder_name): continue
            id = folder.get('id', '')
            torrent_info = realdebrid.RealDebrid().torrent_info(id)
            folder_files = torrent_info['files']
            folder_files = [i for i in folder_files if i['selected'] == 1]
        except:
            from resources.lib.modules import log_utils
            log_utils.error('RD_CLOUD: ')
            return sources
        for file in folder_files:
            try:
                name = file.get('path').lstrip('/')
                rt = cloud_utils.release_title_format(name)
                if not name.lower().endswith(tuple(supported_video_extensions())): continue
                if any(value in rt for value in extras_filter): continue
                if name.endswith('m2ts'):
                    if ignoreM2ts: continue
                    name = folder_name
                    rt = cloud_utils.release_title_format(name)
                    if name in str(sources): continue
                    if all(not bool(re.search(i, rt)) for i in query_list): continue # check if this newly added causes any movie titles that do not have the year to get dropped
                    is_m2ts = True
                    largest = sorted(folder_files, key=lambda k: k['bytes'], reverse=True)[0]
                    index_pos = folder_files.index(largest)
                    size = largest['bytes']
                    try: link = torrent_info['links'][index_pos]
                    except: link = torrent_info['links'][0]
                else:
                    if all(not bool(re.search(i, rt)) for i in query_list):
                        if 'tvshowtitle' in data:
                            season_folder_list = self.season_folder_list()
                            nl = name.lower()
                            if all(not bool(re.search(i, nl)) for i in season_folder_list): continue
                            episode_list = self.episode_list()
                            if all(not bool(re.search(i, rt)) for i in episode_list): continue
                        else:
                            if all(not bool(re.search(i, folder_name)) for i in query_list): continue
                            name = folder_name
                    if file.get('bytes') < 52428800: continue
                    name = name.split('/')
                    name = name[len(name)-1]
                    index_pos = folder_files.index(file)
                    link = torrent_info['links'][index_pos]
                    size = file.get('bytes', '')
                name_info = fs_utils.info_from_name(name, title, self.year, hdlr, episode_title)
                hash = folder.get('hash', '')
                quality, info = fs_utils.get_release_quality(name_info, name)
                try:
                    dsize, isize = fs_utils.convert_size(size, to='GB')
                    info.insert(0, isize)
                except: dsize = 0
                if is_m2ts: info.append('M2TS')
                info = ' / '.join(info)
                sources.append({'provider': 'rd_cloud', 'source': 'cloud', 'debrid': 'Real-Debrid', 'seeders': '', 'hash': hash, 'name': name, 'name_info': name_info,
                                'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': True, 'debridonly': True, 'size': dsize})
            except:
                from resources.lib.modules import log_utils
                log_utils.error('RD_CLOUD: ')
                return sources
    return sources
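# Illustrative only: the shape of the 'data' payload this scraper reads above
# (keys 'title', 'tvshowtitle', 'aliases', 'year', 'season', 'episode'); the
# values below are hypothetical.
#   episode request -> data = {'tvshowtitle': 'Show Name', 'title': 'Episode Name',
#                              'aliases': [], 'year': '2021', 'season': '2', 'episode': '5'}
#                      hdlr becomes 'S02E05' and episode_query_list() is used
#   movie request   -> data = {'title': 'Movie Name', 'aliases': [], 'year': '2021'}
#                      hdlr falls back to the year and year_query_list() is used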
def resolve_magnet(self, magnet_url, info_hash, season, episode, title):
    from resources.lib.modules.source_utils import seas_ep_filter, extras_filter
    from resources.lib.cloud_scrapers.cloud_utils import cloud_check_title
    try:
        torrent_id = None
        rd_url = None
        match = False
        reason = ''
        extensions = supported_video_extensions()
        extras_filtering_list = extras_filter()
        info_hash = info_hash.lower()
        if not season:
            compare_title = re.sub(r'[^A-Za-z0-9]+', '.', title.replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
        torrent_files = self._get(check_cache_url + '/' + info_hash)
        if not info_hash in torrent_files: return None
        torrent_id = self.add_magnet(magnet_url) # add_magnet() returns the torrent id
        torrent_files = torrent_files[info_hash]['rd']
        torrent_files = [item for item in torrent_files if self.video_only(item, extensions)]
        if not season:
            m2ts_check = self.m2ts_check(torrent_files)
            if m2ts_check: m2ts_key, torrent_files = self.m2ts_key_value(torrent_files)
        for item in torrent_files:
            try:
                correct_file_check = False
                item_values = [i['filename'] for i in item.values()]
                if season:
                    for value in item_values:
                        if '.m2ts' in value:
                            log_utils.log('Real-Debrid: Can not resolve .m2ts season disk episode', level=log_utils.LOGDEBUG)
                            continue
                        correct_file_check = seas_ep_filter(season, episode, value)
                        if correct_file_check: break
                    if not correct_file_check:
                        reason = value + ' :no matching video filename'
                        continue
                elif not m2ts_check:
                    for value in item_values:
                        filename = re.sub(r'[^A-Za-z0-9]+', '.', value.replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
                        filename_info = filename.replace(compare_title, '')
                        if any(x in filename_info for x in extras_filtering_list): continue
                        aliases = self.get_aliases(title)
                        correct_file_check = cloud_check_title(title, aliases, filename)
                        if correct_file_check: break
                    if not correct_file_check:
                        reason = filename + ' :no matching video filename'
                        continue
                torrent_keys = item.keys()
                if len(torrent_keys) == 0: continue
                torrent_keys = ','.join(torrent_keys)
                self.add_torrent_select(torrent_id, torrent_keys)
                torrent_info = self.torrent_info(torrent_id)
                if 'error' in torrent_info: continue
                selected_files = [(idx, i) for idx, i in enumerate([i for i in torrent_info['files'] if i['selected'] == 1])]
                if season:
                    correct_files = []
                    append = correct_files.append
                    correct_file_check = False
                    for value in selected_files:
                        correct_file_check = seas_ep_filter(season, episode, value[1]['path'])
                        if correct_file_check:
                            append(value[1])
                            break
                    if len(correct_files) == 0: continue
                    episode_title = re.sub(r'[^A-Za-z0-9]+', '.', title.replace("\'", '').replace('&', 'and').replace('%', '.percent')).lower()
                    for i in correct_files:
                        compare_link = seas_ep_filter(season, episode, i['path'], split=True)
                        compare_link = re.sub(episode_title, '', compare_link)
                        if any(x in compare_link for x in extras_filtering_list): continue
                        else:
                            match = True
                            break
                    if match:
                        index = [i[0] for i in selected_files if i[1]['path'] == correct_files[0]['path']][0]
                        break
                elif m2ts_check:
                    match, index = True, [i[0] for i in selected_files if i[1]['id'] == m2ts_key][0]
                else:
                    match = False
                    for value in selected_files:
                        filename = re.sub(r'[^A-Za-z0-9]+', '.', value[1]['path'].rsplit('/', 1)[1].replace('\'', '').replace('&', 'and').replace('%', '.percent')).lower()
                        filename_info = filename.replace(compare_title, '')
                        if any(x in filename_info for x in extras_filtering_list): continue
                        aliases = self.get_aliases(title)
                        match = cloud_check_title(title, aliases, filename)
                        if match:
                            index = value[0]
                            break
                if match: break
            except: log_utils.error()
        if match:
            rd_link = torrent_info['links'][index]
            file_url = self.unrestrict_link(rd_link)
            if file_url.endswith('rar'): file_url = None
            if not any(file_url.lower().endswith(x) for x in extensions): file_url = None
            if not self.store_to_cloud: self.delete_torrent(torrent_id)
            return file_url
        else:
            log_utils.log('Real-Debrid: FAILED TO RESOLVE MAGNET : "%s": %s' % (magnet_url, reason), __name__, log_utils.LOGWARNING)
            self.delete_torrent(torrent_id)
    except:
        log_utils.error('Real-Debrid: Error RESOLVE MAGNET %s : ' % magnet_url)
        if torrent_id: self.delete_torrent(torrent_id)
        return None
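# Illustrative only (hypothetical values): how resolve_magnet() might be called.
#   url = RealDebrid().resolve_magnet(magnet_url, info_hash, season='2', episode='5', title='Show Name')
# Passing season=None/episode=None takes the movie path above, which normalises the
# title into compare_title and matches filenames via cloud_check_title() instead of
# seas_ep_filter(). The torrent created by add_magnet() is removed with delete_torrent()
# on failure, and on success too unless store_to_cloud is enabled.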
def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        self.season = str(data['season']) if 'tvshowtitle' in data else None
        self.episode = str(data['episode']) if 'tvshowtitle' in data else None
        query_list = self.episode_query_list() if 'tvshowtitle' in data else self.year_query_list()
        # log_utils.log('query_list = %s' % query_list)
        cloud_files = premiumize.Premiumize().my_files_all()
        if not cloud_files: return sources
        cloud_files = [i for i in cloud_files if i['path'].lower().endswith(tuple(supported_video_extensions()))] # this only lets folder names thru with known video extensions..?
        if not cloud_files: return sources
        ignoreM2ts = getSetting('pm_cloud.ignore.m2ts') == 'true'
        extras_filter = cloud_utils.extras_filter()
    except:
        from resources.lib.modules import log_utils
        log_utils.error('PM_CLOUD: ')
        return sources
    for item in cloud_files:
        is_m2ts = False
        try:
            name = item.get('name', '')
            invalids = ('.img', '.bin', '.dat', '.mpls', '.mpl', '.bdmv', '.bdm', '.disc')
            if name.lower().endswith(invalids): continue
            path = item.get('path', '').lower()
            if not cloud_utils.cloud_check_title(title, aliases, path): continue
            rt = cloud_utils.release_title_format(name)
            if any(value in rt for value in extras_filter): continue
            if name.endswith('m2ts'):
                if ignoreM2ts: continue
                name = item.get('path', '').split('/')[0]
                if name in str(sources): continue
                if all(not bool(re.search(i, rt)) for i in query_list): continue # check if this newly added causes any movie titles that do not have the year to get dropped
                is_m2ts = True
                m2ts_files = [i for i in cloud_files if name in i.get('path')]
                largest = sorted(m2ts_files, key=lambda k: k['size'], reverse=True)[0]
                url_id = largest.get('id', '')
                size = largest.get('size', '')
            else:
                if all(not bool(re.search(i, rt)) for i in query_list):
                    if 'tvshowtitle' in data:
                        season_folder_list = self.season_folder_list()
                        if all(not bool(re.search(i, path)) for i in season_folder_list): continue
                        episode_list = self.episode_list()
                        if all(not bool(re.search(i, rt)) for i in episode_list): continue
                    else:
                        if all(not bool(re.search(i, path)) for i in query_list): continue
                        name = item.get('path', '').split('/')[0]
                if item.get('size') < 52428800: continue
                url_id = item.get('id', '')
                size = item.get('size', '')
            name_info = fs_utils.info_from_name(name, title, self.year, hdlr, episode_title)
            quality, info = fs_utils.get_release_quality(name_info, name)
            try:
                dsize, isize = fs_utils.convert_size(size, to='GB')
                info.insert(0, isize)
            except: dsize = 0
            if is_m2ts: info.append('M2TS')
            info = ' | '.join(info)
            sources.append({'provider': 'pm_cloud', 'source': 'cloud', 'debrid': 'Premiumize.me', 'seeders': '', 'hash': '', 'name': name, 'name_info': name_info,
                            'quality': quality, 'language': 'en', 'url': url_id, 'info': info, 'direct': True, 'debridonly': True, 'size': dsize})
        except:
            from resources.lib.modules import log_utils
            log_utils.error('PM_CLOUD: ')
            return sources
    return sources
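# Illustrative only: the query_list filter used above keeps an item when at least one
# pattern matches the formatted release title (hypothetical values):
#   query_list = [r'some\.movie\.2021', r'some\.movie']      # e.g. from year_query_list()
#   rt = 'some.movie.2021.1080p.web.dl.x264'                  # release_title_format(name)
#   all(not bool(re.search(i, rt)) for i in query_list)       # False -> item is kept
# The 52428800-byte cutoff above is 50 * 1024 * 1024, i.e. files under roughly 50 MB are skipped.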
def sources(self, data, hostDict):
    sources = []
    if not data: return sources
    try:
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
        aliases = data['aliases']
        episode_title = data['title'] if 'tvshowtitle' in data else None
        self.year = data['year']
        hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
        self.season = str(data['season']) if 'tvshowtitle' in data else None
        self.episode = str(data['episode']) if 'tvshowtitle' in data else None
        query_list = self.episode_query_list() if 'tvshowtitle' in data else self.year_query_list()
        # log_utils.log('query_list = %s' % query_list)
        try: cloud_folders = alldebrid.AllDebrid().user_cloud()['magnets']
        except: return sources
        if not cloud_folders: return sources
        cloud_folders = [i for i in cloud_folders if i['statusCode'] == 4]
        if not cloud_folders: return sources
        ignoreM2ts = getSetting('ad_cloud.ignore.m2ts') == 'true'
        extras_filter = cloud_utils.extras_filter()
    except:
        from resources.lib.modules import log_utils
        log_utils.error('AD_CLOUD: ')
        return sources
    for folder in cloud_folders:
        is_m2ts = False
        try:
            folder_name = folder.get('filename')
            if not cloud_utils.cloud_check_title(title, aliases, folder_name): continue
            files = folder.get('links', '')
            # files = [i for i in files if i['filename'].lower().endswith(tuple(supported_video_extensions()))]
            if not files: continue
        except:
            from resources.lib.modules import log_utils
            log_utils.error('AD_CLOUD: ')
            return sources
        for file in files:
            try:
                name = file.get('filename', '')
                if name.lower().endswith(invalid_extensions): continue
                path = folder.get('filename', '').lower()
                rt = cloud_utils.release_title_format(name)
                if any(value in rt for value in extras_filter): continue
                if '.m2ts' in str(file.get('files')):
                    if ignoreM2ts: continue
                    if name in str(sources): continue
                    if all(not bool(re.search(i, rt)) for i in query_list): continue # check if this newly added causes any movie titles that do not have the year to get dropped
                    is_m2ts = True
                    m2ts_files = [i for i in files if name == i.get('filename')]
                    largest = sorted(m2ts_files, key=lambda k: k['size'], reverse=True)[0]
                    link = largest.get('link', '')
                    size = largest.get('size', '')
                else:
                    if all(not bool(re.search(i, rt)) for i in query_list):
                        if 'tvshowtitle' in data:
                            season_folder_list = self.season_folder_list()
                            if all(not bool(re.search(i, path)) for i in season_folder_list): continue
                            episode_list = self.episode_list()
                            if all(not bool(re.search(i, rt)) for i in episode_list): continue
                        else:
                            if all(not bool(re.search(i, path)) for i in query_list): continue
                            name = folder.get('filename', '')
                    link = file.get('link', '')
                    size = file.get('size', '')
                name_info = fs_utils.info_from_name(name, title, self.year, hdlr, episode_title)
                hash = folder.get('hash', '')
                seeders = folder.get('seeders', '')
                quality, info = fs_utils.get_release_quality(name_info, name)
                try:
                    dsize, isize = fs_utils.convert_size(size, to='GB')
                    info.insert(0, isize)
                except: dsize = 0
                if is_m2ts: info.append('M2TS')
                info = ' / '.join(info)
                sources.append({'provider': 'ad_cloud', 'source': 'cloud', 'debrid': 'AllDebrid', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
                                'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': True, 'debridonly': True, 'size': dsize})
            except:
                from resources.lib.modules import log_utils
                log_utils.error('AD_CLOUD: ')
                return sources
    return sources
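# Note: 'invalid_extensions' is not defined in this method; it is presumably a
# module-level constant, analogous to the 'invalids' tuple in the Premiumize scraper
# above ('.img', '.bin', '.dat', '.mpls', '.mpl', '.bdmv', '.bdm', '.disc').
# The statusCode == 4 filter appears to correspond to AllDebrid's "Ready" magnet status,
# mirroring the status == 'downloaded' filter used for Real-Debrid torrents.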