Example #1
 def results(self, info):
     try:
         self.info = info
         self.db_type = self.info.get("db_type")
         self.title = self.info.get("title")
         self.year = self.info.get("year")
         if self.year: self.rootname = '%s (%s)' % (self.title, self.year)
         else: self.rootname = self.title
         self.season = self.info.get("season", None)
         self.episode = self.info.get("episode", None)
         self.extensions = supported_video_extensions()
         self.folder_query = clean_title(normalize(self.title))
         self.file_query = clean_title(normalize(self.title))
         self.query_list = self._year_query_list(
         ) if self.db_type == 'movie' else self._episode_query_list()
         self._scrape_cloud()
         if not self.scrape_results: return self.sources
         self.label_settings = label_settings(self.info['scraper_settings'],
                                              self.scrape_provider)
         for item in self.scrape_results:
             try:
                 file_name = normalize(item['name'])
                 file_dl = item['url_link']
                 size = float(int(item['size'])) / 1073741824
                 video_quality = get_release_quality(file_name)
                 details = get_file_info(file_name)
                 label, multiline_label = build_internal_scrapers_label(
                     self.label_settings, file_name, details, size,
                     video_quality)
                 self.sources.append({
                     'name': file_name,
                     'label': label,
                     'multiline_label': multiline_label,
                     'title': file_name,
                     'quality': video_quality,
                     'size': size,
                     'url_dl': file_dl,
                     'id': file_dl,
                     'downloads': False,
                     'direct': True,
                     'source': self.scrape_provider,
                     'scrape_provider': self.scrape_provider
                 })
             except:
                 pass
         window.setProperty('ad-cloud_source_results',
                            json.dumps(self.sources))
     except Exception as e:
         from modules.utils import logger
         logger('FEN alldebrid scraper Exception', e)
     return self.sources
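A note on the arithmetic above: item['size'] is treated as a byte count, and dividing by 1073741824 (1024 ** 3) yields gibibytes. A minimal, self-contained sketch:

def bytes_to_gb(size_bytes):
    # 1073741824 == 1024 ** 3, i.e. bytes per GiB
    return float(int(size_bytes)) / 1073741824

print('%.2f GB' % bytes_to_gb(2147483648))  # 2.00 GB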
Example #2
 def _scrape_downloads(self):
     try:
         my_downloads = RealDebrid.downloads()
         my_downloads = [
             i for i in my_downloads
             if i['download'].lower().endswith(tuple(self.extensions))
         ]
         append = self.scrape_results.append
         for item in my_downloads:
             match = False
             normalized = normalize(item['filename'])
             filename = clean_title(normalized)
             if self.db_type == 'movie':
                 if any(x in filename for x in self._year_query_list()):
                     match = True
             else:
                 if seas_ep_filter(self.season, self.episode, normalized):
                     match = True
             if match and self.folder_query in filename:
                 item = self.make_downloads_item(item)
                 if item['path'] not in [
                         d['path'] for d in self.scrape_results
                 ]:
                     append(item)
     except:
         pass
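The duplicate check above rebuilds the full list of already-seen paths for every candidate. A hypothetical standalone sketch of the same filtering idea that tracks seen paths in a set instead:

def dedupe_by_path(items):
    # Keep only the first item carrying each distinct 'path'
    seen, results = set(), []
    for item in items:
        if item['path'] not in seen:
            seen.add(item['path'])
            results.append(item)
    return results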
Example #3
 def _scrape_directory(self, folder_name):
     folder_files = []
     folder_results = []
     dirs, files = xbmcvfs.listdir(folder_name)
     for i in dirs:
         folder_files.append((i, 'folder'))
     for i in files:
         folder_files.append((i, 'file'))
     for item in folder_files:
         file_type = item[1]
         item_name = clean_title(normalize(item[0]))
         if file_type == 'file':
             ext = os.path.splitext(urlparse(item[0]).path)[-1]
             if ext in self.extensions:
                 if self.db_type == 'movie':
                     if self.title_query in item_name:
                         url_path = self.url_path(folder_name, item[0])
                         self.scrape_results.append((item[0], url_path))
                 elif any(x in item_name for x in self.file_query):
                     if folder_name not in self.folder_path:
                         url_path = self.url_path(folder_name, item[0])
                         self.scrape_results.append((item[0], url_path))
                     elif self.title_query in item_name:
                         url_path = self.url_path(folder_name, item[0])
                         self.scrape_results.append((item[0], url_path))
         elif file_type == 'folder':
             if self.title_query in item_name or any(
                     x in item_name for x in self.folder_query):
                 new_folder = os.path.join(folder_name, item[0])
                 folder_results.append(new_folder)
     if not folder_results: return
     return self._scraper_worker(folder_results)
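_scraper_worker is not shown in this snippet. A plausible sketch, assuming it simply fans each discovered subfolder back into _scrape_directory on its own thread (the same start/join pattern Examples #5 and #6 use):

from threading import Thread

def _scraper_worker(self, folder_results):
    # Hypothetical: recurse into each subfolder concurrently
    threads = [Thread(target=self._scrape_directory, args=(folder,))
               for folder in folder_results]
    [i.start() for i in threads]
    [i.join() for i in threads]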
Example #4
 def _scrape_directory(self, folder_name):
     folder_results = []
     dirs, files = xbmcvfs.listdir(folder_name)
     cloud_files = files + dirs
     for item in cloud_files:
         ext = os.path.splitext(urlparse(item).path)[-1]
         file_type = 'file' if ext else 'folder'
         item_name = clean_title(normalize(item))
         if file_type == 'file' and ext in self.extensions:
             if self.db_type == 'movie':
                 if self.title_query in item_name:
                     url_path = self.url_path(folder_name, item)
                     self.scrape_results.append((item, url_path))
             elif any(x in item_name for x in self.file_query):
                 if folder_name != self.download_path:
                     url_path = self.url_path(folder_name, item)
                     self.scrape_results.append((item, url_path))
                 elif self.title_query in item_name:
                     url_path = self.url_path(folder_name, item)
                     self.scrape_results.append((item, url_path))
         elif file_type == 'folder':
             if self.title_query in item_name or any(
                     x in item_name for x in self.folder_query):
                 new_folder = os.path.join(folder_name, item)
                 folder_results.append(new_folder)
     if not folder_results: return
     return self._scraper_worker(folder_results)
Example #5
 def _scrape_cloud(self):
     try:
         threads = []
         cloud_files = []
         try:
             my_cloud_files = AllDebrid.user_cloud()
         except:
             return self.sources
         try:
             for k, v in my_cloud_files.iteritems():
                 if isinstance(v, dict):
                     cloud_files.append(v)
         except:
             for k, v in my_cloud_files.items():
                 if isinstance(v, dict):
                     cloud_files.append(v)
         my_cloud_files = [i for i in cloud_files if i['statusCode'] == 4]
         for item in my_cloud_files:
             folder_name = clean_title(normalize(item['filename']))
             assigned_content = self._assigned_content(
                 normalize(item['filename']))
             if assigned_content:
                 if assigned_content == self.rootname:
                     self.folder_results.append(
                         (normalize(item['filename']), item, True))
             elif self.folder_query in folder_name or not folder_name:
                 self.folder_results.append(
                     (normalize(item['filename']), item, False))
         if not self.folder_results: return self.sources
         for i in self.folder_results:
             threads.append(Thread(target=self._scrape_folders, args=(i, )))
         [i.start() for i in threads]
         [i.join() for i in threads]
     except:
         pass
Example #6
 def _scrape_cloud(self):
     try:
         try:
             my_cloud_files = RealDebrid.user_cloud()
         except:
             return self.sources
         append = self.folder_results.append
         for item in my_cloud_files:
             normalized = normalize(item['filename'])
             folder_name = clean_title(normalized)
             assigned_content = self._assigned_content(normalized)
             if assigned_content:
                 if assigned_content == self.rootname:
                     append((normalized, item['id'], True))
             elif self.folder_query in folder_name or not folder_name:
                 append((normalized, item['id'], False))
         if not self.folder_results: return self.sources
         threads = []
         append = threads.append
         for i in self.folder_results:
             append(Thread(target=self._scrape_folders, args=(i, )))
         [i.start() for i in threads]
         [i.join() for i in threads]
     except:
         pass
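The append = self.folder_results.append bindings above are a CPython micro-optimization: caching the bound method in a local avoids re-resolving the attribute on every loop iteration. A self-contained illustration:

results = []
append = results.append  # one attribute lookup instead of one per iteration
for i in range(3):
    append(i)
print(results)  # [0, 1, 2]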
Example #7
def rd_external_browser(magnet, filtering_list):
    import re
    try:
        from html import unescape
    except ImportError:
        # Python 2 fallback: HTMLParser.unescape was removed in Python 3.9
        from HTMLParser import HTMLParser
        unescape = HTMLParser().unescape
    try:
        episode_match = False
        torrent_id = None
        torrent_keys = []
        extensions = supported_video_extensions()
        magnet_url = unescape(magnet)
        r = re.search('''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''',
                      str(magnet), re.I)
        infoHash = r.group(2).lower()
        torrent_files = RealDebrid.check_hash(infoHash)
        torrent_files = torrent_files[infoHash]['rd'][0]
        try:
            files_tuple = sorted([
                (k, v['filename'].lower()) for k, v in torrent_files.items()
                if v['filename'].lower().endswith(tuple(extensions))
            ])
        except:
            return None
        for i in files_tuple:
            if any(x in i[1] for x in filtering_list):
                episode_match = True
            torrent_keys.append(i[0])
        if not episode_match: return None
        if not torrent_keys: return None
        torrent_keys = ','.join(torrent_keys)
        torrent = RealDebrid.add_magnet(magnet_url)
        torrent_id = torrent['id']
        RealDebrid.add_torrent_select(torrent_id, torrent_keys)
        torrent_files = RealDebrid.user_cloud_info(torrent_id)
        file_info = [
            i for i in torrent_files['files']
            if i['path'].lower().endswith(tuple(extensions))
        ]
        file_urls = torrent_files['links']
        pack_info = [
            dict(list(i.items()) + [('url_link', file_urls[c])])
            for c, i in enumerate(file_info)
        ]
        pack_info = sorted(pack_info, key=lambda k: k['path'])
        correct_result = None
        for item in pack_info:
            filename = clean_title(item['path'])
            if any(x in filename for x in filtering_list):
                correct_result = item
                break
        if not correct_result:
            RealDebrid.delete_torrent(torrent_id)
            return None
        url_link = correct_result['url_link']
        RealDebrid.delete_torrent(torrent_id)
        return resolve_rd(url_link, play=False)
    except:
        if torrent_id: RealDebrid.delete_torrent(torrent_id)
        return None
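The regex above pulls the infohash out of the magnet link: group 1 captures the URN type (normally 'btih') and group 2 the hash itself, lowercased before the RealDebrid.check_hash lookup. A runnable sketch with a made-up hash:

import re

magnet = 'magnet:?xt=urn:btih:C12FE1C06BBA254A9DC9F519B335AA7C1367A88A&dn=example'
r = re.search(r'magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)', magnet, re.I)
print(r.group(1), r.group(2).lower())  # btih c12fe1c06bba254a9dc9f519b335aa7c1367a88a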
Example #8
 def _scrape_history(self):
     try:
         my_downloads = RealDebrid.downloads()
         my_downloads = [i for i in my_downloads if i['download'].lower().endswith(tuple(self.extensions))]
         for item in my_downloads:
             filename = clean_title(normalize(item['filename']))
             if any(x in filename for x in self.query_list):
                 if self.folder_query in filename:
                     item = self.make_history_item(item)
                     if item['path'] not in [d['path'] for d in self.scrape_results]:
                         self.scrape_results.append(item)
     except: pass
Example #9
def get_filename_match(title, url, name=None):
    from modules.utils import clean_file_name
    if name: return clean_file_name(name)
    from modules.utils import clean_title, normalize
    title_match = None
    try:
        title = clean_title(normalize(title))
        name_url = unquote(url)
        try:
            file_name = clean_title(name_url.split('/')[-1])
        except:
            return title_match
        test = name_url.split('/')
        for item in test:
            test_url = str(clean_title(normalize(item)))
            if title in test_url:
                title_match = clean_file_name(str(item)).replace(
                    'html', ' ').replace('+', ' ')
                break
    except:
        pass
    return title_match
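The loop walks every path segment of the unquoted URL and keeps the first one containing the cleaned title. A self-contained illustration of the unquote-and-split step (clean_title, normalize and clean_file_name live in modules.utils and are not reproduced here):

from urllib.parse import unquote  # Python 3; Python 2 imports unquote from urllib

url = 'http://host/files/Some.Movie.2019.1080p%20WEB.mkv'
print(unquote(url).split('/')[-1])  # Some.Movie.2019.1080p WEB.mkv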
Example #10
 def getFilename(self):
     if self.final_name: final_name = self.final_name
     elif self.action == 'meta.pack':
         name = self.params_get('pack_files')['filename']
         final_name = os.path.splitext(
             urlparse(name).path)[0].split('/')[-1]
     elif self.action == 'image':
         final_name = self.title
     else:
         name_url = unquote(self.url)
         file_name = clean_title(name_url.split('/')[-1])
         if clean_title(self.title).lower() in file_name.lower():
             final_name = os.path.splitext(
                 urlparse(name_url).path)[0].split('/')[-1]
         else:
             try:
                 final_name = self.name.translate(None,
                                                  r'\/:*?"<>|').strip('.')
             except:
                 final_name = os.path.splitext(
                     urlparse(name_url).path)[0].split('/')[-1]
     self.final_name = to_utf8(safe_string(remove_accents(final_name)))
Example #11
 def results(self, info):
     try:
         self.info = info
         self.db_type = self.info.get("db_type")
         self.download_path = settings.download_directory(self.db_type)
         self.title = self.info.get("title")
         self.year = self.info.get("year")
         self.season = self.info.get("season")
         self.episode = self.info.get("episode")
         self.title_query = clean_title(self.title)
         self.folder_query = self._season_query_list(
         ) if self.db_type == 'episode' else self._year_query_list()
         self.file_query = self._episode_query_list(
         ) if self.db_type == 'episode' else self._year_query_list()
         self._scrape_directory(self.download_path)
         if not self.scrape_results: return self.sources
         self.label_settings = label_settings(self.info['scraper_settings'],
                                              self.scrape_provider)
         for item in self.scrape_results:
             try:
                 file_name = item[0]
                 file_dl = item[1]
                 size = self._get_size(file_dl)
                 details = get_file_info(file_name)
                 video_quality = get_release_quality(file_name, file_dl)
                 label, multiline_label = build_internal_scrapers_label(
                     self.label_settings, file_name, details, size,
                     video_quality)
                 self.sources.append({
                     'name': file_name,
                     'label': label,
                     'multiline_label': multiline_label,
                     'title': file_name,
                     'quality': video_quality,
                     'size': size,
                     'url_dl': file_dl,
                     'id': file_dl,
                     'downloads': True,
                     'direct': True,
                     'source': self.scrape_provider,
                     'scrape_provider': self.scrape_provider
                 })
             except:
                 pass
         window.setProperty('downloads_source_results',
                            json.dumps(self.sources))
     except Exception as e:
         from modules.utils import logger
         logger('FEN downloads scraper Exception', e)
     return self.sources
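The scraper hands its finished list to the Kodi home window as a JSON property so other parts of the addon can read it back without re-scraping. A minimal sketch of that round trip, assuming window is xbmcgui.Window(10000) (only runnable inside Kodi):

import json
import xbmcgui

window = xbmcgui.Window(10000)  # Kodi home window
sources = [{'name': 'example.mkv'}]
window.setProperty('downloads_source_results', json.dumps(sources))
restored = json.loads(window.getProperty('downloads_source_results'))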
Example #12
def getFileNameMatch(title, url):
    try:
        from urllib import unquote
    except ImportError:
        from urllib.parse import unquote
    from modules.utils import clean_title, normalize, clean_file_name
    title_match = None
    try:
        title = clean_title(normalize(title))
        if 'magnet' in url:
            url = url.split('&dn=')[1]
        name_url = unquote(url)
        try:
            file_name = clean_title(name_url.split('/')[-1])
        except:
            return title_match
        test = name_url.split('/')
        for item in test:
            test_url = str(clean_title(normalize(item)))
            if title in test_url:
                title_match = clean_file_name(str(item)).replace(
                    '&tr=udp:', '').replace('&tr=http:', '').replace(
                        '&tr=udp',
                        '').replace('&tr=http',
                                    '').replace('html', ' ').replace('+', ' ')
                try:
                    title_match = title_match.split('&xl=')[0]
                except:
                    pass
                try:
                    title_match = title_match.split(' p=')[0]
                except:
                    pass
                break
    except:
        pass
    return title_match
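For magnet links the display name arrives URL-encoded after &dn=, with tracker parameters glued on, which is why the function strips the &tr= fragments afterwards. A runnable illustration of the split:

from urllib.parse import unquote

magnet = 'magnet:?xt=urn:btih:abc123&dn=Some.Show.S01E01.720p&tr=udp://tracker:80'
print(unquote(magnet.split('&dn=')[1]))  # Some.Show.S01E01.720p&tr=udp://tracker:80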
Example #13
 def _scrape_cloud(self):
     try:
         cloud_files = Premiumize.user_cloud_all()['files']
         cloud_files = [
             i for i in cloud_files
             if i['path'].lower().endswith(tuple(self.extensions))
         ]
         cloud_files = sorted(cloud_files, key=lambda k: k['name'])
     except:
         return self.sources
     for item in cloud_files:
         item_name = clean_title(normalize(item['name']))
         if self.query in item_name:
             if any(x in item_name for x in self.file_query):
                 self.scrape_results.append(item)
Example #14
 def _scrape_folders(self, folder_info):
     try:
         assigned_folder = folder_info[2]
         folder_files = RealDebrid.user_cloud_info(folder_info[1])
         file_info = [i for i in folder_files['files'] if i['path'].lower().endswith(tuple(self.extensions))]
         file_urls = folder_files['links']
         for c, i in enumerate(file_info):
             try: i.update({'folder_name': folder_info[0], 'url_link': file_urls[c]})
             except: pass
         contents = sorted(file_info, key=lambda k: k['path'])
         for item in contents:
             filename = clean_title(normalize(item['path']))
             if any(x in filename for x in self.query_list):
                 if assigned_folder:
                     self.scrape_results.append(item)
                 elif self.folder_query in filename:
                     self.scrape_results.append(item)
     except: pass
Example #15
 def _scrape_folders(self, folder_info):
     try:
         final_files = []
         extensions = supported_video_extensions()
         assigned_folder = folder_info[2]
         torrent_folder = folder_info[1]
         links = torrent_folder['links']
         total_size = torrent_folder['size']
         try:
             links_count = len([
                 v for k, v in links.items()
                 if v.lower().endswith(tuple(self.extensions))
             ])
             for k, v in links.items():
                 if v.lower().endswith(tuple(self.extensions)):
                     size = total_size / links_count
                     final_files.append({
                         'name': v,
                         'url_link': k,
                         'size': size
                     })
         except:
             links_count = len([
                 v for k, v in links.iteritems()
                 if v.lower().endswith(tuple(self.extensions))
             ])
             for k, v in links.iteritems():
                 if v.lower().endswith(tuple(self.extensions)):
                     size = total_size / links_count
                     final_files.append({
                         'name': v,
                         'url_link': k,
                         'size': size
                     })
         for item in final_files:
             filename = clean_title(normalize(item['name']))
             if any(x in filename for x in self.query_list):
                 if assigned_folder:
                     self.scrape_results.append(item)
                 elif self.folder_query in filename:
                     self.scrape_results.append(item)
     except:
         return
Example #16
 def _scrape_folders(self, folder_info):
     try:
         assigned_folder = folder_info[2]
         folder_files = RealDebrid.user_cloud_info(folder_info[1])
         contents = [
             i for i in folder_files['files']
             if i['path'].lower().endswith(tuple(self.extensions))
         ]
         file_urls = folder_files['links']
         append = self.scrape_results.append
         for c, i in enumerate(contents):
             try:
                 i.update({
                     'folder_name': folder_info[0],
                     'url_link': file_urls[c]
                 })
             except:
                 pass
         contents.sort(key=lambda k: k['path'])
         for item in contents:
             match = False
             normalized = normalize(item['path'])
             filename = clean_title(normalized)
             if assigned_folder and self.db_type == 'movie': match = True
             else:
                 if self.db_type == 'movie':
                     if any(x in filename for x in self._year_query_list()
                            ) and self.folder_query in filename:
                         match = True
                 else:
                     if assigned_folder:
                         if any(x in normalized.lower()
                                for x in self.seas_ep_query_list):
                             match = True
                     elif seas_ep_filter(self.season, self.episode,
                                         normalized):
                         match = True
             if match:
                 if assigned_folder: item['assigned_folder'] = True
                 append(item)
     except:
         pass
Example #17
 def _scrape_cloud(self):
     try:
         cloud_files = Premiumize.user_cloud_all()['files']
         cloud_files = [
             i for i in cloud_files
             if i['path'].lower().endswith(tuple(self.extensions))
         ]
         cloud_files.sort(key=lambda k: k['name'])
     except:
         return self.sources
     append = self.scrape_results.append
     for item in cloud_files:
         normalized = normalize(item['name'])
         item_name = clean_title(normalized)
         if self.query in item_name:
             if self.db_type == 'movie':
                 if any(x in item['name'] for x in self._year_query_list()):
                     append(item)
             elif seas_ep_filter(self.season, self.episode, normalized):
                 append(item)
Example #18
def download(url):
    from modules.nav_utils import hide_busy_dialog, notification
    from modules.utils import clean_file_name, clean_title
    from modules import settings
    # from modules.utils import logger
    if url is None:
        hide_busy_dialog()
        notification('No URL found for Download. Pick another Source', 6000)
        return
    params = dict(parse_qsl(sys.argv[2].replace('?','')))
    json_meta = params.get('meta')
    if json_meta:
        meta = json.loads(json_meta)
        db_type = meta.get('vid_type')
        title = meta.get('search_title')
        year = meta.get('year')
        image = meta.get('poster')
        season = meta.get('season')
        episode = meta.get('episode')
        name = params.get('name')
    else:
        db_type = params.get('db_type')
        image = params.get('image')
        title = params.get('name')
        # these keys may be absent for non-meta calls; default to None so the
        # references further down don't raise NameError
        name = title
        year = params.get('year')
        season = params.get('season')
        episode = params.get('episode')
    title = clean_file_name(title)
    media_folder = settings.download_directory(db_type)
    if not media_folder:
        hide_busy_dialog()
        resp = xbmcgui.Dialog().yesno(
            "No Download folder set!",
            "Fen requires you to set Download Folders.",
            "Would you like to set a folder now?")
        if resp:
            from modules.nav_utils import open_settings
            return open_settings('7.0')
        else:
            return
    if db_type in ('movie', 'episode'):
        folder_rootname = '%s (%s)' % (title, year)
        folder = os.path.join(media_folder, folder_rootname + '/')
    else:
        folder = media_folder
    if db_type == 'episode':
        folder = os.path.join(folder, 'Season ' + str(season))
    try: headers = dict(parse_qsl(url.rsplit('|', 1)[1]))
    except: headers = {}
    dest = None
    url = url.split('|')[0]
    if 'http' not in url:
        from apis.furk_api import FurkAPI
        from indexers.furk import filter_furk_tlist, seas_ep_query_list
        t_files = FurkAPI().t_files(url)
        t_files = [i for i in t_files if 'video' in i['ct'] and 'bitrate' in i]
        name, url = filter_furk_tlist(t_files, (None if db_type == 'movie' else seas_ep_query_list(season, episode)))[0:2]
        transname = name  # keep a value for the quote_plus(transname) call below
        dest = os.path.join(folder, name)
    if db_type == 'archive':
        ext = 'zip'
    elif db_type == 'audio':
        ext = os.path.splitext(urlparse(url).path)[1][1:]
        if ext not in ('wav', 'mp3', 'ogg', 'flac', 'wma', 'aac'): ext = 'mp3'
    else:
        ext = os.path.splitext(urlparse(url).path)[1][1:]
        if ext not in ('mp4', 'mkv', 'flv', 'avi', 'mpg'): ext = 'mp4'
    if not dest:
        name_url = unquote(url)
        file_name = clean_title(name_url.split('/')[-1])
        if clean_title(title).lower() in file_name.lower():
            transname = os.path.splitext(urlparse(name_url).path)[0].split('/')[-1]
        else:
        try: transname = name.translate(None, r'\/:*?"<>|').strip('.')
            except: transname = os.path.splitext(urlparse(name_url).path)[0].split('/')[-1]
        dest = os.path.join(folder, transname + '.' + ext)
    sysheaders = quote_plus(json.dumps(headers))
    sysurl = quote_plus(url)
    systitle = quote_plus(transname)
    sysimage = quote_plus(image)
    sysfolder = quote_plus(folder)
    sysdest = quote_plus(dest)
    script = inspect.getfile(inspect.currentframe())
    cmd = 'RunScript(%s, %s, %s, %s, %s, %s, %s)' % (script, sysurl, sysdest, systitle, sysimage, sysfolder, sysheaders)

    xbmc.executebuiltin(cmd)
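Kodi-style URLs may carry request headers after a '|' separator; the rsplit above peels them off and parse_qsl turns them into a dict. A minimal sketch:

from urllib.parse import parse_qsl

url = 'http://host/file.mkv|User-Agent=Mozilla&Referer=http%3A%2F%2Fhost%2F'
try:
    headers = dict(parse_qsl(url.rsplit('|', 1)[1]))
except IndexError:
    headers = {}  # no header block present
print(headers)  # {'User-Agent': 'Mozilla', 'Referer': 'http://host/'}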
Example #19
def filter_furk_tlist(t_files, filtering_list=None):
    from modules.utils import clean_title, normalize
    if filtering_list:
        t_files = [i for i in t_files
                   if 'video' in i['ct']
                   and any(x in clean_title(normalize(i['name'])) for x in filtering_list)
                   and not any(x in i['name'].lower() for x in ('furk320', 'sample'))][0]
    else:
        t_files = [i for i in t_files if 'is_largest' in i][0]
    return t_files['name'], t_files['url_dl'], t_files['size']
Example #20
    def results(self, info):
        try:
            if not enabled_debrids_check('pm'):
                return internal_results(self.scrape_provider, self.sources)
            self.title_filter = filter_by_name(self.scrape_provider)
            self.sources, self.scrape_results = [], []
            self.db_type = info.get('db_type')
            self.title = info.get('title')
            self.year = info.get('year')
            if self.year: self.rootname = '%s (%s)' % (self.title, self.year)
            else: self.rootname = self.title
            self.season = info.get('season')
            self.episode = info.get('episode')
            self.query = clean_title(self.title)
            self.extensions = supported_video_extensions()
            self._scrape_cloud()
            if not self.scrape_results:
                return internal_results(self.scrape_provider, self.sources)
            self.aliases = get_aliases_titles(info.get('aliases', []))

            def _process():
                for item in self.scrape_results:
                    try:
                        file_name = normalize(item['name'])
                        if self.title_filter:
                            if not check_title(self.title, file_name,
                                               self.aliases, self.year,
                                               self.season, self.episode):
                                continue
                        URLName = clean_file_name(file_name).replace(
                            'html', ' ').replace('+', ' ').replace('-', ' ')
                        path = item['path']
                        file_dl = item['id']
                        size = round(float(item['size']) / 1073741824, 2)
                        video_quality, details = get_file_info(
                            name_info=release_info_format(file_name))
                        source_item = {
                            'name': file_name,
                            'title': file_name,
                            'URLName': URLName,
                            'quality': video_quality,
                            'size': size,
                            'size_label': '%.2f GB' % size,
                            'extraInfo': details,
                            'url_dl': file_dl,
                            'id': file_dl,
                            'downloads': False,
                            'direct': True,
                            'source': self.scrape_provider,
                            'scrape_provider': self.scrape_provider
                        }
                        yield source_item
                    except:
                        pass

            self.sources = list(_process())
        except Exception as e:
            from modules.kodi_utils import logger
            logger('FEN premiumize scraper Exception', e)
        internal_results(self.scrape_provider, self.sources)
        return self.sources
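Because _process is a generator, a failure while building one source skips just that item; list(_process()) then drains the survivors into self.sources. A stripped-down illustration of the pattern:

def _process(items):
    for item in items:
        try:
            yield item['name'].upper()  # stand-in for the per-item work above
        except Exception:
            pass  # a bad item is skipped, the rest still yield

print(list(_process([{'name': 'a'}, {}, {'name': 'b'}])))  # ['A', 'B']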
Example #21
 def results(self, info):
     try:
         self.info = info
         self.db_type = self.info.get("db_type")
         self.folder_path = settings.source_folders_directory(
             self.db_type, self.scrape_provider)
         if not self.folder_path: return self.sources
         self.title = self.info.get("title")
         self.year = self.info.get("year")
         self.season = self.info.get("season")
         self.episode = self.info.get("episode")
         self.title_query = clean_title(self.title)
         self.folder_query = self._season_query_list(
         ) if self.db_type == 'episode' else self._year_query_list()
         self.file_query = self._episode_query_list(
         ) if self.db_type == 'episode' else self._year_query_list()
         cache_name = 'fen_%s_SCRAPER_%s_%s_%s_%s' % (
             self.scrape_provider, self.title, self.year, self.season,
             self.episode)
         cache = _cache.get(cache_name)
         if cache:
             self.scrape_results = cache
         else:
             self._scrape_directory(self.folder_path)
             _cache.set(cache_name,
                        self.scrape_results,
                        expiration=datetime.timedelta(hours=2))
         if not self.scrape_results: return self.sources
         self.label_settings = label_settings(self.info['scraper_settings'],
                                              'folders', self.scraper_name)
         for item in self.scrape_results:
             try:
                 file_name = item[0]
                 file_dl = item[1]
                 size = self._get_size(
                     file_dl) if not file_dl.endswith('.strm') else 'strm'
                 details = get_file_info(file_name)
                 video_quality = get_release_quality(file_name, file_dl)
                 label, multiline_label = build_internal_scrapers_label(
                     self.label_settings, file_name, details, size,
                     video_quality)
                 self.sources.append({
                     'name': file_name,
                     'label': label,
                     'multiline_label': multiline_label,
                     'title': file_name,
                     'quality': video_quality,
                     'size': size,
                     'url_dl': file_dl,
                     'id': file_dl,
                     self.scrape_provider: True,
                     'direct': True,
                     'source': self.scrape_provider,
                     'scrape_provider': self.scrape_provider
                 })
             except:
                 pass
         window.setProperty('%s_source_results' % self.scrape_provider,
                            json.dumps(self.sources))
     except Exception as e:
         from modules.utils import logger
         logger('FEN folders scraper Exception', e)
     return self.sources
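Results here are memoized for two hours under a key built from provider, title, year, season and episode, so repeat lookups skip the directory walk. A self-contained sketch of the get/set pattern, with DictCache as a hypothetical stand-in for FEN's _cache:

import datetime

class DictCache:
    # Hypothetical stand-in for FEN's _cache; expiration is accepted but ignored
    def __init__(self):
        self.store = {}
    def get(self, key):
        return self.store.get(key)
    def set(self, key, value, expiration=None):
        self.store[key] = value

_cache = DictCache()
cache_name = 'fen_%s_SCRAPER_%s_%s_%s_%s' % ('folders1', 'Some Movie', '2019', None, None)
if _cache.get(cache_name) is None:
    _cache.set(cache_name, ['scrape results'], expiration=datetime.timedelta(hours=2))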