def playback_episode( self, channel, episode):
    """Play back a single episode with the configured player.

    Marks the episode as played first, then either hands the file to
    the built-in Nokia media player (on Maemo, unless a custom player
    is allowed) or launches the configured desktop player.

    Returns a (success, player_name) tuple.
    """
    self.history_mark_played( episode.url)
    filename=episode.local_filename()

    if gpodder.interface == gpodder.MAEMO and not self.config.maemo_allow_custom_player:
        # Use the built-in Nokia Mediaplayer here
        filename=filename.encode('utf-8')
        osso_rpc=osso.Rpc(self.osso_c)
        service='com.nokia.mediaplayer'
        path='/com/nokia/mediaplayer'
        osso_rpc.rpc_run(service, path, service, 'mime_open', ('file://'+filename,))
        return (True, service)

    # Determine the file type and set the player accordingly.
    file_type=util.file_type_by_extension(util.file_extension_from_url(episode.url))
    if file_type == 'video':
        player=self.config.videoplayer
    elif file_type == 'audio':
        player=self.config.player
    else:
        log('Non-audio or video file type, using xdg-open for %s', filename, sender=self)
        player='xdg-open'

    # Expand the player's desktop-entry-style command template with the file
    command_line=shlex.split(util.format_desktop_command(player, filename).encode('utf-8'))
    log( 'Command line: [ %s ]', ', '.join( [ '"%s"' % p for p in command_line ]), sender=self)
    try:
        subprocess.Popen( command_line)
    except:
        # NOTE(review): bare except — any launch failure (missing binary,
        # OSError, ...) is reported as (False, player)
        return ( False, command_line[0] )
    return ( True, command_line[0] )
def iter_set_downloading_columns( self, model, iter, new_episodes=()):
    """Update the status icon (model column 4) for the episode row at *iter*.

    The icon reflects the episode's state: a media-type icon when the
    file exists locally, a "downloading" icon while a download is in
    progress, a "deleted" icon for previously-downloaded episodes, a
    "new" icon for episodes listed in new_episodes, or no icon at all.

    new_episodes is an optional iterable of episode objects considered
    "new" for icon purposes.

    Fixes: the default for new_episodes was a mutable list literal
    (shared across calls); the membership test built a throwaway list.
    """
    global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
    global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

    url=model.get_value( iter, 0)
    local_filename=model.get_value( iter, 8)
    # NOTE(review): "played" is True for *unplayed* episodes here — the
    # icon decoration marks episodes that still need listening
    played=not gl.history_is_played(url)
    locked=gl.history_is_locked(url)

    if gl.config.episode_list_descriptions:
        icon_size=32
    else:
        icon_size=16

    if os.path.exists( local_filename):
        # File is on disk: pick an icon based on the detected media type
        file_type=util.file_type_by_extension( util.file_extension_from_url(url))
        if file_type == 'audio':
            status_icon=util.get_tree_icon(ICON_AUDIO_FILE, played, locked, self.icon_cache, icon_size)
        elif file_type == 'video':
            status_icon=util.get_tree_icon(ICON_VIDEO_FILE, played, locked, self.icon_cache, icon_size)
        elif file_type == 'torrent':
            status_icon=util.get_tree_icon(ICON_BITTORRENT, played, locked, self.icon_cache, icon_size)
        else:
            status_icon=util.get_tree_icon('unknown', played, locked, self.icon_cache, icon_size)
    elif services.download_status_manager.is_download_in_progress(url):
        status_icon=util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
    elif gl.history_is_downloaded(url):
        status_icon=util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
    elif any(e.url == url for e in new_episodes):
        status_icon=util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
    else:
        status_icon=None

    model.set( iter, 4, status_icon)
def file_type(self):
    """Return the media type of this episode ('audio', 'video', ...)."""
    # Links to known video hosters never carry a media extension,
    # so treat them as video unconditionally.
    video_hosters = (youtube, vimeo, escapist_videos)
    if any(hoster.is_video_link(self.url) for hoster in video_hosters):
        return 'video'
    return util.file_type_by_extension(self.extension())
def from_feedparser_entry( entry, channel):
    """Build a podcastItem for *channel* from a feedparser entry.

    Picks the best enclosure (preferring audio/video MIME types over
    the first one), falls back to the entry link when it points to a
    known media file type, and fills in pubDate/length/mimetype
    metadata (using HTTP HEAD metainfo when the feed data is missing
    or implausible). Raises ValueError when no usable URL is found.
    """
    episode=podcastItem( channel)

    # Fall back to the first line of the summary when no title is given
    episode.title=entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
    episode.link=entry.get( 'link', '')
    episode.description=util.remove_html_tags( entry.get( 'summary', entry.get( 'link', entry.get( 'title', ''))))
    episode.guid=entry.get( 'id', '')
    if entry.get( 'updated_parsed', None):
        episode.pubDate=util.updated_parsed_to_rfc2822( entry.updated_parsed)
    if episode.title == '':
        log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender=episode)

    enclosure=None
    if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
        enclosure=entry.enclosures[0]
        if len(entry.enclosures) > 1:
            # Prefer the first audio/video enclosure with a usable URL
            for e in entry.enclosures:
                if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                    if util.normalize_feed_url(e.href) is not None:
                        log( 'Selected enclosure: %s', e.href, sender=episode)
                        enclosure=e
                        break
        episode.url=util.normalize_feed_url( enclosure.get( 'href', ''))
    elif hasattr(entry, 'link'):
        # No enclosures at all - use the entry link if it points to a media file
        extension=util.file_extension_from_url(entry.link)
        file_type=util.file_type_by_extension(extension)
        if file_type is not None:
            log('Adding episode with link to file type "%s".', file_type, sender=episode)
            episode.url=entry.link

    if not episode.url:
        raise ValueError( 'Episode has an invalid URL')

    if not episode.pubDate:
        metainfo=episode.get_metainfo()
        if 'pubdate' in metainfo:
            episode.pubDate=metainfo['pubdate']

    if hasattr( enclosure, 'length'):
        try:
            episode.length=int(enclosure.length)
        except:
            episode.length=-1

        # For episodes with a small length amount, try to find it via HTTP HEAD
        if episode.length <= 100:
            metainfo=episode.get_metainfo()
            if 'length' in metainfo:
                episode.length=metainfo['length']

    if hasattr( enclosure, 'type'):
        episode.mimetype=enclosure.type

    if episode.title == '':
        # Last resort: derive a title from the URL's filename
        ( filename, extension )=os.path.splitext( os.path.basename( episode.url))
        episode.title=filename

    return episode
def from_podcastparser_entry(cls, entry, channel):
    """Create an episode for *channel* from a podcastparser entry dict.

    Tries each enclosure in feed order (skipping images and generic
    octet-stream data when real audio/video is present), then falls
    back to the entry link. Returns the episode, or None when no
    usable media URL is found.
    """
    episode = cls(channel)
    episode.guid = entry['guid']
    episode.title = entry['title']
    episode.link = entry['link']
    episode.description = entry['description']
    if entry.get('description_html'):
        episode.description_html = entry['description_html']
    # TODO: This really should be handled in podcastparser and not here.
    elif util.is_html(entry['description']):
        episode.description_html = entry['description']
        episode.description = util.remove_html_tags(entry['description'])

    episode.total_time = entry['total_time']
    episode.published = entry['published']
    episode.payment_url = entry['payment_url']

    audio_available = any(enclosure['mime_type'].startswith('audio/')
                          for enclosure in entry['enclosures'])
    video_available = any(enclosure['mime_type'].startswith('video/')
                          for enclosure in entry['enclosures'])

    for enclosure in entry['enclosures']:
        episode.mime_type = enclosure['mime_type']

        # Skip images in feeds if audio or video is available (bug 979)
        # This must (and does) also look in Media RSS enclosures (bug 1430)
        if episode.mime_type.startswith('image/') and (audio_available or video_available):
            continue

        # If we have audio or video available later on, skip
        # 'application/octet-stream' data types (fixes Linux Outlaws)
        if episode.mime_type == 'application/octet-stream' and (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(enclosure['url'])
        if not episode.url:
            continue

        episode.file_size = enclosure['file_size']
        return episode

    # Brute-force detection of the episode link
    episode.url = util.normalize_feed_url(entry['link'])
    if not episode.url:
        return None

    if any(mod.is_video_link(episode.url) for mod in (youtube, vimeo, escapist_videos)):
        return episode

    # Check if we can resolve this link to a audio/video file
    filename, extension = util.filename_from_url(episode.url)
    file_type = util.file_type_by_extension(extension)

    # The link points to a audio or video file - use it!
    if file_type is not None:
        return episode

    return None
def _get_enclosure_url(self, episode_dict):
    """Return the entry's link if it points to an audio/video file, else None."""
    link = episode_dict.get('link')
    if link is None:
        return None
    _, ext = util.filename_from_url(link)
    if util.file_type_by_extension(ext) not in ('audio', 'video'):
        return None
    logger.debug('Using link for enclosure URL: %s', link)
    return link
def extension(self, may_call_local_filename=True):
    """Return the file extension for this episode (e.g. '.ogg').

    Prefers the extension of the already-determined local file (when
    may_call_local_filename is True and one exists) over the one in
    the URL; falls back to an extension derived from the MIME type
    when neither yields a recognized media extension.
    """
    _, ext = util.filename_from_url(self.url)
    if may_call_local_filename:
        local_file = self.local_filename(create=False)
        if local_file is not None:
            _, ext = os.path.splitext(local_file)
    # If we can't detect a usable extension from the URL/file,
    # fall back on the MIME type
    if ext == "" or util.file_type_by_extension(ext) is None:
        ext = util.extension_from_mimetype(self.mime_type)
    return ext
def extension(self, may_call_local_filename=True):
    """Best-effort file extension for this episode.

    Looks at the URL first, then (optionally) at the local file, and
    finally derives an extension from the MIME type when the detected
    one is empty or not a known media extension.
    """
    ext = util.filename_from_url(self.url)[1]
    if may_call_local_filename:
        local = self.local_filename(create=False)
        if local is not None:
            ext = os.path.splitext(local)[1]
    # Short-circuit kept: file_type_by_extension is never called with ''
    needs_fallback = ext == '' or util.file_type_by_extension(ext) is None
    return util.extension_from_mimetype(self.mime_type) if needs_fallback else ext
def from_podcastparser_entry(cls, entry, channel):
    """Create an episode for *channel* from a podcastparser entry dict.

    Tries each enclosure in feed order (skipping images and generic
    octet-stream data when real audio/video is present), then falls
    back to the entry link. Returns the episode, or None when no
    usable media URL is found.
    """
    episode = cls(channel)
    episode.guid = entry["guid"]
    episode.title = entry["title"]
    episode.link = entry["link"]
    episode.description = entry["description"]
    episode.total_time = entry["total_time"]
    episode.published = entry["published"]
    episode.payment_url = entry["payment_url"]

    audio_available = any(enclosure["mime_type"].startswith("audio/")
                          for enclosure in entry["enclosures"])
    video_available = any(enclosure["mime_type"].startswith("video/")
                          for enclosure in entry["enclosures"])

    for enclosure in entry["enclosures"]:
        episode.mime_type = enclosure["mime_type"]

        # Skip images in feeds if audio or video is available (bug 979)
        # This must (and does) also look in Media RSS enclosures (bug 1430)
        if episode.mime_type.startswith("image/") and (audio_available or video_available):
            continue

        # If we have audio or video available later on, skip
        # 'application/octet-stream' data types (fixes Linux Outlaws)
        if episode.mime_type == "application/octet-stream" and (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(enclosure["url"])
        if not episode.url:
            continue

        episode.file_size = enclosure["file_size"]
        return episode

    # Brute-force detection of the episode link
    episode.url = util.normalize_feed_url(entry["link"])
    if not episode.url:
        return None

    if any(mod.is_video_link(episode.url) for mod in (youtube, vimeo, escapist_videos)):
        return episode

    # Check if we can resolve this link to a audio/video file
    filename, extension = util.filename_from_url(episode.url)
    file_type = util.file_type_by_extension(extension)

    # The link points to a audio or video file - use it!
    if file_type is not None:
        return episode

    return None
def file_type(self):
    """Return this episode's content type, consulting the registry first.

    Extensions may override the detected type via the content_type
    resolver; otherwise the type is derived from the file extension.
    """
    override = registry.content_type.resolve(self, None)
    if override is None:
        return util.file_type_by_extension(self.extension())
    return override
def from_feedparser_entry(cls, entry, channel):
    """Create an episode for *channel* from a feedparser entry.

    Tries, in order: regular enclosures, Media RSS content, and a
    brute-force scan of all entry links for something that resolves
    to an audio/video file. Returns the episode, or None when no
    usable media URL is found.

    Fix: the title-normalization regex used a non-raw string
    ('\\s+'), which is an invalid escape sequence warning on modern
    Python — now a raw string.
    """
    episode = cls(channel)
    episode.guid = entry.get('id', '')
    # Replace multi-space and newlines with single space (Maemo bug 11173)
    episode.title = re.sub(r'\s+', ' ', entry.get('title', ''))
    episode.link = entry.get('link', '')
    if 'content' in entry and len(entry['content']) and \
            entry['content'][0].get('type', '') == 'text/html':
        episode.description = entry['content'][0].value
    else:
        episode.description = entry.get('summary', '')

    # Fallback to subtitle if summary is not available
    if not episode.description:
        episode.description = entry.get('subtitle', '')

    try:
        total_time = 0

        # Parse iTunes-specific podcast duration metadata
        itunes_duration = entry.get('itunes_duration', '')
        if itunes_duration:
            total_time = util.parse_time(itunes_duration)

        # Parse time from YouTube descriptions if it's a YouTube feed
        if youtube.is_youtube_guid(episode.guid):
            result = re.search(r'Time:<[^>]*>\n<[^>]*>([:0-9]*)<',
                    episode.description)
            if result:
                youtube_duration = result.group(1)
                total_time = util.parse_time(youtube_duration)

        episode.total_time = total_time
    except:
        # Best-effort duration parsing; a bad value simply leaves 0
        pass

    episode.published = feedcore.get_pubdate(entry)

    enclosures = entry.get('enclosures', [])
    media_rss_content = entry.get('media_content', [])
    audio_available = any(e.get('type', '').startswith('audio/')
            for e in enclosures + media_rss_content)
    video_available = any(e.get('type', '').startswith('video/')
            for e in enclosures + media_rss_content)

    # XXX: Make it possible for hooks/extensions to override this by
    # giving them a list of enclosures and the "self" object (podcast)
    # and letting them sort and/or filter the list of enclosures to
    # get the desired enclosure picked by the algorithm below.
    filter_and_sort_enclosures = lambda x: x

    # read the flattr auto-url, if exists
    payment_info = [link['href'] for link in entry.get('links', [])
            if link['rel'] == 'payment']
    if payment_info:
        episode.payment_url = sorted(payment_info,
                key=get_payment_priority)[0]

    # Enclosures
    for e in filter_and_sort_enclosures(enclosures):
        episode.mime_type = e.get('type', 'application/octet-stream')
        if episode.mime_type == '':
            # See Maemo bug 10036
            logger.warn('Fixing empty mimetype in ugly feed')
            episode.mime_type = 'application/octet-stream'

        if '/' not in episode.mime_type:
            continue

        # Skip images in feeds if audio or video is available (bug 979)
        # This must (and does) also look in Media RSS enclosures (bug 1430)
        if episode.mime_type.startswith('image/') and \
                (audio_available or video_available):
            continue

        # If we have audio or video available later on, skip
        # 'application/octet-stream' data types (fixes Linux Outlaws)
        if episode.mime_type == 'application/octet-stream' and \
                (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(e.get('href', ''))
        if not episode.url:
            continue

        try:
            episode.file_size = int(e.length) or -1
        except:
            episode.file_size = -1

        return episode

    # Media RSS content
    for m in filter_and_sort_enclosures(media_rss_content):
        episode.mime_type = m.get('type', 'application/octet-stream')
        if '/' not in episode.mime_type:
            continue

        # Skip images in Media RSS if we have audio/video (bug 1444)
        if episode.mime_type.startswith('image/') and \
                (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(m.get('url', ''))
        if not episode.url:
            continue

        try:
            episode.file_size = int(m.get('filesize', 0)) or -1
        except:
            episode.file_size = -1

        try:
            episode.total_time = int(m.get('duration', 0)) or 0
        except:
            episode.total_time = 0

        return episode

    # Brute-force detection of any links
    for l in entry.get('links', ()):
        episode.url = util.normalize_feed_url(l.get('href', ''))
        if not episode.url:
            continue

        if (youtube.is_video_link(episode.url) or
                vimeo.is_video_link(episode.url)):
            return episode

        # Check if we can resolve this link to a audio/video file
        filename, extension = util.filename_from_url(episode.url)
        file_type = util.file_type_by_extension(extension)
        if file_type is None and hasattr(l, 'type'):
            extension = util.extension_from_mimetype(l.type)
            file_type = util.file_type_by_extension(extension)

        # The link points to a audio or video file - use it!
        if file_type is not None:
            return episode

    return None
def check_download_folder(self):
    """Check the download folder for externally-downloaded files

    This will try to assign downloaded files with episodes in the
    database.

    This will also cause missing files to be marked as deleted.
    """
    known_files = set()

    for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
        if episode.was_downloaded():
            filename = episode.local_filename(create=False)
            if filename is None:
                # No filename has been determined for this episode
                continue

            if not os.path.exists(filename):
                # File has been deleted by the user - simulate a
                # delete event (also marks the episode as deleted)
                logger.debug('Episode deleted: %s', filename)
                episode.delete_from_disk()
                continue

            known_files.add(filename)

    existing_files = set(filename for filename in
            glob.glob(os.path.join(self.save_dir, '*'))
            if not filename.endswith('.partial'))

    # Cover art files ("folder.<ext>") live in the download folder too
    # and must not be treated as external downloads
    ignore_files = [ 'folder' + ext for ext in
            coverart.CoverDownloader.EXTENSIONS ]

    external_files = existing_files.difference(
            list(known_files) +
            [ os.path.join(self.save_dir, ignore_file)
              for ignore_file in ignore_files ])
    if not external_files:
        return

    all_episodes = self.get_all_episodes()

    for filename in external_files:
        found = False

        basename = os.path.basename(filename)
        # First try an exact match against already-assigned filenames
        existing = [ e for e in all_episodes
                if e.download_filename == basename ]
        if existing:
            existing = existing[0]
            logger.info('Importing external download: %s', filename)
            existing.on_downloaded(filename)
            continue

        for episode in all_episodes:
            wanted_filename = episode.local_filename(create=True,
                    return_wanted_filename=True)
            if basename == wanted_filename:
                logger.info('Importing external download: %s', filename)
                episode.download_filename = basename
                episode.on_downloaded(filename)
                found = True
                break

            wanted_base, wanted_ext = os.path.splitext(wanted_filename)
            target_base, target_ext = os.path.splitext(basename)

            if wanted_base == target_base:
                # Filenames only differ by the extension
                wanted_type = util.file_type_by_extension(wanted_ext)
                target_type = util.file_type_by_extension(target_ext)

                # If wanted type is None, assume that we don't know
                # the right extension before the download (e.g. YouTube)
                # if the wanted type is the same as the target type,
                # assume that it's the correct file
                if wanted_type is None or wanted_type == target_type:
                    logger.info('Importing external download: %s', filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    found = True
                    break

        if not found and not util.is_system_file(filename):
            logger.warn('Unknown external file: %s', filename)
def check_download_folder(self):
    """Check the download folder for externally-downloaded files

    This will try to assign downloaded files with episodes in the
    database and (failing that) will move downloaded files into
    the "Unknown" subfolder in the download directory, so that
    the user knows that gPodder doesn't know to which episode the
    file belongs (the "Unknown" folder may be used by external
    tools or future gPodder versions for better import support).

    This will also cause missing files to be marked as deleted.
    """
    known_files = set()

    for episode in self.get_downloaded_episodes():
        if episode.was_downloaded():
            filename = episode.local_filename(create=False)
            if not os.path.exists(filename):
                # File has been deleted by the user - simulate a
                # delete event (also marks the episode as deleted)
                logger.debug('Episode deleted: %s', filename)
                episode.delete_from_disk()
                continue

            known_files.add(filename)

    existing_files = set(filename for filename in
            glob.glob(os.path.join(self.save_dir, '*'))
            if not filename.endswith('.partial'))

    # 'folder.jpg' (cover art) and the 'Unknown' folder are expected
    # in the download directory and are not external downloads
    external_files = existing_files.difference(list(known_files) +
            [os.path.join(self.save_dir, x)
             for x in ('folder.jpg', 'Unknown')])
    if not external_files:
        return

    all_episodes = self.get_all_episodes()

    for filename in external_files:
        found = False

        basename = os.path.basename(filename)
        # First try an exact match against already-assigned filenames
        existing = [e for e in all_episodes
                if e.download_filename == basename]
        if existing:
            existing = existing[0]
            logger.info('Importing external download: %s', filename)
            existing.on_downloaded(filename)
            continue

        for episode in all_episodes:
            wanted_filename = episode.local_filename(create=True,
                    return_wanted_filename=True)
            if basename == wanted_filename:
                logger.info('Importing external download: %s', filename)
                episode.download_filename = basename
                episode.on_downloaded(filename)
                found = True
                break

            wanted_base, wanted_ext = os.path.splitext(wanted_filename)
            target_base, target_ext = os.path.splitext(basename)

            if wanted_base == target_base:
                # Filenames only differ by the extension
                wanted_type = util.file_type_by_extension(wanted_ext)
                target_type = util.file_type_by_extension(target_ext)

                # If wanted type is None, assume that we don't know
                # the right extension before the download (e.g. YouTube)
                # if the wanted type is the same as the target type,
                # assume that it's the correct file
                if wanted_type is None or wanted_type == target_type:
                    logger.info('Importing external download: %s', filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    found = True
                    break

        if not found:
            logger.warn('Unknown external file: %s', filename)
            target_dir = os.path.join(self.save_dir, 'Unknown')
            if util.make_directory(target_dir):
                target_file = os.path.join(target_dir, basename)
                logger.info('Moving %s => %s', filename, target_file)
                try:
                    shutil.move(filename, target_file)
                # Fix: 'except Exception, e' is Python 2-only syntax;
                # the 'as' form is valid on Python 2.6+ and 3.x
                except Exception as e:
                    logger.error('Could not move file: %s', e, exc_info=True)
def from_feedparser_entry(entry, channel):
    """Build a PodcastEpisode for *channel* from a feedparser entry.

    Tries, in order: regular enclosures, Media RSS content, a
    brute-force scan of all entry links, and finally MP3 URLs found
    in the description HTML. Returns the episode, or None when no
    usable media URL is found.
    """
    episode = PodcastEpisode(channel)
    episode.title = entry.get('title', '')
    episode.link = entry.get('link', '')
    episode.description = entry.get('summary', '')

    # Fallback to subtitle if summary is not available
    if not episode.description:
        episode.description = entry.get('subtitle', '')

    episode.guid = entry.get('id', '')
    if entry.get('updated_parsed', None):
        episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))

    # Enclosures
    for e in entry.get('enclosures', ()):
        episode.mimetype = e.get('type', 'application/octet-stream')
        if '/' not in episode.mimetype:
            continue

        episode.url = util.normalize_feed_url(e.get('href', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(e.length) or -1
        except:
            episode.length = -1

        return episode

    # Media RSS content
    for m in entry.get('media_content', ()):
        episode.mimetype = m.get('type', 'application/octet-stream')
        if '/' not in episode.mimetype:
            continue

        episode.url = util.normalize_feed_url(m.get('url', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(m.fileSize) or -1
        except:
            episode.length = -1

        return episode

    # Brute-force detection of any links
    for l in entry.get('links', ()):
        episode.url = util.normalize_feed_url(l.get('href', ''))
        if not episode.url:
            continue

        if youtube.is_video_link(episode.url):
            return episode

        # Check if we can resolve this link to a audio/video file
        filename, extension = util.filename_from_url(episode.url)
        file_type = util.file_type_by_extension(extension)
        if file_type is None and hasattr(l, 'type'):
            extension = util.extension_from_mimetype(l.type)
            file_type = util.file_type_by_extension(extension)

        # The link points to a audio or video file - use it!
        if file_type is not None:
            return episode

    # Scan MP3 links in description text
    mp3s = re.compile(r'http://[^"]*\.mp3')
    for content in entry.get('content', ()):
        html = content.value
        for match in mp3s.finditer(html):
            episode.url = match.group(0)
            return episode

    return None
def file_type( self):
    """Return the media type ('audio', 'video', ...) from the extension."""
    extension = self.extension()
    return util.file_type_by_extension(extension)
def from_feedparser_entry(entry, channel, mimetype_prefs=''):
    """Build a PodcastEpisode for *channel* from a feedparser entry.

    mimetype_prefs is a comma-separated list of MIME types; enclosures
    matching earlier entries in the list are preferred. Tries, in
    order: regular enclosures, Media RSS content, a brute-force scan
    of all entry links, and MP3 URLs found in the description HTML.
    Never returns None: for non-podcast channels an episode with an
    empty URL and STATE_NORMAL is returned.

    Fixes: the title-normalization regex used a non-raw string
    ('\\s+'), an invalid escape sequence warning on modern Python;
    removed dead commented-out debug statements.
    """
    episode = PodcastEpisode(channel)

    # Replace multi-space and newlines with single space (Maemo bug 11173)
    episode.title = re.sub(r'\s+', ' ', entry.get('title', ''))
    episode.link = entry.get('link', '')
    if 'content' in entry and len(entry['content']) and \
            entry['content'][0].get('type', '') == 'text/html':
        episode.description = entry['content'][0].value
    else:
        episode.description = entry.get('summary', '')

    try:
        # Parse iTunes-specific podcast duration metadata
        total_time = util.parse_time(entry.get('itunes_duration', ''))
        episode.total_time = total_time
    except:
        # Best-effort; an unparsable duration is simply ignored
        pass

    # Fallback to subtitle if summary is not available
    if not episode.description:
        episode.description = entry.get('subtitle', '')

    episode.guid = entry.get('id', '')
    if entry.get('updated_parsed', None):
        episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))

    enclosures = entry.get('enclosures', [])
    media_rss_content = entry.get('media_content', [])
    audio_available = any(e.get('type', '').startswith('audio/')
            for e in enclosures + media_rss_content)
    video_available = any(e.get('type', '').startswith('video/')
            for e in enclosures + media_rss_content)

    # Create the list of preferred mime types
    mimetype_prefs = mimetype_prefs.split(',')

    def calculate_preference_value(enclosure):
        """Calculate preference value of an enclosure

        This is based on mime types and allows users to prefer
        certain mime types over others (e.g. MP3 over AAC, ...)
        """
        mimetype = enclosure.get('type', None)
        try:
            # If the mime type is found, return its (zero-based) index
            return mimetype_prefs.index(mimetype)
        except ValueError:
            # If it is not found, assume it comes after all listed items
            return len(mimetype_prefs)

    # Enclosures
    for e in sorted(enclosures, key=calculate_preference_value):
        episode.mimetype = e.get('type', 'application/octet-stream')
        if episode.mimetype == '':
            # See Maemo bug 10036
            log('Fixing empty mimetype in ugly feed', sender=episode)
            episode.mimetype = 'application/octet-stream'

        if '/' not in episode.mimetype:
            continue

        # Skip images in feeds if audio or video is available (bug 979)
        # This must (and does) also look in Media RSS enclosures (bug 1430)
        if episode.mimetype.startswith('image/') and \
                (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(e.get('href', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(e.length) or -1
        except:
            episode.length = -1

        return episode

    # Media RSS content
    for m in sorted(media_rss_content, key=calculate_preference_value):
        episode.mimetype = m.get('type', 'application/octet-stream')
        if '/' not in episode.mimetype:
            continue

        # Skip images in Media RSS if we have audio/video (bug 1444)
        if episode.mimetype.startswith('image/') and \
                (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(m.get('url', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(m.fileSize) or -1
        except:
            episode.length = -1

        return episode

    # Brute-force detection of any links
    for l in entry.get('links', ()):
        episode.url = util.normalize_feed_url(l.get('href', ''))
        if not episode.url:
            continue

        if youtube.is_video_link(episode.url):
            return episode

        # Check if we can resolve this link to a audio/video file
        filename, extension = util.filename_from_url(episode.url)
        file_type = util.file_type_by_extension(extension)
        if file_type is None and hasattr(l, 'type'):
            extension = util.extension_from_mimetype(l.type)
            file_type = util.file_type_by_extension(extension)

        # The link points to a audio or video file - use it!
        if file_type is not None:
            return episode

    # Scan MP3 links in description text
    mp3s = re.compile(r'http://[^"]*\.mp3')
    for content in entry.get('content', ()):
        html = content.value
        for match in mp3s.finditer(html):
            episode.url = match.group(0)
            return episode

    # don't return None : for non-podcast channels
    episode.state = gpodder.STATE_NORMAL
    episode.url = ''
    return episode
def file_type(self):
    """Return the media type of this episode ('audio', 'video', ...)."""
    # All YouTube/Vimeo/Escapist links are assumed to be video files.
    for hoster in (youtube, vimeo, escapist_videos):
        if hoster.is_video_link(self.url):
            return "video"
    return util.file_type_by_extension(self.extension())
def file_type(self):
    """Return the media type of this episode; YouTube links are video."""
    is_youtube_video = youtube.is_video_link(self.url)
    return 'video' if is_youtube_video else util.file_type_by_extension(self.extension())
def file_type( self):
    """Determine the media type from the URL's file extension."""
    ext = util.file_extension_from_url( self.url)
    return util.file_type_by_extension(ext)
def check_download_folder(self):
    """Check the download folder for externally-downloaded files

    This will try to assign downloaded files with episodes in the
    database.

    This will also cause missing files to be marked as deleted.
    """
    known_files = set()

    for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
        if episode.was_downloaded():
            filename = episode.local_filename(create=False)
            if filename is None:
                # No filename has been determined for this episode
                continue

            if not os.path.exists(filename):
                # File has been deleted by the user - simulate a
                # delete event (also marks the episode as deleted)
                logger.debug("Episode deleted: %s", filename)
                episode.delete_from_disk()
                continue

            known_files.add(filename)

    existing_files = set(
        filename
        for filename in glob.glob(os.path.join(self.save_dir, "*"))
        if not filename.endswith(".partial")
    )

    # Cover art files ("folder.<ext>") live in the download folder too
    # and must not be treated as external downloads
    ignore_files = ["folder" + ext for ext in coverart.CoverDownloader.EXTENSIONS]

    external_files = existing_files.difference(
        list(known_files) +
        [os.path.join(self.save_dir, ignore_file) for ignore_file in ignore_files]
    )
    if not external_files:
        return

    all_episodes = self.get_all_episodes()

    for filename in external_files:
        found = False

        basename = os.path.basename(filename)
        # First try an exact match against already-assigned filenames
        existing = [e for e in all_episodes if e.download_filename == basename]
        if existing:
            existing = existing[0]
            logger.info("Importing external download: %s", filename)
            existing.on_downloaded(filename)
            continue

        for episode in all_episodes:
            wanted_filename = episode.local_filename(create=True, return_wanted_filename=True)
            if basename == wanted_filename:
                logger.info("Importing external download: %s", filename)
                episode.download_filename = basename
                episode.on_downloaded(filename)
                found = True
                break

            wanted_base, wanted_ext = os.path.splitext(wanted_filename)
            target_base, target_ext = os.path.splitext(basename)

            if wanted_base == target_base:
                # Filenames only differ by the extension
                wanted_type = util.file_type_by_extension(wanted_ext)
                target_type = util.file_type_by_extension(target_ext)

                # If wanted type is None, assume that we don't know
                # the right extension before the download (e.g. YouTube)
                # if the wanted type is the same as the target type,
                # assume that it's the correct file
                if wanted_type is None or wanted_type == target_type:
                    logger.info("Importing external download: %s", filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    found = True
                    break

        if not found and not util.is_system_file(filename):
            logger.warn("Unknown external file: %s", filename)
def check_download_folder(self):
    """Check the download folder for externally-downloaded files

    This will try to assign downloaded files with episodes in the
    database.

    This will also cause missing files to be marked as deleted.
    """
    known_files = set()

    for episode in self.get_episodes(gpodder.STATE_DOWNLOADED):
        if episode.state == gpodder.STATE_DOWNLOADED:
            filename = episode.local_filename(create=False)
            if not os.path.exists(filename):
                # File has been deleted by the user - simulate a
                # delete event (also marks the episode as deleted)
                logger.debug('Episode deleted: %s', filename)
                episode.delete_download()
                continue

            known_files.add(filename)

    # Cover art files ("folder.<ext>") are expected in the download
    # folder and must not be treated as external downloads
    known_files.update(os.path.join(self.save_dir, 'folder' + ext)
            for ext in coverart.CoverDownloader.EXTENSIONS)

    existing_files = {filename
                      for filename in glob.glob(os.path.join(self.save_dir, '*'))
                      if not filename.endswith('.partial')}
    external_files = existing_files.difference(known_files)
    if not external_files:
        return

    for filename in external_files:
        found = False

        basename = os.path.basename(filename)
        # First try an exact match against already-assigned filenames
        existing = next((episode for episode in self.episodes
                         if episode.download_filename == basename), None)
        if existing is not None:
            logger.info('Importing external download: %s', filename)
            existing.on_downloaded(filename)
            continue

        for episode in self.episodes:
            wanted_filename = episode.local_filename(create=True,
                    return_wanted_filename=True)
            if basename == wanted_filename:
                logger.info('Importing external download: %s', filename)
                episode.download_filename = basename
                episode.on_downloaded(filename)
                found = True
                break

            wanted_base, wanted_ext = os.path.splitext(wanted_filename)
            target_base, target_ext = os.path.splitext(basename)

            if wanted_base == target_base:
                # Filenames only differ by the extension
                wanted_type = util.file_type_by_extension(wanted_ext)
                target_type = util.file_type_by_extension(target_ext)

                # If wanted type is None, assume that we don't know
                # the right extension before the download (e.g. YouTube)
                # if the wanted type is the same as the target type,
                # assume that it's the correct file
                if wanted_type is None or wanted_type == target_type:
                    logger.info('Importing external download: %s', filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    found = True
                    break

        if not found:
            logger.warn('Unknown external file: %s', filename)