def __init__(self):
    """Read the plugin configuration, validate the selected ReplayGain
    backend name, and instantiate the backend.

    Raises ``ui.UserError`` if the backend is unknown or fails to
    initialize.
    """
    super(ReplayGainPlugin, self).__init__()
    # NOTE(review): the import stage is registered unconditionally even
    # though the 'auto' option is read below into self.automatic —
    # presumably self.imported checks self.automatic itself; confirm.
    self.import_stages = [self.imported]

    # default backend is 'command' for backward-compatibility.
    self.config.add({
        'overwrite': False,
        'auto': True,
        'backend': u'command',
        'targetlevel': 89,
    })

    self.overwrite = self.config['overwrite'].get(bool)
    self.automatic = self.config['auto'].get(bool)
    backend_name = self.config['backend'].get(unicode)
    if backend_name not in self.backends:
        raise ui.UserError(
            u"Selected ReplayGain backend {0} is not supported. "
            u"Please select one of: {1}".format(
                backend_name,
                u', '.join(self.backends.keys())))
    try:
        # Backend constructors may probe for external tools and fail.
        self.backend_instance = self.backends[backend_name](self.config)
    except (ReplayGainError, FatalReplayGainError) as e:
        raise ui.UserError(
            'An error occurred in backend initialization: {0}'.format(e))
def __init__(self):
    """Collect rewrite rules from the plugin configuration and install a
    rewriting template-field function for each affected field.

    Raises ``ui.UserError`` for malformed rules or unknown field names.
    """
    super(RewritePlugin, self).__init__()

    self.config.add({})

    # Gather all the rewrite rules for each field.
    rules = defaultdict(list)
    for key, view in self.config.items():
        value = view.get(unicode)
        try:
            # Each config key is of the form "<fieldname> <pattern>".
            fieldname, pattern = key.split(None, 1)
        except ValueError:
            raise ui.UserError("invalid rewrite specification")
        if fieldname not in library.Item._fields:
            raise ui.UserError("invalid field name (%s) in rewriter"
                               % fieldname)
        self._log.debug(u'adding template field {0}', key)
        # Patterns are lowercased here — presumably matched against
        # lowercased field values inside `rewriter`; confirm.
        pattern = re.compile(pattern.lower())
        rules[fieldname].append((pattern, value))
        if fieldname == 'artist':
            # Special case for the artist field: apply the same
            # rewrite for "albumartist" as well.
            rules['albumartist'].append((pattern, value))

    # Replace each template field with the new rewriter function.
    for fieldname, fieldrules in rules.iteritems():
        getter = rewriter(fieldname, fieldrules)
        self.template_fields[fieldname] = getter
        if fieldname in library.Album._fields:
            self.album_template_fields[fieldname] = getter
def __init__(self): super(ReplayGainPlugin, self).__init__() # default backend is 'command' for backward-compatibility. self.config.add({ 'overwrite': False, 'auto': True, 'backend': u'command', 'targetlevel': 89, }) self.overwrite = self.config['overwrite'].get(bool) backend_name = self.config['backend'].as_str() if backend_name not in self.backends: raise ui.UserError( u"Selected ReplayGain backend {0} is not supported. " u"Please select one of: {1}".format( backend_name, u', '.join(self.backends.keys()) ) ) # On-import analysis. if self.config['auto']: self.import_stages = [self.imported] try: self.backend_instance = self.backends[backend_name]( self.config, self._log ) except (ReplayGainError, FatalReplayGainError) as e: raise ui.UserError( u'replaygain initialization failed: {0}'.format(e))
def span_from_str(span_str):
    """Build a span dict from the span string representation.

    The string may contain one or two year numbers (e.g. "1990-1999" or
    "90s"); two-digit years are expanded relative to the first year.
    Returns a dict with 'from', 'str' and, when a second year is
    present, 'to' keys. Raises ``ui.UserError`` for unusable input.
    """
    def normalize_year(d, yearfrom):
        """Convert a (possibly two-digit) year to a 4-digit year,
        resolving two-digit years to the closest matching year at or
        after ``yearfrom``.
        """
        if yearfrom < 100:
            # The anchor year itself must be fully specified.
            raise BucketError(u"%d must be expressed on 4 digits"
                              % yearfrom)
        # if two digits only, pick closest year that ends by these two
        # digits starting from yearfrom
        if d < 100:
            if (d % 100) < (yearfrom % 100):
                d = (yearfrom - yearfrom % 100) + 100 + d
            else:
                d = (yearfrom - yearfrom % 100) + d
        return d

    # BUG FIX: use a raw string for the regex — '\d' in a plain string
    # is an invalid escape sequence (DeprecationWarning, and a syntax
    # error in future Python versions).
    years = [int(x) for x in re.findall(r'\d+', span_str)]
    if not years:
        raise ui.UserError(u"invalid range defined for year bucket '%s': no "
                           u"year found" % span_str)
    try:
        years = [normalize_year(x, years[0]) for x in years]
    except BucketError as exc:
        raise ui.UserError(u"invalid range defined for year bucket '%s': %s"
                           % (span_str, exc))
    res = {'from': years[0], 'str': span_str}
    if len(years) > 1:
        res['to'] = years[-1]
    return res
def __init__(self, finished_callback=None): """Initialize a player. If a finished_callback is provided, it is called every time a track started with play_file finishes. Once the player has been created, call run() to begin the main runloop in a separate thread. """ # Set up the Gstreamer player. From the pygst tutorial: # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html #### # Updated to GStreamer 1.0 with: # https://wiki.ubuntu.com/Novacut/GStreamer1.0 self.player = Gst.ElementFactory.make("playbin", "player") if self.player is None: raise ui.UserError("Could not create playbin") fakesink = Gst.ElementFactory.make("fakesink", "fakesink") if fakesink is None: raise ui.UserError("Could not create fakesink") self.player.set_property("video-sink", fakesink) bus = self.player.get_bus() bus.add_signal_watch() bus.connect("message", self._handle_message) # Set up our own stuff. self.playing = False self.finished_callback = finished_callback self.cached_time = None self._volume = 1.0
def configure(self, config): cls = type(self) # Gather all the rewrite rules for each field. rules = defaultdict(list) if not config.has_section('rewrite'): return for key, value in config.items('rewrite', True): try: fieldname, pattern = key.split(None, 1) except ValueError: raise ui.UserError("invalid rewrite specification") if fieldname not in library.ITEM_KEYS: raise ui.UserError("invalid field name (%s) in rewriter" % fieldname) log.debug(u'adding template field %s' % key) pattern = re.compile(pattern.lower()) rules[fieldname].append((pattern, value)) if fieldname == 'artist': # Special case for the artist field: apply the same # rewrite for "albumartist" as well. rules['albumartist'].append((pattern, value)) # Replace each template field with the new rewriter function. for fieldname, fieldrules in rules.iteritems(): cls.template_fields[fieldname] = rewriter(fieldname, fieldrules)
def update_album_list(album_list): """Update the MusicBrainz colleciton from a list of Beets albums """ # Get the available collections. collections = mb_call(musicbrainzngs.get_collections) if not collections['collection-list']: raise ui.UserError('no collections exist for user') # Get the first release collection. MusicBrainz also has event # collections, so we need to avoid adding to those. for collection in collections['collection-list']: if 'release-count' in collection: collection_id = collection['id'] break else: raise ui.UserError('No collection found.') # Get a list of all the album IDs. album_ids = [] for album in album_list: aid = album.mb_albumid if aid: if re.match(UUID_REGEX, aid): album_ids.append(aid) else: log.info(u'skipping invalid MBID: {0}'.format(aid)) # Submit to MusicBrainz. print('Updating MusicBrainz collection {0}...'.format(collection_id)) submit_albums(collection_id, album_ids) print('...MusicBrainz collection updated.')
def import_lastfm(lib):
    """Fetch the configured user's Last.fm library page by page and
    merge the play counts into the Beets library ``lib``.

    Retries each page up to the configured ``retry_limit``. Raises
    ``ui.UserError`` when no user is configured or Last.fm reports no
    data.
    """
    user = config['lastfm']['user']
    per_page = config['lastimport']['per_page']

    if not user:
        raise ui.UserError('You must specify a user name for lastimport')

    log.info('Fetching last.fm library for @{0}'.format(user))

    page_total = 1
    page_current = 0
    found_total = 0
    unknown_total = 0
    retry_limit = config['lastimport']['retry_limit'].get(int)

    # Iterate through a yet to be known page total count
    while page_current < page_total:
        log.info('lastimport: Querying page #{0}{1}...'.format(
            page_current + 1,
            '/' + str(page_total) if page_total > 1 else ''
        ))

        for retry in range(0, retry_limit):
            page = fetch_tracks(user, page_current + 1, per_page)
            if 'tracks' in page:
                # Let us the reveal the holy total pages!
                page_total = int(page['tracks']['@attr']['totalPages'])
                if page_total < 1:
                    # It means nothing to us!
                    raise ui.UserError('Last.fm reported no data.')

                found, unknown = process_tracks(lib,
                                                page['tracks']['track'])
                found_total += found
                unknown_total += unknown
                break
            else:
                log.error(
                    'lastimport: ERROR: unable to read page #{0}'.format(
                        page_current + 1
                    ))
                # BUG FIX: `retry` runs from 0 to retry_limit - 1, so
                # the previous test `retry < retry_limit` was always
                # true and the failure branch below was unreachable.
                if retry < retry_limit - 1:
                    log.info(
                        'lastimport: Retrying page #{0}... ({1}/{2} retry)'
                        .format(page_current + 1, retry + 1, retry_limit)
                    )
                else:
                    # BUG FIX: report the 1-based page number that
                    # actually failed, consistent with the messages
                    # above.
                    log.error(
                        'lastimport: FAIL: unable to fetch page #{0}, '
                        'tried {1} times'.format(page_current + 1,
                                                 retry + 1)
                    )
        page_current += 1

    log.info('lastimport: ... done!')
    log.info('lastimport: finished processing {0} song pages'.format(
        page_total
    ))
    log.info('lastimport: {0} unknown play-counts'.format(unknown_total))
    log.info('lastimport: {0} play-counts imported'.format(found_total))
def mb_call(func, *args, **kwargs):
    """Invoke a MusicBrainz API function, translating library errors
    into ``ui.UserError`` exceptions for the CLI.
    """
    try:
        result = func(*args, **kwargs)
    except musicbrainzngs.AuthenticationError:
        raise ui.UserError('authentication with MusicBrainz failed')
    except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc:
        raise ui.UserError('MusicBrainz API error: {0}'.format(exc))
    except musicbrainzngs.UsageError:
        raise ui.UserError('MusicBrainz credentials missing')
    return result
def get_modifies(self, items, model_cls, context):
    """Translate configured (query, modify-string) pairs into a list of
    (database query, modifications, deletions) triples.

    ``context`` names the config section for error messages. Raises
    ``ui.UserError`` when a modify string carries a stray query or no
    modifications at all.
    """
    parsed = []
    for query, modify in items:
        modify = modify.as_str()
        mod_query, mods, dels = self.parse_modify(modify, model_cls)
        # A query inside the value would silently narrow the match;
        # reject it outright.
        if mod_query:
            raise ui.UserError(u'modifyonimport.{0}["{1}"]: unexpected query `{2}` in value'.format(context, query, mod_query))
        if not (mods or dels):
            raise ui.UserError(u'modifyonimport.{0}["{1}"]: no modifications found'.format(context, query))
        dbquery, _ = parse_query_string(util.as_string(query), model_cls)
        parsed.append((dbquery, mods, dels))
    return parsed
def __init__(self): super(ReplayGainPlugin, self).__init__() # default backend is 'command' for backward-compatibility. self.config.add({ 'overwrite': False, 'auto': True, 'backend': u'command', 'threads': cpu_count(), 'parallel_on_import': False, 'per_disc': False, 'peak': 'true', 'targetlevel': 89, 'r128': ['Opus'], 'r128_targetlevel': lufs_to_db(-23), 'copy_album_gain': False, }) self.overwrite = self.config['overwrite'].get(bool) self.per_disc = self.config['per_disc'].get(bool) self.copy_album_gain = self.config['copy_album_gain'].get(bool) # Remember which backend is used for CLI feedback self.backend_name = self.config['backend'].as_str() if self.backend_name not in self.backends: raise ui.UserError( u"Selected ReplayGain backend {0} is not supported. " u"Please select one of: {1}".format( self.backend_name, u', '.join(self.backends.keys()))) peak_method = self.config["peak"].as_str() if peak_method not in self.peak_methods: raise ui.UserError( u"Selected ReplayGain peak method {0} is not supported. " u"Please select one of: {1}".format( peak_method, u', '.join(self.peak_methods.keys()))) self._peak_method = self.peak_methods[peak_method] # On-import analysis. if self.config['auto']: self.register_listener('import_begin', self.import_begin) self.register_listener('import', self.import_end) self.import_stages = [self.imported] # Formats to use R128. self.r128_whitelist = self.config['r128'].as_str_seq() try: self.backend_instance = self.backends[self.backend_name]( self.config, self._log) except (ReplayGainError, FatalReplayGainError) as e: raise ui.UserError( u'replaygain initialization failed: {0}'.format(e))
def import_lastfm(lib, log):
    """Fetch the configured user's Last.fm library page by page and
    merge the play counts into the Beets library ``lib``.

    ``log`` is the plugin logger (lazy ``{}``-style formatting).
    Retries each page up to the configured ``retry_limit``. Raises
    ``ui.UserError`` when no user is configured or Last.fm reports no
    data.
    """
    user = config['lastfm']['user'].as_str()
    per_page = config['lastimport']['per_page'].get(int)

    if not user:
        raise ui.UserError(u'You must specify a user name for lastimport')

    log.info(u'Fetching last.fm library for @{0}', user)

    page_total = 1
    page_current = 0
    found_total = 0
    unknown_total = 0
    retry_limit = config['lastimport']['retry_limit'].get(int)

    # Iterate through a yet to be known page total count
    while page_current < page_total:
        log.info(u'Querying page #{0}{1}...',
                 page_current + 1,
                 '/{}'.format(page_total) if page_total > 1 else '')

        for retry in range(0, retry_limit):
            tracks, page_total = fetch_tracks(user, page_current + 1,
                                              per_page)
            if page_total < 1:
                # It means nothing to us!
                raise ui.UserError(u'Last.fm reported no data.')

            if tracks:
                found, unknown = process_tracks(lib, tracks, log)
                found_total += found
                unknown_total += unknown
                break
            else:
                log.error(u'ERROR: unable to read page #{0}',
                          page_current + 1)
                # BUG FIX: `retry` runs from 0 to retry_limit - 1, so
                # the previous test `retry < retry_limit` was always
                # true and the failure branch below was unreachable.
                if retry < retry_limit - 1:
                    log.info(
                        u'Retrying page #{0}... ({1}/{2} retry)',
                        page_current + 1, retry + 1, retry_limit
                    )
                else:
                    # BUG FIX: this was previously two separate string
                    # arguments, so the second fragment was consumed as
                    # the {0} format argument instead of being part of
                    # the message; also report the 1-based page number.
                    log.error(u'FAIL: unable to fetch page #{0}, '
                              u'tried {1} times',
                              page_current + 1, retry + 1)
        page_current += 1

    log.info(u'... done!')
    log.info(u'finished processing {0} song pages', page_total)
    log.info(u'{0} unknown play-counts', unknown_total)
    log.info(u'{0} play-counts imported', found_total)
def convert_func(self, lib, opts, args):
    """Handle the `convert` command: resolve options (command-line
    flags take precedence over plugin configuration), query the
    library, confirm with the user, and run the conversion pipeline
    across the configured number of threads.
    """
    # Destination directory: flag wins over config; required.
    dest = opts.dest or self.config['dest'].get()
    if not dest:
        raise ui.UserError(u'no convert destination set')
    dest = util.bytestring_path(dest)

    threads = opts.threads or self.config['threads'].get(int)
    path_formats = ui.get_path_formats(self.config['paths'] or None)
    fmt = opts.format or self.config['format'].as_str().lower()

    if opts.pretend is not None:
        pretend = opts.pretend
    else:
        pretend = self.config['pretend'].get(bool)

    # Link mode: hardlink beats symlink; flags beat configuration.
    if opts.hardlink is not None:
        hardlink = opts.hardlink
        link = False
    elif opts.link is not None:
        hardlink = False
        link = opts.link
    else:
        hardlink = self.config['hardlink'].get(bool)
        link = self.config['link'].get(bool)

    if opts.album:
        albums = lib.albums(ui.decargs(args))
        items = [i for a in albums for i in a.items()]
        if not pretend:
            # Preview the albums that will be converted.
            for a in albums:
                ui.print_(format(a, u''))
    else:
        items = list(lib.items(ui.decargs(args)))
        if not pretend:
            # Preview the items that will be converted.
            for i in items:
                ui.print_(format(i, u''))

    if not items:
        self._log.error(u'Empty query result.')
        return
    # Skip the prompt when pretending or when --yes was given.
    if not (pretend or opts.yes or ui.input_yn(u"Convert? (Y/n)")):
        return

    if opts.album and self.config['copy_album_art']:
        for album in albums:
            self.copy_album_art(album, dest, path_formats, pretend,
                                link, hardlink)

    # One converter stage per thread, fed from a single item iterator.
    convert = [self.convert_item(dest, opts.keep_new, path_formats,
                                 fmt, pretend, link, hardlink)
               for _ in range(threads)]
    pipe = util.pipeline.Pipeline([iter(items), convert])
    pipe.run_parallel()
def get_format():
    """Get the currently configured format command and extension.

    Returns a (command, dotted-extension) tuple of UTF-8 byte strings.
    Raises ``ui.UserError`` when the format lacks the required fields.
    """
    format = config['convert']['format'].get(unicode).lower()
    format = ALIASES.get(format, format)
    format_info = config['convert']['formats'][format].get(dict)

    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        format_info['command'] = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        format_info['command'] = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode))
    if 'extension' in keys:
        format_info['extension'] = config['convert']['extension'].get(unicode)

    try:
        return (
            format_info['command'].encode('utf8'),
            (u'.' + format_info['extension']).encode('utf8'),
        )
    except KeyError:
        # Either 'command' or 'extension' was missing from the format.
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'.
            format(format))
def convert_func(lib, opts, args):
    """Handle the `convert` command: resolve destination, thread count
    and path formats from options or configuration, confirm with the
    user, and run the conversion pipeline in parallel.
    """
    # Destination directory: flag wins over config; required.
    dest = opts.dest if opts.dest is not None else \
        config['convert']['dest'].get()
    if not dest:
        raise ui.UserError('no convert destination set')
    dest = util.bytestring_path(dest)
    threads = opts.threads if opts.threads is not None else \
        config['convert']['threads'].get(int)
    keep_new = opts.keep_new

    if not config['convert']['paths']:
        path_formats = ui.get_path_formats()
    else:
        path_formats = ui.get_path_formats(config['convert']['paths'])

    # Preview what will be converted, then ask for confirmation.
    ui.commands.list_items(lib, ui.decargs(args), opts.album, None)
    if not ui.input_yn("Convert? (Y/n)"):
        return

    if opts.album:
        items = (i for a in lib.albums(ui.decargs(args))
                 for i in a.items())
    else:
        items = iter(lib.items(ui.decargs(args)))

    # One converter stage per thread, all drawing from `items`.
    convert = [
        convert_item(dest, keep_new, path_formats)
        for i in range(threads)
    ]
    pipe = util.pipeline.Pipeline([items, convert])
    pipe.run_parallel()
def _authenticate(self):
    """Request an access token via the Client Credentials Flow:
    https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow

    Stores the token on ``self.access_token`` and persists it to
    ``self.tokenfile``. Raises ``ui.UserError`` when authorization
    fails.
    """
    # HTTP Basic auth header: base64("client_id:client_secret").
    headers = {
        'Authorization': 'Basic {}'.format(
            base64.b64encode(':'.join(
                self.config[k].as_str()
                for k in ('client_id', 'client_secret')
            ).encode()).decode())
    }
    response = requests.post(
        self.oauth_token_url,
        data={'grant_type': 'client_credentials'},
        headers=headers,
    )
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise ui.UserError(u'Spotify authorization failed: {}\n{}'.format(
            e, response.text))
    self.access_token = response.json()['access_token']

    # Save the token for later use.
    self._log.debug(u'{} access token: {}', self.data_source,
                    self.access_token)
    with open(self.tokenfile, 'w') as f:
        json.dump({'access_token': self.access_token}, f)
def _handle_response(self, request_type, url, params=None):
    """Send a request, reauthenticating if necessary.

    :param request_type: Type of :class:`Request` constructor,
        e.g. ``requests.get``, ``requests.post``, etc.
    :type request_type: function
    :param url: URL for the new :class:`Request` object.
    :type url: str
    :param params: (optional) list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :type params: dict
    :return: JSON data for the class:`Response <Response>` object.
    :rtype: dict
    """
    response = request_type(
        url,
        headers={'Authorization': 'Bearer {}'.format(self.access_token)},
        params=params,
    )
    if response.status_code != 200:
        if u'token expired' in response.text:
            # Token expiry: refresh it and retry the same request once
            # more. NOTE(review): if reauthentication keeps yielding an
            # expired token this recursion has no depth limit — confirm
            # that _authenticate raises on failure.
            self._log.debug(
                '{} access token has expired. Reauthenticating.',
                self.data_source,
            )
            self._authenticate()
            return self._handle_response(request_type, url, params=params)
        else:
            raise ui.UserError(
                u'{} API error:\n{}\nURL:\n{}\nparams:\n{}'.format(
                    self.data_source, response.text, url, params))
    return response.json()
def func(lib, opts, args):
    """Command entry point: register the given paths as already
    imported without importing them.
    """
    # to match "beet import" function
    if not args:
        raise ui.UserError('no path specified')
    self.noimport_files(lib, args)
def check_sanity(self, mods, dels, objs, album):
    """Warn about modifications applied at the wrong level (album
    fields on items, or non-album fields on albums) and reject changes
    to computed fields.

    Returns True when a warning was issued and the user should
    re-confirm. Raises ``ui.UserError`` for computed fields.
    """
    reconfirm = False
    if not album:
        # TODO: consider changing this from UserError to promotion,
        # automatically applying it to the album
        if any(not obj.singleton for obj in objs):
            for key in mods:
                if key in library.Album.item_keys:
                    self._log.warning(
                        u'modification of album field `{0}` should be done on the album, not the item',
                        key)
                    reconfirm = True
    else:
        for key in mods:
            if key in self.non_album_fields:
                self._log.warning(
                    u'modification of non-album field `{0}` should be done on the item, not the album',
                    key)
                reconfirm = True

    # Computed fields can be neither modified nor deleted.
    for key in list(mods.keys()) + dels:
        if key in self.computed_fields:
            raise ui.UserError(
                u'modification or deletion of computed field `{0}` is not supported'
                .format(key))
    return reconfirm
def modify_items(lib, mods, query, write, move, album, color, confirm): """Modifies matching items according to key=value assignments.""" # Parse key=value specifications into a dictionary. allowed_keys = library.ALBUM_KEYS if album else library.ITEM_KEYS_WRITABLE fsets = {} for mod in mods: key, value = mod.split('=', 1) if key not in allowed_keys: raise ui.UserError('"%s" is not a valid field' % key) fsets[key] = value # Get the items to modify. items, albums = _do_query(lib, query, album, False) objs = albums if album else items # Preview change. print_('Modifying %i %ss.' % (len(objs), 'album' if album else 'item')) for obj in objs: # Identify the changed object. if album: print_(u'* %s - %s' % (obj.albumartist, obj.album)) else: print_(u'* %s - %s' % (obj.artist, obj.title)) # Show each change. for field, value in fsets.iteritems(): curval = getattr(obj, field) _showdiff(field, curval, value, color) # Confirm. if confirm: extra = ' and write tags' if write else '' if not ui.input_yn('Really modify%s (Y/n)?' % extra): return # Apply changes to database. for obj in objs: for field, value in fsets.iteritems(): setattr(obj, field, value) if move: cur_path = obj.item_dir() if album else obj.path if lib.directory in ancestry(cur_path): # In library? log.debug('moving object %s' % cur_path) if album: obj.move() else: lib.move(obj) # When modifying items, we have to store them to the database. if not album: lib.store(obj) lib.save() # Apply tags if requested. if write: if album: items = itertools.chain(*(a.items() for a in albums)) for item in items: item.write()
def handle_album(self, album, write):
    """Compute album and track replay gain store it in all of the
    album's items.

    If ``write`` is truthy then ``item.write()`` is called for each
    item. If replay gain information is already present in all items,
    nothing is done.
    """
    if not self.album_requires_gain(album):
        self._log.info(u'Skipping album {0}', album)
        return

    self._log.info(u'analyzing {0}', album)

    try:
        album_gain = self.backend_instance.compute_album_gain(album)
        # The backend must return one track gain per album item.
        if len(album_gain.track_gains) != len(album.items()):
            raise ReplayGainError(
                u"ReplayGain backend failed "
                u"for some tracks in album {0}".format(album))

        self.store_album_gain(album, album_gain.album_gain)
        for item, track_gain in zip(album.items(),
                                    album_gain.track_gains):
            self.store_track_gain(item, track_gain)
            if write:
                item.try_write()
    except ReplayGainError as e:
        # Non-fatal failure: log it and move on to the next album.
        self._log.info(u"ReplayGain error: {0}", e)
    except FatalReplayGainError as e:
        raise ui.UserError(u"Fatal replay gain error: {0}".format(e))
def commands(self):
    """Provide the `spotify-explicit` subcommand, which lists matched
    Spotify tracks that are flagged as explicit.

    Requires the Spotify plugin to be loaded; raises ``ui.UserError``
    otherwise.
    """
    # Locate the loaded Spotify plugin; it performs the track matching.
    for plugin in plugins.find_plugins():
        if plugin.name == 'spotify':
            self.spotify = plugin
            break
    else:
        # for/else: the plugin was not found among loaded plugins.
        raise ui.UserError('spotify plugin is required')

    def explicits(lib, opts, args):
        # Match library items against Spotify and print explicit ones.
        args = ui.decargs(args)
        items = lib.items(args)
        results = self.spotify._match_library_tracks(lib, args)
        if results:
            for item, track in zip(items, results):
                if track['explicit']:
                    title = track['name']
                    album = track['album']['name']
                    artist = track['artists'][0]['name']
                    tracknum = track['track_number']
                    url = track['external_urls']['spotify']
                    # Notify other plugins about the explicit match.
                    plugins.send("spotify_explicit_track", lib=lib,
                                 track=track, item=item)
                    print('{} - {} - {} - {} - {}'.format(
                        album, tracknum, artist, title, url))

    explicit_cmd = ui.Subcommand('spotify-explicit', help=u'')
    explicit_cmd.parser.add_all_common_options()
    explicit_cmd.func = explicits
    return [explicit_cmd]
def parse_args(args, opts):
    """Parse the command's positional keyword-style arguments
    ("[count] <subject> [by <field>] [in <query>]") into a dict with
    'subject', 'table', 'order', 'count', 'template' and 'subvals'
    entries.

    Raises ``ui.UserError`` when any piece fails to resolve.
    """
    # TODO use options instead of interspersed keywords
    subject = None
    table = 'items'
    count = DEFAULT_COUNT
    order = DEFAULT_ORDER
    subvals = []

    # Optional leading number: how many rows to show.
    if len(args) >= 1:
        if args[0].isdigit():
            count = int(args[0])
            args = args[1:]

    # The subject keyword, resolved through the SUBJECTS mapping.
    if len(args) >= 1:
        subject, args = SUBJECTS.get(args[0]), args[1:]
        template = TEMPLATES.get(subject)

    # Optional "by <field>" ordering clause.
    if len(args) >= 2 and args[0] == "by":
        args = args[1:]
        by, args = args[0], args[1:]
        order = ORDERS.get(by)
        template = TEMPLATES.get(by)

    # Optional "in <query>" restriction: use a subquery as the table.
    if len(args) >= 2 and args[0] == "in":
        args = args[1:]
        table2, subvals2 = library.get_query(args).statement()
        table = '(' + table2 + ')'
        subvals += subvals2

    # Unknown keywords resolve to None via .get() and are rejected
    # here.
    if not subject or not order or not count:
        raise ui.UserError(u'invalid arguments')

    return {
        'subject': subject,
        'table': table,
        'order': order,
        'count': count,
        'template': template,
        'subvals': subvals
    }
def _echofun(self, func, **kwargs):
    """Wrapper for requests to the EchoNest API. Will retry up to
    RETRIES times and wait between retries for RETRY_INTERVAL
    seconds.

    Returns the API result, or None for non-retriable errors. Raises
    ``ui.UserError`` when all retries are exhausted.
    """
    for i in range(RETRIES):
        try:
            result = func(**kwargs)
        except pyechonest.util.EchoNestAPIError as e:
            if e.code == 3:
                # reached access limit per minute
                time.sleep(RETRY_INTERVAL)
            elif e.code == 5:
                # specified identifier does not exist
                # no use in trying again.
                log.debug(u'echonest: {0}'.format(e))
                return None
            else:
                log.error(u'echonest: {0}'.format(e.args[0][0]))
                return None
        except (pyechonest.util.EchoNestIOError, socket.error) as e:
            log.warn(u'echonest: IO error: {0}'.format(e))
            time.sleep(RETRY_INTERVAL)
        else:
            break
    else:
        # If we exited the loop without breaking, then we used up all
        # our allotted retries.
        raise ui.UserError(u'echonest request failed repeatedly')
        # NOTE(review): unreachable after the raise above — kept as-is.
        return None
    return result
def noimport_files(self, lib, paths):
    """Add the album directories under the given paths to the
    importer's skip list ("taghistory") without importing them.

    Raises ``ui.UserError`` when a path does not exist.
    """
    # Check the user-specified directories.
    for path in paths:
        if not os.path.exists(syspath(normpath(path))):
            raise ui.UserError(u'no such file or directory: {0}'.format(
                displayable_path(path)))

    # Open the state file
    state = importer._open_state()
    # Create the 'taghistory' set if it doesn't exist
    if 'taghistory' not in state:
        state['taghistory'] = set()

    # For every path...
    for path in paths:
        # NOTE(review): `added` is reset for each path but logged only
        # once after the loop, so the message reflects the last path
        # only — confirm whether a running total was intended.
        added = 0
        # ...get the list of albums in that path...
        for dirs, paths_in_dir in importer.albums_in_dir(path):
            # ...check if they're not already in the 'taghistory' set
            # NOTE(review): membership is tested on the raw `dirs`
            # tuple but normalized paths are stored — confirm both
            # sides should apply normpath.
            if tuple(dirs) not in state['taghistory']:
                # ...and add them...
                state['taghistory'].add(tuple(map(normpath, dirs)))
                added += 1

    # Save the state file
    importer._save_state(state)
    log.info(u'Added {0} paths to the skip list', added)
def embed_func(lib, opts, args):
    """Embed artwork into the matched items' files: either a specific
    image file (``--file``) into each matched item, or each matched
    album's stored art into its items.
    """
    if opts.file:
        # Explicit image: embed it into every matched item.
        imagepath = normpath(opts.file)
        if not os.path.isfile(syspath(imagepath)):
            raise ui.UserError(u'image file {0} not found'.format(
                displayable_path(imagepath)))

        items = lib.items(decargs(args))

        # Confirm with user.
        if not opts.yes and not _confirm(items, not opts.file):
            return

        for item in items:
            art.embed_item(self._log, item, imagepath, maxwidth, None,
                           compare_threshold, ifempty)
    else:
        # Album art: embed each album's own artwork into its items.
        albums = lib.albums(decargs(args))

        # Confirm with user.
        if not opts.yes and not _confirm(albums, not opts.file):
            return

        for album in albums:
            art.embed_album(self._log, album, maxwidth, False,
                            compare_threshold, ifempty)
            self.remove_artfile(album)
def handle_track(self, item, write):
    """Compute track replay gain and store it in the item.

    If ``write`` is truthy then ``item.write()`` is called to write
    the data to disk.

    If replay gain information is already present in the item,
    nothing is done.
    """
    if not self.track_requires_gain(item):
        self._log.info(u'Skipping track {0}', item)
        return

    self._log.info(u'analyzing {0}', item)

    try:
        track_gains = self.backend_instance.compute_track_gain([item])
        # The backend must return exactly one gain for the one item.
        if len(track_gains) != 1:
            raise ReplayGainError(
                u"ReplayGain backend failed for track {0}".format(item))

        self.store_track_gain(item, track_gains[0])
        if write:
            item.try_write()
    except ReplayGainError as e:
        # Non-fatal failure: log it and move on to the next track.
        self._log.info(u"ReplayGain error: {0}", e)
    except FatalReplayGainError as e:
        raise ui.UserError(u"Fatal replay gain error: {0}".format(e))
def get_format(fmt=None):
    """Return the command template and the extension from the config.

    ``fmt`` defaults to the configured format name; aliases are
    resolved via ALIASES. Returns a (command, extension) tuple of
    UTF-8 byte strings. Raises ``ui.UserError`` when the format lacks
    a command.
    """
    if not fmt:
        fmt = config['convert']['format'].as_str().lower()
    fmt = ALIASES.get(fmt, fmt)

    try:
        format_info = config['convert']['formats'][fmt].get(dict)
        command = format_info['command']
        # Missing extension falls back to the format name itself.
        extension = format_info.get('extension', fmt)
    except KeyError:
        raise ui.UserError(
            'convert: format {} needs the "command" field'.format(fmt))
    except ConfigTypeError:
        # The format entry is a bare string: treat it as the command
        # and derive the extension from the format name.
        command = config['convert']['formats'][fmt].get(str)
        extension = fmt

    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        command = config['convert']['command'].as_str()
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = 'ffmpeg -i $source -y {} $dest'.format(
            config['convert']['opts'].as_str())
    if 'extension' in keys:
        extension = config['convert']['extension'].as_str()

    return (command.encode('utf-8'), extension.encode('utf-8'))
def get_format(format=None):
    """Return the command template and the extension from the config.

    ``format`` defaults to the configured format name; aliases are
    resolved via ALIASES. Returns a (command, extension) tuple of
    UTF-8 byte strings. Raises ``ui.UserError`` when required fields
    are missing.
    """
    if not format:
        format = config['convert']['format'].get(unicode).lower()
    format = ALIASES.get(format, format)

    try:
        format_info = config['convert']['formats'][format].get(dict)
        command = format_info['command']
        extension = format_info['extension']
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'
            .format(format)
        )
    except ConfigTypeError:
        # The format entry is a bare string: treat it as the command
        # and derive the extension from the format name.
        command = config['convert']['formats'][format].get(str)
        extension = format

    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        command = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode)
        )
    if 'extension' in keys:
        extension = config['convert']['extension'].get(unicode)

    return (command.encode('utf8'), extension.encode('utf8'))
def build_alpha_spans(alpha_spans_str, alpha_regexs):
    """Compile one regex per span specification.

    Each element of ``alpha_spans_str`` is either a key into
    ``alpha_regexs`` (whose value is used verbatim as a pattern) or a
    string whose lowest and highest alphanumeric characters define an
    inclusive first-character range. Raises ``ui.UserError`` when a
    span contains no alphanumeric character.
    """
    alphanum = string.digits + string.ascii_lowercase
    compiled = []
    for elem in alpha_spans_str:
        if elem in alpha_regexs:
            # Explicit user-supplied pattern for this bucket.
            compiled.append(re.compile(alpha_regexs[elem]))
            continue
        chars = sorted(c for c in elem.lower() if c.isalnum())
        if not chars:
            raise ui.UserError("invalid range defined for alpha bucket "
                               "'%s': no alphanumeric character found" % elem)
        lo = alphanum.index(chars[0])
        hi = alphanum.index(chars[-1])
        # Match a first character anywhere in the range, either case.
        covered = alphanum[lo:hi + 1]
        compiled.append(
            re.compile("^[" + covered + covered.upper() + "]")
        )
    return compiled