Example #1
class RatingPlugin(plugins.BeetsPlugin):
    item_types = {'rating': types.Float()}
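The snippet assumes `plugins` and `types` are already in scope. A minimal self-contained sketch of the same idea, assuming beets' plugin API:

from beets import plugins
from beets.dbcore import types


class RatingPlugin(plugins.BeetsPlugin):
    # Declare the flexible attribute 'rating' as a float so beets parses,
    # formats, and compares its values numerically.
    item_types = {'rating': types.Float()}

Once the type is declared, numeric range queries over the flexible field should work, e.g. `beet ls rating:4..5`.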
Example #2
def get_item_attribute_type_overrides():
    """Return a mapping from the known numeric flexible attribute names
    to ``types.Float`` so that their values are handled numerically.
    """
    _types = {}
    for attr in KNOWN_NUMERIC_FLEX_ATTRIBUTES:
        _types[attr] = types.Float(6)

    return _types
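A sketch of how such overrides might be wired into a plugin. `KNOWN_NUMERIC_FLEX_ATTRIBUTES` is not defined on this page, so the list below is hypothetical:

from beets import plugins
from beets.dbcore import types

# Hypothetical stand-in; the real constant is defined elsewhere.
KNOWN_NUMERIC_FLEX_ATTRIBUTES = ['average_loudness', 'danceable', 'rating']


def get_item_attribute_type_overrides():
    return {attr: types.Float(6) for attr in KNOWN_NUMERIC_FLEX_ATTRIBUTES}


class NumericFlexPlugin(plugins.BeetsPlugin):
    # Expose the overrides so these attributes sort and compare numerically.
    item_types = get_item_attribute_type_overrides()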
Example #3
class AcousticPlugin(plugins.BeetsPlugin):
    item_types = {
        'average_loudness': types.Float(6),
        'chords_changes_rate': types.Float(6),
        'chords_key': types.STRING,
        'chords_number_rate': types.Float(6),
        'chords_scale': types.STRING,
        'danceable': types.Float(6),
        'gender': types.STRING,
        'genre_rosamerica': types.STRING,
        'initial_key': types.STRING,
        'key_strength': types.Float(6),
        'mood_acoustic': types.Float(6),
        'mood_aggressive': types.Float(6),
        'mood_electronic': types.Float(6),
        'mood_happy': types.Float(6),
        'mood_party': types.Float(6),
        'mood_relaxed': types.Float(6),
        'mood_sad': types.Float(6),
        'moods_mirex': types.STRING,
        'rhythm': types.Float(6),
        'timbre': types.STRING,
        'tonal': types.Float(6),
        'voice_instrumental': types.STRING,
    }

    def __init__(self):
        super().__init__()

        self.config.add({'auto': True, 'force': False, 'tags': []})

        if self.config['auto']:
            self.register_listener('import_task_files', self.import_task_files)

    def commands(self):
        cmd = ui.Subcommand('acousticbrainz',
                            help="fetch metadata from AcousticBrainz")
        cmd.parser.add_option('-f',
                              '--force',
                              dest='force_refetch',
                              action='store_true',
                              default=False,
                              help='re-download data when already present')

        def func(lib, opts, args):
            items = lib.items(ui.decargs(args))
            self._fetch_info(items, ui.should_write(), opts.force_refetch
                             or self.config['force'])

        cmd.func = func
        return [cmd]

    def import_task_files(self, session, task):
        """Function is called upon beet import.
        """
        self._fetch_info(task.imported_items(), False, True)

    def _get_data(self, mbid):
        data = {}
        for url in _generate_urls(mbid):
            self._log.debug('fetching URL: {}', url)

            try:
                res = requests.get(url)
            except requests.RequestException as exc:
                self._log.info('request error: {}', exc)
                return {}

            if res.status_code == 404:
                self._log.info('recording ID {} not found', mbid)
                return {}

            try:
                data.update(res.json())
            except ValueError:
                self._log.debug('Invalid Response: {}', res.text)
                return {}

        return data

    def _fetch_info(self, items, write, force):
        """Fetch additional information from AcousticBrainz for the `item`s.
        """
        tags = self.config['tags'].as_str_seq()
        for item in items:
            # If we're not forcing re-downloading for all tracks, check
            # whether the data is already present. We use one
            # representative field name to check for previously fetched
            # data.
            if not force:
                mood_str = item.get('mood_acoustic', '')
                if mood_str:
                    self._log.info('data already present for: {}', item)
                    continue

            # We can only fetch data for tracks with MBIDs.
            if not item.mb_trackid:
                continue

            self._log.info('getting data for: {}', item)
            data = self._get_data(item.mb_trackid)
            if data:
                for attr, val in self._map_data_to_scheme(data, ABSCHEME):
                    if not tags or attr in tags:
                        self._log.debug('attribute {} of {} set to {}', attr,
                                        item, val)
                        setattr(item, attr, val)
                    else:
                        self._log.debug(
                            'skipping attribute {} of {}'
                            ' (value {}) due to config', attr, item, val)
                item.store()
                if write:
                    item.try_write()

    def _map_data_to_scheme(self, data, scheme):
        """Given `data` as a structure of nested dictionaries, and `scheme` as a
        structure of nested dictionaries , `yield` tuples `(attr, val)` where
        `attr` and `val` are corresponding leaf nodes in `scheme` and `data`.

        As its name indicates, `scheme` defines how the data is structured,
        so this function tries to find leaf nodes in `data` that correspond
        to the leafs nodes of `scheme`, and not the other way around.
        Leaf nodes of `data` that do not exist in the `scheme` do not matter.
        If a leaf node of `scheme` is not present in `data`,
        no value is yielded for that attribute and a simple warning is issued.

        Finally, to account for attributes of which the value is split between
        several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
        `(attr, order)` where `attr` is the attribute to which the leaf node
        belongs, and `order` is the place at which it should appear in the
        value. The different `value`s belonging to the same `attr` are simply
        joined with `' '`. This is hardcoded and not very flexible, but it gets
        the job done.

        For example:

        >>> scheme = {
            'key1': 'attribute',
            'key group': {
                'subkey1': 'subattribute',
                'subkey2': ('composite attribute', 0)
            },
            'key2': ('composite attribute', 1)
        }
        >>> data = {
            'key1': 'value',
            'key group': {
                'subkey1': 'subvalue',
                'subkey2': 'part 1 of composite attr'
            },
            'key2': 'part 2'
        }
        >>> print(list(_map_data_to_scheme(data, scheme)))
        [('subattribute', 'subvalue'),
         ('attribute', 'value'),
         ('composite attribute', 'part 1 of composite attr part 2')]
        """
        # First, we traverse `scheme` and `data`, `yield`ing all the
        # non-composite attributes straight away and populating the
        # dictionary `composites` with the composite attributes.

        # When we are finished traversing `scheme`, `composites` should
        # map each composite attribute to an ordered list of the values
        # belonging to the attribute, for example:
        # `composites = {'initial_key': ['B', 'minor']}`.

        # The recursive traversal.
        composites = defaultdict(list)
        yield from self._data_to_scheme_child(data, scheme, composites)

        # When composites has been populated, yield the composite attributes
        # by joining their parts.
        for composite_attr, value_parts in composites.items():
            yield composite_attr, ' '.join(value_parts)

    def _data_to_scheme_child(self, subdata, subscheme, composites):
        """The recursive business logic of :meth:`_map_data_to_scheme`:
        Traverse two structures of nested dictionaries in parallel and `yield`
        tuples of corresponding leaf nodes.

        If a leaf node belongs to a composite attribute (is a `tuple`),
        populate `composites` rather than yielding straight away.
        All the child functions for a single traversal share the same
        `composites` instance, which is passed along.
        """
        for k, v in subscheme.items():
            if k in subdata:
                if isinstance(v, dict):
                    yield from self._data_to_scheme_child(
                        subdata[k], v, composites)
                elif isinstance(v, tuple):
                    composite_attribute, part_number = v
                    attribute_parts = composites[composite_attribute]
                    # Parts are not guaranteed to be inserted in order
                    while len(attribute_parts) <= part_number:
                        attribute_parts.append('')
                    attribute_parts[part_number] = subdata[k]
                else:
                    yield v, subdata[k]
            else:
                self._log.warning(
                    'AcousticBrainz did not provide info '
                    'about {}', k)
                self._log.debug(
                    'Data {} could not be mapped to scheme {} '
                    'because key {} was not found', subdata, v, k)
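The docstring example in `_map_data_to_scheme` can be exercised without the plugin. A self-contained sketch of the same mapping logic (the function names here are illustrative, not part of beets):

from collections import defaultdict


def map_data_to_scheme(data, scheme):
    # Yield (attr, value) for each leaf of `scheme` found in `data`, then
    # join multi-part (composite) attributes with spaces.
    composites = defaultdict(list)
    yield from _walk(data, scheme, composites)
    for attr, parts in composites.items():
        yield attr, ' '.join(parts)


def _walk(subdata, subscheme, composites):
    for k, v in subscheme.items():
        if k not in subdata:
            continue  # the plugin logs a warning here instead
        if isinstance(v, dict):
            yield from _walk(subdata[k], v, composites)
        elif isinstance(v, tuple):
            attr, index = v
            parts = composites[attr]
            while len(parts) <= index:  # parts may arrive out of order
                parts.append('')
            parts[index] = subdata[k]
        else:
            yield v, subdata[k]


scheme = {'key1': 'attribute',
          'key group': {'subkey1': 'subattribute',
                        'subkey2': ('composite attribute', 0)},
          'key2': ('composite attribute', 1)}
data = {'key1': 'value',
        'key group': {'subkey1': 'subvalue',
                      'subkey2': 'part 1 of composite attr'},
        'key2': 'part 2'}
print(sorted(map_data_to_scheme(data, scheme)))
# [('attribute', 'value'),
#  ('composite attribute', 'part 1 of composite attr part 2'),
#  ('subattribute', 'subvalue')]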
Example #4
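    # Column meaning, assumed from the beets 1.3-era library.py this list
    # appears to come from: (field name, field type, writable to file tags?,
    # backed by a MediaFile attribute?).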
    ('albumtype',            types.String(),     True, True),
    ('label',                types.String(),     True, True),
    ('acoustid_fingerprint', types.String(),     True, True),
    ('acoustid_id',          types.String(),     True, True),
    ('mb_releasegroupid',    types.String(),     True, True),
    ('asin',                 types.String(),     True, True),
    ('catalognum',           types.String(),     True, True),
    ('script',               types.String(),     True, True),
    ('language',             types.String(),     True, True),
    ('country',              types.String(),     True, True),
    ('albumstatus',          types.String(),     True, True),
    ('media',                types.String(),     True, True),
    ('albumdisambig',        types.String(),     True, True),
    ('disctitle',            types.String(),     True, True),
    ('encoder',              types.String(),     True, True),
    ('rg_track_gain',        types.Float(),      True, True),
    ('rg_track_peak',        types.Float(),      True, True),
    ('rg_album_gain',        types.Float(),      True, True),
    ('rg_album_peak',        types.Float(),      True, True),
    ('original_year',        types.PaddedInt(4), True, True),
    ('original_month',       types.PaddedInt(2), True, True),
    ('original_day',         types.PaddedInt(2), True, True),

    ('length',      types.Float(),                  False, True),
    ('bitrate',     types.ScaledInt(1000, u'kbps'), False, True),
    ('format',      types.String(),                 False, True),
    ('samplerate',  types.ScaledInt(1000, u'kHz'),  False, True),
    ('bitdepth',    types.Integer(),                False, True),
    ('channels',    types.Integer(),                False, True),
    ('mtime',       DateType(),                     False, False),
    ('added',       DateType(),                     False, False),
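The type objects above also control display formatting. A small sketch, assuming the `beets.dbcore.types` API, where each `Type` exposes a `format()` method:

from beets.dbcore import types

print(types.PaddedInt(4).format(7))                   # '0007' (zero-padded)
print(types.ScaledInt(1000, u'kbps').format(320000))  # '320kbps' (scaled + suffix)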
Example #5
class Item(LibModel):
    _table = 'items'
    _flex_table = 'item_attributes'
    _fields = {
        'id': types.Id(True),
        'path': PathType(),
        'album_id': types.Id(False),
        'title': types.String(),
        'artist': types.String(),
        'artist_sort': types.String(),
        'artist_credit': types.String(),
        'album': types.String(),
        'albumartist': types.String(),
        'albumartist_sort': types.String(),
        'albumartist_credit': types.String(),
        'genre': types.String(),
        'composer': types.String(),
        'grouping': types.String(),
        'year': types.PaddedInt(4),
        'month': types.PaddedInt(2),
        'day': types.PaddedInt(2),
        'track': types.PaddedInt(2),
        'tracktotal': types.PaddedInt(2),
        'disc': types.PaddedInt(2),
        'disctotal': types.PaddedInt(2),
        'lyrics': types.String(),
        'comments': types.String(),
        'bpm': types.Integer(),
        'comp': types.Boolean(),
        'mb_trackid': types.String(),
        'mb_albumid': types.String(),
        'mb_artistid': types.String(),
        'mb_albumartistid': types.String(),
        'albumtype': types.String(),
        'label': types.String(),
        'acoustid_fingerprint': types.String(),
        'acoustid_id': types.String(),
        'mb_releasegroupid': types.String(),
        'asin': types.String(),
        'catalognum': types.String(),
        'script': types.String(),
        'language': types.String(),
        'country': types.String(),
        'albumstatus': types.String(),
        'media': types.String(),
        'albumdisambig': types.String(),
        'disctitle': types.String(),
        'encoder': types.String(),
        'rg_track_gain': types.NullFloat(),
        'rg_track_peak': types.NullFloat(),
        'rg_album_gain': types.NullFloat(),
        'rg_album_peak': types.NullFloat(),
        'original_year': types.PaddedInt(4),
        'original_month': types.PaddedInt(2),
        'original_day': types.PaddedInt(2),
        'initial_key': types.MusicalKey(),
        'length': types.Float(),
        'bitrate': types.ScaledInt(1000, u'kbps'),
        'format': types.String(),
        'samplerate': types.ScaledInt(1000, u'kHz'),
        'bitdepth': types.Integer(),
        'channels': types.Integer(),
        'mtime': DateType(),
        'added': DateType(),
    }

    _search_fields = ('artist', 'title', 'comments', 'album', 'albumartist',
                      'genre')

    _media_fields = set(MediaFile.readable_fields()) \
        .intersection(_fields.keys())
    """Set of item fields that are backed by `MediaFile` fields.

    Any kind of field (fixed, flexible, and computed) may be a media
    field. Only these fields are read from disk in `read` and written in
    `write`.
    """
    @classmethod
    def _getters(cls):
        getters = plugins.item_field_getters()
        getters['singleton'] = lambda i: i.album_id is None
        return getters

    @classmethod
    def from_path(cls, path):
        """Creates a new item from the media file at the specified path.
        """
        # Initiate with values that aren't read from files.
        i = cls(album_id=None)
        i.read(path)
        i.mtime = i.current_mtime()  # Initial mtime.
        return i

    def __setitem__(self, key, value):
        """Set the item's value for a standard field or a flexattr.
        """
        # Encode unicode paths and read buffers.
        if key == 'path':
            if isinstance(value, unicode):
                value = bytestring_path(value)
            elif isinstance(value, buffer):
                value = str(value)

        if key in MediaFile.fields():
            self.mtime = 0  # Reset mtime on dirty.

        super(Item, self).__setitem__(key, value)

    def update(self, values):
        """Set all key/value pairs in the mapping. If mtime is
        specified, it is not reset (as it might otherwise be).
        """
        super(Item, self).update(values)
        if self.mtime == 0 and 'mtime' in values:
            self.mtime = values['mtime']

    def get_album(self):
        """Get the Album object that this item belongs to, if any, or
        None if the item is a singleton or is not associated with a
        library.
        """
        if not self._db:
            return None
        return self._db.get_album(self)

    # Interaction with file metadata.

    def read(self, read_path=None):
        """Read the metadata from the associated file.

        If ``read_path`` is specified, read metadata from that file
        instead. Updates all the properties in `_media_fields`
        from the media file.

        Raises a `ReadError` if the file could not be read.
        """
        if read_path is None:
            read_path = self.path
        else:
            read_path = normpath(read_path)
        try:
            mediafile = MediaFile(syspath(read_path))
        except (OSError, IOError) as exc:
            raise ReadError(read_path, exc)

        for key in self._media_fields:
            value = getattr(mediafile, key)
            if isinstance(value, (int, long)):
                # Filter values wider than 64 bits (in signed representation).
                # SQLite cannot store them. (Once Python 2.6 support is
                # dropped, this could be written as: value.bit_length() > 63.)
                if abs(value) >= 2**63:
                    value = 0
            self[key] = value

        # Database's mtime should now reflect the on-disk value.
        if read_path == self.path:
            self.mtime = self.current_mtime()

        self.path = read_path

    def write(self, path=None):
        """Write the item's metadata to a media file.

        All fields in `_media_fields` are written to disk according to
        the values on this object.

        Can raise either a `ReadError` or a `WriteError`.
        """
        if path is None:
            path = self.path
        else:
            path = normpath(path)
        try:
            mediafile = MediaFile(path)
        except (OSError, IOError) as exc:
            raise ReadError(self.path, exc)

        plugins.send('write', item=self, path=path)

        mediafile.update(self)
        try:
            mediafile.save(id3v23=beets.config['id3v23'].get(bool))
        except (OSError, IOError, MutagenError) as exc:
            raise WriteError(self.path, exc)

        # The file has a new mtime.
        if path == self.path:
            self.mtime = self.current_mtime()
        plugins.send('after_write', item=self, path=path)

    def try_write(self, path=None):
        """Calls `write()` but catches and logs `FileOperationError`
        exceptions.

        Returns `False` if an exception was caught and `True` otherwise.
        """
        try:
            self.write(path)
            return True
        except FileOperationError as exc:
            log.error(exc)
            return False

    # Files themselves.

    def move_file(self, dest, copy=False):
        """Moves or copies the item's file, updating the path value if
        the move succeeds. If a file already exists at ``dest``, the
        destination path is slightly modified to make it unique.
        """
        if not util.samefile(self.path, dest):
            dest = util.unique_path(dest)
        if copy:
            util.copy(self.path, dest)
            plugins.send("item_copied",
                         item=self,
                         source=self.path,
                         destination=dest)
        else:
            util.move(self.path, dest)
            plugins.send("item_moved",
                         item=self,
                         source=self.path,
                         destination=dest)

        # Either copying or moving succeeded, so update the stored path.
        self.path = dest

    def current_mtime(self):
        """Returns the current mtime of the file, rounded to the nearest
        integer.
        """
        return int(os.path.getmtime(syspath(self.path)))

    # Model methods.

    def remove(self, delete=False, with_album=True):
        """Removes the item. If `delete`, then the associated file is
        removed from disk. If `with_album`, then the item's album (if
        any) is removed if the item was the last one in the album.
        """
        super(Item, self).remove()

        # Remove the album if it is empty.
        if with_album:
            album = self.get_album()
            if album and not album.items():
                album.remove(delete, False)

        # Send an 'item_removed' signal to the plugins.
        plugins.send('item_removed', item=self)

        # Delete the associated file.
        if delete:
            util.remove(self.path)
            util.prune_dirs(os.path.dirname(self.path), self._db.directory)

        self._db._memotable = {}

    def move(self, copy=False, basedir=None, with_album=True):
        """Move the item to its designated location within the library
        directory (provided by destination()). Subdirectories are
        created as needed. If the operation succeeds, the item's path
        field is updated to reflect the new location.

        If copy is True, the file is copied rather than moved.

        basedir overrides the library base directory for the
        destination.

        If the item is in an album, the album is given an opportunity to
        move its art. (This can be disabled by passing
        with_album=False.)

        The item is stored to the database if it is in the database, so
        any dirty fields prior to the move() call will be written as a
        side effect. You probably want to call save() to commit the DB
        transaction.
        """
        self._check_db()
        dest = self.destination(basedir=basedir)

        # Create necessary ancestry for the move.
        util.mkdirall(dest)

        # Perform the move and store the change.
        old_path = self.path
        self.move_file(dest, copy)
        self.store()

        # If this item is in an album, move its art.
        if with_album:
            album = self.get_album()
            if album:
                album.move_art(copy)
                album.store()

        # Prune vacated directory.
        if not copy:
            util.prune_dirs(os.path.dirname(old_path), self._db.directory)

    # Templating.

    def _formatted_mapping(self, for_path=False):
        """Get a mapping containing string-formatted values from either
        this item or the associated album, if any.
        """
        mapping = super(Item, self)._formatted_mapping(for_path)

        # Merge in album-level fields.
        album = self.get_album()
        if album:
            for key in album.keys(True):
                if key in Album.item_keys or key not in self._fields.keys():
                    mapping[key] = album._get_formatted(key, for_path)

        # Use the album artist if the track artist is not set and
        # vice-versa.
        if not mapping['artist']:
            mapping['artist'] = mapping['albumartist']
        if not mapping['albumartist']:
            mapping['albumartist'] = mapping['artist']

        return mapping

    def destination(self,
                    fragment=False,
                    basedir=None,
                    platform=None,
                    path_formats=None):
        """Returns the path in the library directory designated for the
        item (i.e., where the file ought to be). fragment makes this
        method return just the path fragment underneath the root library
        directory; the path is also returned as Unicode instead of
        encoded as a bytestring. basedir can override the library's base
        directory for the destination.
        """
        self._check_db()
        platform = platform or sys.platform
        basedir = basedir or self._db.directory
        path_formats = path_formats or self._db.path_formats

        # Use a path format based on a query, falling back on the
        # default.
        for query, path_format in path_formats:
            if query == PF_KEY_DEFAULT:
                continue
            query = get_query(query, type(self))
            if query.match(self):
                # The query matches the item! Use the corresponding path
                # format.
                break
        else:
            # No query matched; fall back to default.
            for query, path_format in path_formats:
                if query == PF_KEY_DEFAULT:
                    break
            else:
                assert False, "no default path format"
        if isinstance(path_format, Template):
            subpath_tmpl = path_format
        else:
            subpath_tmpl = Template(path_format)

        # Evaluate the selected template.
        subpath = self.evaluate_template(subpath_tmpl, True)

        # Prepare path for output: normalize Unicode characters.
        if platform == 'darwin':
            subpath = unicodedata.normalize('NFD', subpath)
        else:
            subpath = unicodedata.normalize('NFC', subpath)
        # Truncate components and remove forbidden characters.
        subpath = util.sanitize_path(subpath, self._db.replacements)
        # Encode for the filesystem.
        if not fragment:
            subpath = bytestring_path(subpath)

        # Preserve extension.
        _, extension = os.path.splitext(self.path)
        if fragment:
            # Outputting Unicode.
            extension = extension.decode('utf8', 'ignore')
        subpath += extension.lower()

        # Truncate too-long components.
        maxlen = beets.config['max_filename_length'].get(int)
        if not maxlen:
            # When zero, try to determine from filesystem.
            maxlen = util.max_filename_length(self._db.directory)
        subpath = util.truncate_path(subpath, maxlen)

        if fragment:
            return subpath
        else:
            return normpath(os.path.join(basedir, subpath))
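A short usage sketch for the class above, assuming a beets library database at a hypothetical path:

from beets.library import Library

lib = Library('/path/to/library.db')         # hypothetical database path
item = lib.items(u'artist:Beatles').get()    # first matching item, or None
if item:
    print(item.destination(fragment=True))   # library-relative destination
    item.read()    # refresh fields from the file on disk
    item.store()   # persist any changed fields to the database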