Example No. 1
  title={Two data sets for tempo estimation and key detection in electronic dance music annotated from user corrections},
  author={Knees, Peter and Faraldo P{\'e}rez, {\'A}ngel and Herrera Boyer, Perfecto and Vogl, Richard and B{\"o}ck, Sebastian and H{\"o}rschl{\"a}ger, Florian and Le Goff, Mickael and others},
  booktitle={Proceedings of the 16th International Society for Music Information Retrieval Conference (ISMIR)},
  address={M{\'a}laga, Spain},
  pages={364--370},
  year={2015},
  organization={International Society for Music Information Retrieval (ISMIR)},
}
@inproceedings{SchreiberM18a_Tempo_ISMIR,
  author={Hendrik Schreiber and Meinard M{\"u}ller},
  title={A Crowdsourced Experiment for Tempo Estimation of Electronic Dance Music},
  booktitle={Proceedings of the International Conference on Music Information Retrieval ({ISMIR})},
  address={Paris, France},
  year={2018},
  url-pdf={http://www.tagtraum.com/download/2018_schreiber_tempo_giantsteps.pdf},
}"""

DATA = utils.LargeData("giantsteps_tempo_index.json")

REMOTES = {
    "annotations":
    download_utils.RemoteFileMetadata(
        filename=
        "giantsteps-tempo-dataset-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb.zip",
        url=
        "https://github.com/GiantSteps/giantsteps-tempo-dataset/archive/0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb.zip",
        checksum="8fdafbaf505fe3f293bd912c92b72ac8",
        destination_dir="",
    )
}
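
The `checksum` value above is an MD5 digest of the archive. As a minimal illustration of how such a digest can be verified after download (this helper is hypothetical and is not mirdata's own downloader):

import hashlib

def md5_matches(filepath, expected_md5):
    # Hash the file in chunks so large archives never need to fit in memory.
    md5 = hashlib.md5()
    with open(filepath, "rb") as fhandle:
        for chunk in iter(lambda: fhandle.read(8192), b""):
            md5.update(chunk)
    return md5.hexdigest() == expected_md5

# e.g. md5_matches("giantsteps-tempo-dataset-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb.zip",
#                  "8fdafbaf505fe3f293bd912c92b72ac8")
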
DOWNLOAD_INFO = """
    Unfortunately the audio files of the Giant Steps Tempo dataset are not available
    for download. If you have the Giant Steps audio dataset, place the contents into
Example No. 2
            "annotator_2_id": line[3],
            "duration": duration,
            "title": line[7],
            "artist": line[8],
            "annotator_1_time": line[10],
            "annotator_2_time": line[11],
            "class": line[14],
            "genre": line[15],
        }

    metadata_index["data_home"] = data_home

    return metadata_index


DATA = utils.LargeData("salami_index.json", _load_metadata)


class Track(core.Track):
    """salami Track class

    Args:
        track_id (str): track id of the track

    Attributes:
        annotator_1_id (str): number that identifies annotator 1
        annotator_1_time (str): time that annotator 1 took to complete the annotation
        annotator_2_id (str): number that identifies annotator 2
        annotator_2_time (str): time that annotator 2 took to complete the annotation
        artist (str): song artist
        audio_path (str): path to the audio file
Example No. 3
import os
import numpy as np

import mirdata.utils as utils
import mirdata.download_utils as download_utils
import mirdata.jams_utils as jams_utils

DATASET_DIR = 'Beatles'
ANNOTATIONS_REMOTE = download_utils.RemoteFileMetadata(
    filename='The Beatles Annotations.tar.gz',
    url='http://isophonics.net/files/annotations/The%20Beatles%20Annotations.tar.gz',
    checksum='62425c552d37c6bb655a78e4603828cc',
    destination_dir='annotations',
)

DATA = utils.LargeData('beatles_index.json')


class Track(object):
    """Beatles track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        track_id (str): track id
        audio_path (str): track audio path
        title (str): title of the track
        beats (BeatData): beat annotation
Example No. 4
            'contains_strings': tf_dict[line[4]],
            'contains_brass': tf_dict[line[5]],
            'only_strings': tf_dict[line[6]],
            'only_winds': tf_dict[line[7]],
            'only_brass': tf_dict[line[8]],
            'composer': id_split[0],
            'work': '-'.join(id_split[1:-1]),
            'excerpt': id_split[-1][2:],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('orchset_index.json', _load_metadata)


class Track(track.Track):
    """orchset Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        alternating_melody (bool): True if the melody alternates between instruments
        audio_path_mono (str): path to the mono audio file
        audio_path_stereo (str): path to the stereo audio file
        composer (str): the work's composer
Example No. 5
import os
import librosa

from mirdata import download_utils
from mirdata import utils

DATASET_DIR = "GTZAN-Genre"

DATASET_REMOTE = download_utils.RemoteFileMetadata(
    filename="genres.tar.gz",
    url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
    checksum="5b3d6dddb579ab49814ab86dba69e7c7",
    destination_dir="gtzan_genre",
)

DATA = utils.LargeData("gtzan_genre_index.json")


class Track(object):
    """GTZAN-Genre track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets/GTZAN-Genre`

    Attributes:
        track_id (str): track id
        genre (str): annotated genre
        audio_path (str): absolute audio path
    """
Example No. 6
def _load_metadata(data_home):
    metadata_path = os.path.join(data_home, 'medleydb_melody_metadata.json')

    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None

    with open(metadata_path, 'r') as fhandle:
        metadata = json.load(fhandle)

    metadata['data_home'] = data_home
    return metadata


DATA = utils.LargeData('medleydb_melody_index.json', _load_metadata)


class Track(track.Track):
    """medleydb_melody Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): artist
        audio_path (str): path to the audio file
        genre (str): genre
        is_excerpt (bool): True if the track is an excerpt
Example No. 7
AUDIO_MIX_REMOTE = download_utils.RemoteFileMetadata(
    filename='audio_mono-pickup_mix.zip',
    url='https://zenodo.org/record/3371780/files/audio_mono-pickup_mix.zip?download=1',
    checksum='aecce79f425a44e2055e46f680e10f6a',
    destination_dir='audio_mono-pickup_mix',
)
_STYLE_DICT = {
    'Jazz': 'Jazz',
    'BN': 'Bossa Nova',
    'Rock': 'Rock',
    'SS': 'Singer-Songwriter',
    'Funk': 'Funk',
}
_GUITAR_STRINGS = ['E', 'A', 'D', 'G', 'B', 'e']
DATA = utils.LargeData('guitarset_index.json')
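
GuitarSet track ids encode the player, style, tempo, key, and mode. A hedged parsing sketch, assuming the usual 'player_styleProgression-tempo-key_mode' naming (e.g. '00_BN1-129-Eb_comp'); the helper name is hypothetical:

def _parse_track_id(track_id):
    # Split an id like '00_BN1-129-Eb_comp' into its documented components.
    player, middle, mode = track_id.split('_')
    style_abbr, tempo, key = middle.split('-')
    return {
        'player_id': player,
        'style': _STYLE_DICT[style_abbr[:-1]],  # drop the trailing progression number
        'tempo': int(tempo),
        'key': key,
        'mode': mode,  # 'solo' or 'comp'
    }
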


class Track(track.Track):
    """guitarset Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_hex_cln_path (str): path to the debleeded hex wave file
        audio_hex_path (str): path to the original hex wave file
        audio_mic_path (str): path to the mono wave via microphone
        audio_mix_path (str): path to the mono wave via downmixing hex pickup
Example No. 8
DATASET_DIR = 'DALI'


def _load_metadata(data_home):
    metadata_path = os.path.join(data_home, 'dali_metadata.json')
    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None
    with open(metadata_path, 'r') as fhandle:
        metadata_index = json.load(fhandle)

    metadata_index['data_home'] = data_home
    return metadata_index


DATA = utils.LargeData('dali_index.json', _load_metadata)


class Track(track.Track):
    """DALI melody Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        album (str): the track's album
        annotation_path (str): path to the track's annotation file
        artist (str): the track's artist
        audio_path (str): path to the track's audio file
Example No. 9
            'suffix': line[1],
            'track_number': line[2],
            'category': line[3],
            'sub_category': line[4],
            'title': line[5],
            'composer': line[6],
            'artist': line[7],
            'duration': _duration_to_sec(line[8]),
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_genre_index.json', _load_metadata)


class Track(object):
    """RWC Genre Track class

    Args:
        track_id (str): Track id of the Track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        track_id (str): Track id
        audio_path (str): Audio path of this Track
        piece_number (str): Piece number of this Track, [1-100]
        suffix (str): M01-M09
Example No. 10
        filename="audio_mono-pickup_mix.zip",
        url="https://zenodo.org/record/3371780/files/audio_mono-pickup_mix.zip?download=1",
        checksum="aecce79f425a44e2055e46f680e10f6a",
        destination_dir="audio_mono-pickup_mix",
    ),
}
_STYLE_DICT = {
    "Jazz": "Jazz",
    "BN": "Bossa Nova",
    "Rock": "Rock",
    "SS": "Singer-Songwriter",
    "Funk": "Funk",
}
_GUITAR_STRINGS = ["E", "A", "D", "G", "B", "e"]
DATA = utils.LargeData("guitarset_index.json")


class Track(core.Track):
    """guitarset Track class

    Args:
        track_id (str): track id of the track

    Attributes:
        audio_hex_cln_path (str): path to the debleeded hex wave file
        audio_hex_path (str): path to the original hex wave file
        audio_mic_path (str): path to the mono wave via microphone
        audio_mix_path (str): path to the mono wave via downmixing hex pickup
        jams_path (str): path to the jams file
        mode (str): one of ['solo', 'comp']
Example No. 11
            'piece_number': line[0],
            'suffix': line[1],
            'track_number': line[2],
            'title': line[3],
            'artist': line[4],
            'duration': _duration_to_sec(line[5]),
            'variation': line[6],
            'instruments': line[7],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_jazz_index.json', _load_metadata)


class Track(track.Track):
    """rwc_jazz Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): Artist name
        audio_path (str): path of the audio file
        beats_path (str): path of the beat annotation file
        duration (float): Duration of the track in seconds
Example No. 12
File: irmas.py Project: MTG/mirdata
        url='https://zenodo.org/record/1290750/files/IRMAS-TestingData-Part2.zip?download=1',
        checksum='afb0c8ea92f34ee653693106be95c895',
        destination_dir=None,
    ),
    'testing_data_3': download_utils.RemoteFileMetadata(
        filename='IRMAS-TestingData-Part3.zip',
        url='https://zenodo.org/record/1290750/files/IRMAS-TestingData-Part3.zip?download=1',
        checksum='9b3fb2d0c89cdc98037121c25bd5b556',
        destination_dir=None,
    ),
}

DATA = utils.LargeData('irmas_index.json')

INST_DICT = [
    'cel',
    'cla',
    'flu',
    'gac',
    'gel',
    'org',
    'pia',
    'sax',
    'tru',
    'vio',
    'voi',
]
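
For reference, these IRMAS codes conventionally expand as follows (an illustrative mapping added here; it is not defined in the module itself):

# Assumed expansion of the IRMAS instrument abbreviations, for readability only.
INST_NAMES = {
    'cel': 'cello',
    'cla': 'clarinet',
    'flu': 'flute',
    'gac': 'acoustic guitar',
    'gel': 'electric guitar',
    'org': 'organ',
    'pia': 'piano',
    'sax': 'saxophone',
    'tru': 'trumpet',
    'vio': 'violin',
    'voi': 'human voice',
}
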
Example No. 13
    metadata = dict()
    metadata['data_home'] = data_home
    for i, j in zip(indexes, range(len(artists))):
        metadata[i] = {
            'musicBrainzID': identifiers[j],
            'artist': artists[j],
            'title': titles[j],
            'release': releases[j],
            'duration': durations[j],
        }

    return metadata


DATA = utils.LargeData('cante100_index.json', _load_metadata)


class Track(core.Track):
    """cante100 track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets/cante100`

    Attributes:
        track_id (str): track id
        identifier (str): musicbrainz id of the track
        artist (str): performing artists
        title (str): title of the track song
Example No. 14
def _load_metadata(data_home):
    metadata_path = os.path.join(data_home, 'medleydb_pitch_metadata.json')

    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None

    with open(metadata_path, 'r') as fhandle:
        metadata = json.load(fhandle)

    metadata['data_home'] = data_home
    return metadata


DATA = utils.LargeData('medleydb_pitch_index.json', _load_metadata)


class Track(track.Track):
    """medleydb_pitch Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): artist
        audio_path (str): path to the audio file
        genre (str): genre
        instrument (str): instrument of the track
Example No. 15
            'piece_number': line[0],
            'suffix': line[1],
            'track_number': line[2],
            'title': line[3],
            'composer': line[4],
            'artist': line[5],
            'duration': _duration_to_sec(line[6]),
            'category': line[7],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_classical_index.json', _load_metadata)


class Track(track.Track):
    """rwc_classical Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        artist (str): the track's artist
        audio_path (str): path of the audio file
        beats_path (str): path of the beat annotation file
        category (str): One of 'Symphony', 'Concerto', 'Orchestral',
Example No. 16
    year = {2013}
}"""

REMOTES = {
    "remote_data":
    download_utils.RemoteFileMetadata(
        filename="mridangam_stroke_1.5.zip",
        url=
        "https://zenodo.org/record/4068196/files/mridangam_stroke_1.5.zip?download=1",
        checksum="39af55b2476b94c7946bec24331ec01a",  # the md5 checksum
        destination_dir=
        None,  # relative path for where to unzip the data, or None
    ),
}

DATA = utils.LargeData("mridangam_stroke_index.json")

STROKE_DICT = {
    "bheem",
    "cha",
    "dheem",
    "dhin",
    "num",
    "ta",
    "tha",
    "tham",
    "thi",
    "thom",
}

TONIC_DICT = {"B", "C", "C#", "D", "D#", "E"}
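
A small sketch of how these vocabularies might be used to validate parsed labels (the helper is hypothetical, not part of the module):

def _validate_labels(stroke, tonic):
    # Reject any stroke or tonic label that is not in the known vocabularies above.
    if stroke not in STROKE_DICT:
        raise ValueError("unknown stroke label: {}".format(stroke))
    if tonic not in TONIC_DICT:
        raise ValueError("unknown tonic label: {}".format(tonic))
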
Example No. 17
    # load the raw metadata list and re-key it by track id (the MIDI filename stem)
    with open(metadata_path, 'r') as fhandle:
        raw_metadata = json.load(fhandle)

    metadata = {}
    for mdata in raw_metadata:
        track_id = mdata['midi_filename'].split('.')[0]
        metadata[track_id] = mdata

    metadata['data_home'] = data_home

    return metadata


DATA = utils.LargeData('maestro_index.json', _load_metadata)


class Track(track.Track):
    """MAESTRO Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): Path to the track's audio file
        canonical_composer (str): Composer of the piece, standardized on a
            single spelling for a given name.
        canonical_title (str): Title of the piece. Not guaranteed to be
Example No. 18
        return None

    with open(id_map_path, 'r') as fhandle:
        reader = csv.reader(fhandle, delimiter='\t')
        singer_map = {}
        for line in reader:
            if line[0] == 'singer':
                continue
            singer_map[line[1]] = line[0]

    singer_map['data_home'] = data_home

    return singer_map


DATA = utils.LargeData('ikala_index.json', _load_metadata)


class Track(track.Track):
    """ikala Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path to the track's audio file
        f0_path (str): path to the track's f0 annotation file
        lyrics_path (str): path to the track's lyric annotation file
        section (str): section. Either 'verse' or 'chorus'
Example No. 19
            'track_number': line[2],
            'title': line[3],
            'artist': line[4],
            'singer_information': line[5],
            'duration': _duration_to_sec(line[6]),
            'tempo': line[7],
            'instruments': line[8],
            'drum_information': line[9],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('rwc_popular_index.json', _load_metadata)


class Track(object):
    """RWC Popular Track class

    Args:
        track_id (str): Track id of the Track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        track_id (str): Track id
        audio_path (str): Audio path of this Track
        piece_number (str): Piece number of this Track, [1-50]
        suffix (str): M01-M04
Example No. 20
        checksum="62425c552d37c6bb655a78e4603828cc",
        destination_dir="annotations",
    )
}

DOWNLOAD_INFO = """
        Unfortunately the audio files of the Beatles dataset are not available
        for download. If you have the Beatles dataset, place the contents into
        a folder called Beatles with the following structure:
            > Beatles/
                > annotations/
                > audio/
        and copy the Beatles folder to {}
"""

DATA = utils.LargeData("beatles_index.json")


class Track(core.Track):
    """Beatles track class

    Args:
        track_id (str): track id of the track

    Attributes:
        audio_path (str): track audio path
        beats_path (str): beat annotation path
        chords_path (str): chord annotation path
        keys_path (str): key annotation path
        sections_path (str): sections annotation path
        title (str): title of the track
Example No. 21
            'annotator_2_id': line[3],
            'duration': duration,
            'title': line[7],
            'artist': line[8],
            'annotator_1_time': line[10],
            'annotator_2_time': line[11],
            'class': line[14],
            'genre': line[15],
        }

    metadata_index['data_home'] = data_home

    return metadata_index


DATA = utils.LargeData('salami_index.json', _load_metadata)


class Track(track.Track):
    """salami Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        annotator_1_id (str): number that identifies annotator 1
        annotator_1_time (str): time that annotator 1 took to complete the annotation
        annotator_2_id (str): number that identifies annotator 2
        annotator_2_time (str): time that annotator 2 took to complete the annotation
Example No. 22
                "style": str(style),
                "tempo": int(bpm),
                "beat_type": str(beat_type),
                "time_signature": str(time_signature),
                "midi_filename": str(midi_filename),
                "audio_filename": str(audio_filename),
                "duration": float(duration),
                "split": str(split),
            }

    metadata_index["data_home"] = data_home

    return metadata_index


DATA = utils.LargeData("groove_midi_index.json", _load_metadata)


class Track(core.Track):
    """Groove MIDI Track class

    Args:
        track_id (str): track id of the track

    Attributes:
        drummer (str): Drummer id of the track (ex. 'drummer1')
        session (str): Type of session  (ex. 'session1', 'eval_session')
        track_id (str): track id of the track (ex. 'drummer1/eval_session/1')
        style (str): Style (genre, groove type) of the track (ex. 'funk/groove1')
        tempo (int): Track tempo in beats per minute (ex. 138)
        beat_type (str): Whether the track is a beat or a fill (ex. 'beat')
Example No. 23
"""


def _load_metadata(data_home):
    metadata_path = os.path.join(data_home, "dali_metadata.json")
    if not os.path.exists(metadata_path):
        logging.info("Metadata file {} not found.".format(metadata_path))
        return None
    with open(metadata_path, "r") as fhandle:
        metadata_index = json.load(fhandle)

    metadata_index["data_home"] = data_home
    return metadata_index


DATA = utils.LargeData("dali_index.json", _load_metadata)


class Track(core.Track):
    """DALI melody Track class

    Args:
        track_id (str): track id of the track

    Attributes:
        album (str): the track's album
        annotation_path (str): path to the track's annotation file
        artist (str): the track's artist
        audio_path (str): path to the track's audio file
        audio_url (str): youtube ID
        dataset_version (int): dataset annotation version
Example No. 24
        filename='keys.zip',
        url='https://zenodo.org/record/1095691/files/keys.zip?download=1',
        checksum='775b7d17e009f5818544cf505b6a96fd',
        destination_dir='.',
    ),
    'metadata': download_utils.RemoteFileMetadata(
        filename='original_metadata.zip',
        url='https://zenodo.org/record/1095691/files/original_metadata.zip?download=1',
        checksum='54181e0f34c35d9720439750d0b08091',
        destination_dir='.',
    ),
}

DATA = utils.LargeData('giantsteps_key_index.json')


class Track(track.Track):
    """giantsteps_key track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): track audio path
        keys_path (str): key annotation path
        metadata_path (str): metadata file path
        title (str): title of the track
Example No. 25
        filename="original_metadata.zip",
        url="https://zenodo.org/record/1101082/files/original_metadata.zip?download=1",
        checksum="bb3e3ac1fe5dee7600ef2814accdf8f8",
        destination_dir=".",
    ),
    "audio":
    download_utils.RemoteFileMetadata(
        filename="audio.zip",
        url="https://zenodo.org/record/1101082/files/audio.zip?download=1",
        checksum="f490ee6c23578482d6fcfa11b82636a1",
        destination_dir=".",
    ),
}

DATA = utils.LargeData("beatport_key_index.json")


class Track(core.Track):
    """beatport_key track class
    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`
    Attributes:
        audio_path (str): track audio path
        keys_path (str): key annotation path
        metadata_path (str): metadata file path
        title (str): title of the track
        track_id (str): track id
    """
Example No. 26
        next(csv_reader)
        for row in csv_reader:
            subset, instrument_str, instrument_id, song_id, track_id = row
            metadata_index[str(track_id)] = {
                "subset": str(subset),
                "instrument": str(instrument_str),
                "instrument_id": int(instrument_id),
                "song_id": int(song_id),
            }

    metadata_index["data_home"] = data_home

    return metadata_index


DATA = utils.LargeData("medley_solos_db_index.json", _load_metadata)


class Track(track.Track):
    """medley_solos_db Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path to the track's audio file
        instrument (str): instrument encoded by its English name
        instrument_id (int): instrument encoded as an integer
        song_id (int): song encoded as an integer
Example No. 27
def _load_metadata(metadata_path):
    if not os.path.exists(metadata_path):
        logging.info('Metadata file {} not found.'.format(metadata_path))
        return None

    with open(metadata_path) as f:
        metadata = json.load(f)
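        # Recover data_home as everything before the directory two levels above this
        # metadata file, and build the track id as "<grandparent-dir>_<parent-dir>".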
        data_home = metadata_path.split('/' + metadata_path.split('/')[-3])[0]
        metadata['track_id'] = (str(metadata_path.split('/')[-3]) + '_' +
                                str(metadata_path.split('/')[-2]))
        metadata['data_home'] = data_home

        return metadata


DATA = utils.LargeData('saraga_index.json', _load_metadata)


class Track(core.Track):
    """Saraga Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Common attributes:
        iam_style (str): flag identifying whether the track belongs to the Hindustani or Carnatic collection
        title (str): Title of the piece in the track
        mbid (str): MusicBrainz ID of the track
        album_artists (list, dicts): list of dicts containing the album artists present in the track and its mbid
Example No. 28
                "Pitch": row[7],
                "Pitch ID": int(row[8]),
                "Dynamics": row[9],
                "Dynamics ID": int(row[10]),
                "Instance ID": int(row[11]),
                "Resampled": (row[13] == "TRUE"),
            }
            if len(row[12]) > 0:
                metadata_index[key]["String ID"] = int(float(row[12]))

    metadata_index["data_home"] = data_home

    return metadata_index


DATA = utils.LargeData("tinysol_index.json", _load_metadata)


class Track(track.Track):
    """tinysol Track class

    Args:
        track_id (str): track id of the track
        data_home (str): Local path where the dataset is stored. default=None
            If `None`, looks for the data in the default directory, `~/mir_datasets`

    Attributes:
        audio_path (str): path of the audio file
        dynamics (str): dynamics abbreviation. Ex: pp, mf, ff, etc.
        dynamics_id (int): pp=0, p=1, mf=2, f=3, ff=4
        family (str): instrument family encoded by its English name
Example No. 29
File: ikala.py Project: MTG/mirdata
        return None

    with open(id_map_path, "r") as fhandle:
        reader = csv.reader(fhandle, delimiter="\t")
        singer_map = {}
        for line in reader:
            if line[0] == "singer":
                continue
            singer_map[line[1]] = line[0]

    singer_map["data_home"] = data_home

    return singer_map


DATA = utils.LargeData("ikala_index.json", _load_metadata)


class Track(core.Track):
    """ikala Track class

    Args:
        track_id (str): track id of the track

    Attributes:
        audio_path (str): path to the track's audio file
        f0_path (str): path to the track's f0 annotation file
        lyrics_path (str): path to the track's lyric annotation file
        section (str): section. Either 'verse' or 'chorus'
        singer_id (str): singer id
        song_id (str): song id of the track