# Example 1
def test_track():
    """Verify MedleyDB-Pitch track attributes, property types, and audio loading."""
    data_home = 'tests/resources/mir_datasets/MedleyDB-Pitch'
    track = medleydb_pitch.Track('AClassicEducation_NightOwl_STEM_08', data_home=data_home)

    expected_attributes = {
        'track_id': 'AClassicEducation_NightOwl_STEM_08',
        'audio_path': ('tests/resources/mir_datasets/MedleyDB-Pitch/audio/'
                       'AClassicEducation_NightOwl_STEM_08.wav'),
        'pitch_path': ('tests/resources/mir_datasets/MedleyDB-Pitch/pitch/'
                       'AClassicEducation_NightOwl_STEM_08.csv'),
        'instrument': 'male singer',
        'artist': 'AClassicEducation',
        'title': 'NightOwl',
        'genre': 'Singer/Songwriter',
    }

    expected_property_types = {'pitch': utils.F0Data}

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio is a 2-second mono clip at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
def test_track():
    """Verify beatport_key track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/beatport_key"
    dataset = beatport_key.Dataset(data_home)
    track = dataset.track("1")

    song = "100066 Lindstrom - Monsteer (Original Mix)"
    expected_attributes = {
        "audio_path": "tests/resources/mir_datasets/beatport_key/audio/" + song + ".mp3",
        "keys_path": "tests/resources/mir_datasets/beatport_key/keys/" + song + ".txt",
        "metadata_path": "tests/resources/mir_datasets/beatport_key/meta/" + song + ".json",
        "title": song,
        "track_id": "1",
    }

    expected_property_types = {
        "key": list,
        "genres": dict,
        "artists": list,
        "tempo": int,
        "audio": tuple,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds of mono audio at 44.1 kHz
    audio, sr = track.audio
    assert sr == 44100, "sample rate {} is not 44100".format(sr)
    assert audio.shape == (88200,), "audio shape {} was not (88200,)".format(audio.shape)
# Example 3
def test_track():
    """Verify rwc_jazz track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/rwc_jazz"
    track = rwc_jazz.Track("RM-J004", data_home=data_home)

    expected_attributes = {
        "track_id": "RM-J004",
        "audio_path": data_home + "/audio/rwc-j-m01/4.wav",
        "sections_path": data_home
        + "/annotations/AIST.RWC-MDB-J-2001.CHORUS/RM-J004.CHORUS.TXT",
        "beats_path": data_home
        + "/annotations/AIST.RWC-MDB-J-2001.BEAT/RM-J004.BEAT.TXT",
        "piece_number": "No. 4",
        "suffix": "M01",
        "track_number": "Tr. 04",
        "title": "Crescent Serenade (Piano Solo)",
        "artist": "Makoto Nakamura",
        "duration": 167,
        "variation": "Instrumentation 1",
        "instruments": "Pf",
    }

    expected_property_types = {
        "beats": utils.BeatData,
        "sections": utils.SectionData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is 2 seconds at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 4
def test_track():
    """Verify GiantSteps_tempo track attributes, property types, and audio loading."""
    default_trackid = '113'
    data_home = 'tests/resources/mir_datasets/GiantSteps_tempo'
    track = giantsteps_tempo.Track(default_trackid, data_home=data_home)

    expected_attributes = {
        'audio_path':
        'tests/resources/mir_datasets/GiantSteps_tempo/audio/28952.LOFI.mp3',
        'annotation_v1_path': (
            'tests/resources/mir_datasets/GiantSteps_tempo/giantsteps-tempo-dataset'
            '-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb/annotations/jams/28952.LOFI.jams'
        ),
        'annotation_v2_path': (
            'tests/resources/mir_datasets/GiantSteps_tempo/giantsteps-tempo-dataset'
            '-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb/annotations_v2/jams/28952.LOFI.jams'
        ),
        'title': '28952',
        'track_id': '113',
    }

    expected_property_types = {
        'tempo': utils.TempoData,
        'tempo_v2': utils.TempoData,
        'genre': str,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    audio, sr = track.audio
    assert sr == 22050, 'sample rate {} is not 22050'.format(sr)
    # fix: removed leftover debug print(audio.shape); the shape assertion below covers it
    assert audio.shape == (
        2646720, ), 'audio shape {} was not (2646720,)'.format(audio.shape)
# Example 5
def test_track():
    """Verify queen track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/queen"
    dataset = queen.Dataset(data_home)
    track = dataset.track("0")

    song = "01 Bohemian Rhapsody"
    expected_attributes = {
        "audio_path": ("tests/resources/mir_datasets/queen/audio/"
                       "Greatest Hits I/" + song + ".flac"),
        "chords_path": ("tests/resources/mir_datasets/queen/annotations/"
                        "chordlab/Queen/Greatest Hits I/" + song + ".lab"),
        "keys_path": ("tests/resources/mir_datasets/queen/annotations/"
                      "keylab/Queen/Greatest Hits I/" + song + ".lab"),
        "sections_path": ("tests/resources/mir_datasets/queen/annotations/"
                          "seglab/Queen/Greatest Hits I/" + song + ".lab"),
        "title": song,
        "track_id": "0",
    }

    expected_property_types = {
        "chords": annotations.ChordData,
        "key": annotations.KeyData,
        "sections": annotations.SectionData,
        "audio": tuple,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    audio, sr = track.audio
    assert sr == 44100, "sample rate {} is not 44100".format(sr)
    assert audio.shape == (144384,), "audio shape {} was not (144384,)".format(audio.shape)
# Example 6
def test_track():
    """Verify cante100 track attributes and annotation property types."""
    track = cante100.Track('008', data_home=TEST_DATA_HOME)

    stem = 'tests/resources/mir_datasets/cante100'
    expected_attributes = {
        'artist': 'Toronjo',
        'duration': 179.0,
        'audio_path': stem + '/cante100audio/008_PacoToronjo_Fandangos.mp3',
        'f0_path': stem + '/cante100midi_f0/008_PacoToronjo_Fandangos.f0.csv',
        'identifier': '4eebe839-82bb-426e-914d-7c4525dd9dad',
        'notes_path': (stem + '/cante100_automaticTranscription/'
                       '008_PacoToronjo_Fandangos.notes.csv'),
        'release': 'Atlas del cante flamenco',
        'spectrogram_path': (stem + '/cante100_spectrum/'
                             '008_PacoToronjo_Fandangos.spectrum.csv'),
        'title': 'Huelva Como Capital',
        'track_id': '008',
    }

    expected_property_types = {'melody': utils.F0Data, 'notes': utils.NoteData}

    run_track_tests(track, expected_attributes, expected_property_types)
# Example 7
def test_track():
    """Verify TinySOL track attributes and audio loading for wind and string tracks."""
    data_home = 'tests/resources/mir_datasets/TinySOL'
    track = tinysol.Track('Fl-ord-C4-mf-N-T14d', data_home=data_home)

    expected_attributes = {
        'track_id': 'Fl-ord-C4-mf-N-T14d',
        'audio_path': ('tests/resources/mir_datasets/TinySOL/audio/Winds/'
                       'Flute/ordinario/Fl-ord-C4-mf-N-T14d.wav'),
        'dynamics': 'mf',
        'fold': 0,
        'family': 'Winds',
        'instrument_abbr': 'Fl',
        'instrument_full': 'Flute',
        'technique_abbr': 'ord',
        'technique_full': 'ordinario',
        'pitch': 'C4',
        'pitch_id': 60,
        'dynamics_id': 2,
        'instance_id': 0,
        'is_resampled': True,
        'string_id': None,
    }

    # no derived annotation properties for this dataset
    run_track_tests(track, expected_attributes, {})

    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (272417,)

    # test with a string instrument
    track = tinysol.Track('Cb-ord-A2-mf-2c-N', data_home=data_home)
# Example 8
def test_track():
    """Verify MedleyDB-Melody track attributes, property types, and audio loading."""
    data_home = 'tests/resources/mir_datasets/MedleyDB-Melody'
    track = medleydb_melody.Track('MusicDelta_Beethoven', data_home=data_home)

    expected_attributes = {
        'track_id': 'MusicDelta_Beethoven',
        'audio_path': ('tests/resources/mir_datasets/MedleyDB-Melody/'
                       'audio/MusicDelta_Beethoven_MIX.wav'),
        'melody1_path': ('tests/resources/mir_datasets/MedleyDB-Melody/'
                         'melody1/MusicDelta_Beethoven_MELODY1.csv'),
        'melody2_path': ('tests/resources/mir_datasets/MedleyDB-Melody/'
                         'melody2/MusicDelta_Beethoven_MELODY2.csv'),
        'melody3_path': ('tests/resources/mir_datasets/MedleyDB-Melody/'
                         'melody3/MusicDelta_Beethoven_MELODY3.csv'),
        'artist': 'MusicDelta',
        'title': 'Beethoven',
        'genre': 'Classical',
        'is_excerpt': True,
        'is_instrumental': True,
        'n_sources': 18,
    }

    expected_property_types = {
        'melody1': utils.F0Data,
        'melody2': utils.F0Data,
        'melody3': utils.MultipitchData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds, mono, 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 9
def test_track():
    """Verify example track attributes, property types, paths, and audio loading."""
    data_home = "tests/resources/mir_datasets/dataset"
    dataset = example.Dataset(data_home)
    track = dataset.track("some_id")

    expected_attributes = {
        "track_id": "some_id",
        "audio_path": "tests/resources/mir_datasets/example/Wavfile/some_id.wav",
        "song_id": "some_id",
        "annotation_path": "tests/resources/mir_datasets/example/annotation/some_id.pv",
    }

    expected_property_types = {"annotation": annotations.XData}

    assert track._track_paths == {
        "audio": ["Wavfile/some_id.wav", "278ae003cb0d323e99b9a643c0f2eeda"],
        "annotation": ["Annotation/some_id.pv", "0d93a011a9e668fd80673049089bbb14"],
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is 2 seconds at 44.1 kHz
    audio, sr = track.audio
    assert sr == 44100
    assert audio.shape == (2 * 44100,)
# Example 10
def test_track():
    """Verify tinysol track attributes and audio loading for wind and string tracks."""
    data_home = "tests/resources/mir_datasets/tinysol"
    track = tinysol.Track("Fl-ord-C4-mf-N-T14d", data_home=data_home)

    expected_attributes = {
        "track_id": "Fl-ord-C4-mf-N-T14d",
        "audio_path": ("tests/resources/mir_datasets/tinysol/audio/Winds/"
                       "Flute/ordinario/Fl-ord-C4-mf-N-T14d.wav"),
        "dynamics": "mf",
        "fold": 0,
        "family": "Winds",
        "instrument_abbr": "Fl",
        "instrument_full": "Flute",
        "technique_abbr": "ord",
        "technique_full": "ordinario",
        "pitch": "C4",
        "pitch_id": 60,
        "dynamics_id": 2,
        "instance_id": 0,
        "is_resampled": True,
        "string_id": None,
    }

    # no derived annotation properties for this dataset
    run_track_tests(track, expected_attributes, {})

    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (272417,)

    # test with a string instrument
    track = tinysol.Track("Cb-ord-A2-mf-2c-N", data_home=data_home)
# Example 11
def test_track():
    """Verify tonality_classicaldb track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/tonality_classicaldb"
    dataset = tonality_classicaldb.Dataset(data_home)
    track = dataset.track("0")

    name = "01-Allegro__Gloria_in_excelsis_Deo_in_D_Major - D"
    expected_attributes = {
        "audio_path": data_home + "/audio/" + name + ".wav",
        "key_path": data_home + "/keys/" + name + ".txt",
        "spectrum_path": data_home + "/spectrums/" + name + ".json",
        "hpcp_path": data_home + "/HPCPs/" + name + ".json",
        "musicbrainz_path": data_home + "/musicbrainz_metadata/" + name + ".json",
        "title": name,
        "track_id": "0",
    }

    expected_property_types = {
        "key": str,
        "spectrum": np.ndarray,
        "hpcp": np.ndarray,
        "musicbrainz_metadata": dict,
        "audio": tuple,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds of mono audio at 44.1 kHz
    audio, sr = track.audio
    assert sr == 44100, "sample rate {} is not 44100".format(sr)
    assert audio.shape == (88200,), "audio shape {} was not (88200,)".format(audio.shape)
# Example 12
def test_track():
    """Verify giantsteps_key track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/giantsteps_key"
    dataset = giantsteps_key.Dataset(data_home)
    track = dataset.track("3")

    song = "10089 Jason Sparks - Close My Eyes feat. J. Little (Original Mix)"
    expected_attributes = {
        "audio_path": "tests/resources/mir_datasets/giantsteps_key/audio/" + song + ".mp3",
        "keys_path": "tests/resources/mir_datasets/giantsteps_key/keys_gs+/" + song + ".txt",
        "metadata_path": "tests/resources/mir_datasets/giantsteps_key/meta/" + song + ".json",
        "title": song,
        "track_id": "3",
    }

    expected_property_types = {
        "key": str,
        "genres": dict,
        "artists": list,
        "tempo": int,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds of mono audio at 44.1 kHz
    audio, sr = track.audio
    assert sr == 44100, "sample rate {} is not 44100".format(sr)
    assert audio.shape == (88200,), "audio shape {} was not (88200,)".format(audio.shape)
# Example 13
def test_track():
    """Verify medleydb_pitch track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/medleydb_pitch"
    dataset = medleydb_pitch.Dataset(data_home)
    track = dataset.track("AClassicEducation_NightOwl_STEM_08")

    expected_attributes = {
        "track_id": "AClassicEducation_NightOwl_STEM_08",
        "audio_path": ("tests/resources/mir_datasets/medleydb_pitch/audio/"
                       "AClassicEducation_NightOwl_STEM_08.wav"),
        "pitch_path": ("tests/resources/mir_datasets/medleydb_pitch/pitch/"
                       "AClassicEducation_NightOwl_STEM_08.csv"),
        "instrument": "male singer",
        "artist": "AClassicEducation",
        "title": "NightOwl",
        "genre": "Singer/Songwriter",
    }

    expected_property_types = {"pitch": annotations.F0Data}

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds, mono, 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 14
def test_track():
    """Verify GiantSteps_key track attributes, property types, and audio loading."""
    data_home = 'tests/resources/mir_datasets/GiantSteps_key'
    track = giantsteps_key.Track('3', data_home=data_home)

    song = '10089 Jason Sparks - Close My Eyes feat. J. Little (Original Mix)'
    expected_attributes = {
        'audio_path': 'tests/resources/mir_datasets/GiantSteps_key/audio/' + song + '.mp3',
        'keys_path': 'tests/resources/mir_datasets/GiantSteps_key/keys_gs+/' + song + '.txt',
        'metadata_path': 'tests/resources/mir_datasets/GiantSteps_key/meta/' + song + '.json',
        'title': song,
        'track_id': '3',
    }

    expected_property_types = {
        'key': str,
        'genres': dict,
        'artists': list,
        'tempo': int,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    audio, sr = track.audio
    assert sr == 44100, 'sample rate {} is not 44100'.format(sr)
    assert audio.shape == (5294592,), 'audio shape {} was not (5294592,)'.format(audio.shape)
def test_track():
    """Verify acousticbrainz_genre track attributes and property types."""
    trackid = (
        "tagtraum#validation#be9e01e5-8f93-494d-bbaa-ddcc5a52f629"
        "#2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960#trance########"
    )
    data_home = "tests/resources/mir_datasets/acousticbrainz_genre"

    dataset = acousticbrainz_genre.Dataset(data_home)
    track = dataset.track(trackid)

    expected_attributes = {
        "path": (
            "tests/resources/mir_datasets/acousticbrainz_genre/"
            "acousticbrainz-mediaeval-validation/be/"
            "be9e01e5-8f93-494d-bbaa-ddcc5a52f629.json"
        ),
        "track_id": trackid,
        "genre": ["trance"],
        "mbid": "be9e01e5-8f93-494d-bbaa-ddcc5a52f629",
        "mbid_group": "2b6bfcfd-46a5-3f98-a58f-2c51d7c9e960",
        "split": "validation",
    }

    expected_property_types = {
        "artist": list,
        "title": list,
        "date": list,
        "file_name": str,
        "album": list,
        "tracknumber": list,
        "tonal": dict,
        "low_level": dict,
        "rhythm": dict,
        "acousticbrainz_metadata": dict,
    }

    run_track_tests(track, expected_attributes, expected_property_types)
# Example 16
def test_track():
    """Verify RWC-Jazz track attributes, property types, and audio loading."""
    data_home = 'tests/resources/mir_datasets/RWC-Jazz'
    track = rwc_jazz.Track('RM-J004', data_home=data_home)

    expected_attributes = {
        'track_id': 'RM-J004',
        'audio_path': data_home + '/audio/rwc-j-m01/4.wav',
        'sections_path': data_home
        + '/annotations/AIST.RWC-MDB-J-2001.CHORUS/RM-J004.CHORUS.TXT',
        'beats_path': data_home
        + '/annotations/AIST.RWC-MDB-J-2001.BEAT/RM-J004.BEAT.TXT',
        'piece_number': 'No. 4',
        'suffix': 'M01',
        'track_number': 'Tr. 04',
        'title': 'Crescent Serenade (Piano Solo)',
        'artist': 'Makoto Nakamura',
        'duration': 167,
        'variation': 'Instrumentation 1',
        'instruments': 'Pf',
    }

    expected_property_types = {
        'beats': utils.BeatData,
        'sections': utils.SectionData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is 2 seconds at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
def test_track():
    """Verify saraga_hindustani track attributes, property types, and stereo audio."""
    data_home = "tests/resources/mir_datasets/saraga_hindustani"
    dataset = saraga_hindustani.Dataset(data_home)
    track = dataset.track("59_Bairagi")

    # every annotation file shares this stem and differs only by suffix
    stem = (
        "tests/resources/mir_datasets/saraga_hindustani/saraga1.5_hindustani/"
        "Geetinandan : Part-3 by Ajoy Chakrabarty/Bairagi/Bairagi"
    )
    expected_attributes = {
        "track_id": "59_Bairagi",
        "title": "Bairagi",
        "audio_path": stem + ".mp3.mp3",
        "ctonic_path": stem + ".ctonic.txt",
        "pitch_path": stem + ".pitch.txt",
        "tempo_path": stem + ".tempo-manual.txt",
        "sama_path": stem + ".sama-manual.txt",
        "sections_path": stem + ".sections-manual-p.txt",
        "phrases_path": stem + ".mphrases-manual.txt",
        "metadata_path": stem + ".json",
    }

    expected_property_types = {
        "tempo": dict,
        "phrases": annotations.EventData,
        "pitch": annotations.F0Data,
        "sama": annotations.BeatData,
        "sections": annotations.SectionData,
        "tonic": float,
        "metadata": dict,
        "audio": tuple,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is stereo at 44.1 kHz
    audio, sr = track.audio
    assert sr == 44100
    assert audio.shape[0] == 2
# Example 18
def test_track():
    """Verify rwc_classical track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/rwc_classical"
    dataset = rwc_classical.Dataset(data_home)
    track = dataset.track("RM-C003")

    expected_attributes = {
        "track_id": "RM-C003",
        "audio_path": data_home + "/audio/rwc-c-m01/3.wav",
        "sections_path": data_home
        + "/annotations/AIST.RWC-MDB-C-2001.CHORUS/RM-C003.CHORUS.TXT",
        "beats_path": data_home
        + "/annotations/AIST.RWC-MDB-C-2001.BEAT/RM-C003.BEAT.TXT",
        "piece_number": "No. 3",
        "suffix": "M01",
        "track_number": "Tr. 03",
        "title": "Symphony no.5 in C minor, op.67. 1st mvmt.",
        "composer": "Beethoven, Ludwig van",
        "artist": "Tokyo City Philharmonic Orchestra",
        "duration": 435,
        "category": "Symphony",
    }

    expected_property_types = {
        "beats": annotations.BeatData,
        "sections": annotations.SectionData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is 2 seconds at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 19
def test_track():
    """Verify TONAS track attributes and annotation property types."""
    dataset = tonas.Dataset(TEST_DATA_HOME)
    track = dataset.track("01-D_AMairena")

    stem = "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena"
    expected_attributes = {
        "singer": "En el barrio de Triana",
        "style": "Debla",
        "title": "Antonio Mairena",
        "tuning_frequency": 451.0654725341684,
        "f0_path": stem + ".f0.Corrected",
        "notes_path": stem + ".notes.Corrected",
        "audio_path": stem + ".wav",
        "track_id": "01-D_AMairena",
    }

    expected_property_types = {
        "f0": tonas.F0Data,
        "notes": tonas.NoteData,
        "audio": tuple,
        "singer": str,
        "style": str,
        "title": str,
        "tuning_frequency": float,
    }

    run_track_tests(track, expected_attributes, expected_property_types)
# Example 20
def test_track():
    """Verify medleydb_melody track attributes, property types, and audio loading."""
    data_home = "tests/resources/mir_datasets/medleydb_melody"
    track = medleydb_melody.Track("MusicDelta_Beethoven", data_home=data_home)

    expected_attributes = {
        "track_id": "MusicDelta_Beethoven",
        "audio_path": ("tests/resources/mir_datasets/medleydb_melody/"
                       "audio/MusicDelta_Beethoven_MIX.wav"),
        "melody1_path": ("tests/resources/mir_datasets/medleydb_melody/"
                         "melody1/MusicDelta_Beethoven_MELODY1.csv"),
        "melody2_path": ("tests/resources/mir_datasets/medleydb_melody/"
                         "melody2/MusicDelta_Beethoven_MELODY2.csv"),
        "melody3_path": ("tests/resources/mir_datasets/medleydb_melody/"
                         "melody3/MusicDelta_Beethoven_MELODY3.csv"),
        "artist": "MusicDelta",
        "title": "Beethoven",
        "genre": "Classical",
        "is_excerpt": True,
        "is_instrumental": True,
        "n_sources": 18,
    }

    expected_property_types = {
        "melody1": utils.F0Data,
        "melody2": utils.F0Data,
        "melody3": utils.MultipitchData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fixture audio: 2 seconds, mono, 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 21
def test_track():
    """Verify RWC-Classical track attributes, property types, and audio loading."""
    data_home = 'tests/resources/mir_datasets/RWC-Classical'
    track = rwc_classical.Track('RM-C003', data_home=data_home)

    expected_attributes = {
        'track_id': 'RM-C003',
        'audio_path': data_home + '/audio/rwc-c-m01/3.wav',
        'sections_path': data_home
        + '/annotations/AIST.RWC-MDB-C-2001.CHORUS/RM-C003.CHORUS.TXT',
        'beats_path': data_home
        + '/annotations/AIST.RWC-MDB-C-2001.BEAT/RM-C003.BEAT.TXT',
        'piece_number': 'No. 3',
        'suffix': 'M01',
        'track_number': 'Tr. 03',
        'title': 'Symphony no.5 in C minor, op.67. 1st mvmt.',
        'composer': 'Beethoven, Ludwig van',
        'artist': 'Tokyo City Philharmonic Orchestra',
        'duration': 435,
        'category': 'Symphony',
    }

    expected_property_types = {
        'beats': utils.BeatData,
        'sections': utils.SectionData,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; fixture is 2 seconds at 44.1 kHz
    y, sr = track.audio
    assert sr == 44100
    assert y.shape == (2 * 44100,)
# Example 22
def test_track():
    """Verify giantsteps_tempo track attributes, property types, and audio loading."""
    default_trackid = "113"
    data_home = "tests/resources/mir_datasets/giantsteps_tempo"
    dataset = giantsteps_tempo.Dataset(data_home)
    track = dataset.track(default_trackid)

    expected_attributes = {
        "audio_path":
        "tests/resources/mir_datasets/giantsteps_tempo/audio/28952.LOFI.mp3",
        "annotation_v1_path": (
            "tests/resources/mir_datasets/giantsteps_tempo/giantsteps-tempo-dataset"
            "-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb/annotations/jams/28952.LOFI.jams"
        ),
        "annotation_v2_path": (
            "tests/resources/mir_datasets/giantsteps_tempo/giantsteps-tempo-dataset"
            "-0b7d47ba8cae59d3535a02e3db69e2cf6d0af5bb/annotations_v2/jams/28952.LOFI.jams"
        ),
        "title": "28952",
        "track_id": "113",
    }

    expected_property_types = {
        "tempo": annotations.TempoData,
        "tempo_v2": annotations.TempoData,
        "genre": str,
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # fix: removed leftover debug print(audio.shape) — a test should assert, not print
    audio, sr = track.audio
    assert sr == 22050, "sample rate {} is not 22050".format(sr)
# Example 23
def test_track():
    """Verify Groove MIDI track attributes, track paths, audio, and MIDI loading."""
    data_home = 'tests/resources/mir_datasets/Groove MIDI'
    track = groove_midi.Track('drummer1/eval_session/1', data_home=data_home)

    midi_rel = 'drummer1/eval_session/1_funk-groove1_138_beat_4-4.mid'
    audio_rel = 'drummer1/eval_session/1_funk-groove1_138_beat_4-4.wav'
    expected_attributes = {
        'drummer': 'drummer1',
        'session': 'drummer1/eval_session',
        'track_id': 'drummer1/eval_session/1',
        'style': 'funk/groove1',
        'bpm': 138,
        'beat_type': 'beat',
        'time_signature': '4-4',
        'midi_filename': midi_rel,
        'audio_filename': audio_rel,
        'midi_path': os.path.join(data_home, midi_rel),
        'audio_path': os.path.join(data_home, audio_rel),
        'duration': 27.872308,
        'split': 'test',
    }

    assert track._track_paths == {
        'audio': [audio_rel, '7f94a191506f70ac9d313b7978203c3c'],
        'midi': [midi_rel, 'b01a609cee84cfbc2c154bb9b6566955'],
    }

    # no derived annotation properties checked for this dataset
    run_track_tests(track, expected_attributes, {})

    # test audio loading functions
    audio, sr = track.audio
    assert sr == 22050
    assert audio.shape == (613566,)

    # test midi loading functions
    midi_data = track.midi
    assert len(midi_data.instruments) == 1
    assert len(midi_data.instruments[0].notes) == 410
    assert midi_data.estimate_tempo() == 198.7695135305443
    assert midi_data.get_piano_roll().shape == (128, 2787)
# Example 24
def test_track():
    """Verify ikala track attributes, property types, and vocal/instrumental/mix audio."""
    data_home = "tests/resources/mir_datasets/ikala"
    dataset = ikala.Dataset(data_home)
    track = dataset.track("10161_chorus")

    expected_attributes = {
        "track_id": "10161_chorus",
        "audio_path": "tests/resources/mir_datasets/ikala/Wavfile/10161_chorus.wav",
        "song_id": "10161",
        "section": "chorus",
        "singer_id": "1",
        "f0_path": "tests/resources/mir_datasets/ikala/PitchLabel/10161_chorus.pv",
        "lyrics_path": "tests/resources/mir_datasets/ikala/Lyrics/10161_chorus.lab",
    }

    expected_property_types = {
        "f0": annotations.F0Data,
        "lyrics": annotations.LyricData,
        "vocal_audio": tuple,
        "instrumental_audio": tuple,
        "mix_audio": tuple,
    }

    assert track._track_paths == {
        "audio": ["Wavfile/10161_chorus.wav", "278ae003cb0d323e99b9a643c0f2eeda"],
        "pitch": ["PitchLabel/10161_chorus.pv", "0d93a011a9e668fd80673049089bbb14"],
        "lyrics": ["Lyrics/10161_chorus.lab", "79bbeb72b422056fd43be4e8d63319ce"],
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # test audio loading functions; each stem is a 2-second clip at 44.1 kHz
    vocal, sr_vocal = track.vocal_audio
    assert sr_vocal == 44100
    assert vocal.shape == (2 * 44100,)

    instrumental, sr_instrumental = track.instrumental_audio
    assert sr_instrumental == 44100
    assert instrumental.shape == (2 * 44100,)

    # make sure we loaded the correct channels to vocal/instrumental
    # (in this example, the first quarter second has only instrumentals)
    assert np.mean(np.abs(vocal[:8820])) < np.mean(np.abs(instrumental[:8820]))

    mix, sr_mix = track.mix_audio
    assert sr_mix == 44100
    assert mix.shape == (2 * 44100,)
    assert np.array_equal(mix, instrumental + vocal)
def test_multitrack():
    """Verify the phenicx_anechoic Beethoven multitrack metadata and track grouping."""
    data_home = "tests/resources/mir_datasets/phenicx_anechoic"
    dataset = phenicx_anechoic.Dataset(data_home)
    mtrack = dataset.multitrack("beethoven")

    # one single-instrument track per orchestra part, all prefixed "beethoven-"
    instrument_names = [
        "horn", "doublebass", "violin", "bassoon", "flute",
        "clarinet", "viola", "oboe", "cello", "trumpet",
    ]
    expected_attributes = {
        "mtrack_id": "beethoven",
        "track_audio_property": "audio",
        "track_ids": ["beethoven-" + name for name in instrument_names],
        "instruments": {name: "beethoven-" + name for name in instrument_names},
        "sections": {
            "brass": ["beethoven-horn", "beethoven-trumpet"],
            "strings": [
                "beethoven-doublebass",
                "beethoven-violin",
                "beethoven-viola",
                "beethoven-cello",
            ],
            "woodwinds": [
                "beethoven-bassoon",
                "beethoven-flute",
                "beethoven-clarinet",
                "beethoven-oboe",
            ],
        },
        "piece": "beethoven",
    }

    expected_property_types = {
        "tracks": dict,
        "track_audio_property": str,
    }

    run_track_tests(mtrack, expected_attributes, expected_property_types)
    run_multitrack_tests(mtrack)
# Example 26
def test_track():
    """Check IRMAS test-split and train-split tracks: attributes, property types, audio."""
    data_home = "tests/resources/mir_datasets/irmas"
    dataset = irmas.Dataset(data_home)

    test_split_track = dataset.track("1")
    train_split_track = dataset.track("0189__2")

    # The testing-data fixture has a separate .txt annotation file.
    testing_prefix = (
        "tests/resources/mir_datasets/irmas/IRMAS-TestingData-Part1/Part1/"
        "02 - And The Body Will Die-8"
    )
    # Training-data annotations are encoded in the filename itself, so the
    # annotation path points at the .wav file.
    training_prefix = (
        "tests/resources/mir_datasets/irmas/IRMAS-TrainingData/cla/"
        "[cla][cla]0189__2"
    )

    expected_test_attributes = {
        "annotation_path": testing_prefix + ".txt",
        "audio_path": testing_prefix + ".wav",
        "track_id": "1",
        "predominant_instrument": None,
        "genre": None,
        "drum": None,
        "train": False,
    }
    expected_train_attributes = {
        "annotation_path": training_prefix + ".wav",
        "audio_path": training_prefix + ".wav",
        "track_id": "0189__2",
        "predominant_instrument": "cla",
        "genre": "cla",
        "drum": None,
        "train": True,
    }

    expected_property_types = {
        "instrument": list,
        "audio": tuple,
    }

    run_track_tests(test_split_track, expected_test_attributes, expected_property_types)
    run_track_tests(
        train_split_track, expected_train_attributes, expected_property_types
    )

    # Audio fixture is stereo, 2 seconds at 44.1 kHz.
    audio, sr = test_split_track.audio
    assert sr == 44100
    assert len(audio) == 2
    assert len(audio[1, :]) == 88200
def test_track():
    """Check a Groove MIDI track: attributes, file paths, audio and MIDI loading."""
    data_home = "tests/resources/mir_datasets/groove_midi"
    dataset = groove_midi.Dataset(data_home)
    track = dataset.track("drummer1/eval_session/1")

    rel_midi = "drummer1/eval_session/1_funk-groove1_138_beat_4-4.mid"
    rel_audio = "drummer1/eval_session/1_funk-groove1_138_beat_4-4.wav"

    expected_attributes = {
        "drummer": "drummer1",
        "session": "drummer1/eval_session",
        "track_id": "drummer1/eval_session/1",
        "style": "funk/groove1",
        "tempo": 138,
        "beat_type": "beat",
        "time_signature": "4-4",
        "midi_filename": rel_midi,
        "audio_filename": rel_audio,
        "midi_path": os.path.join(data_home, rel_midi),
        "audio_path": os.path.join(data_home, rel_audio),
        "duration": 27.872308,
        "split": "test",
    }

    expected_property_types = {
        "beats": annotations.BeatData,
        "drum_events": annotations.EventData,
        "midi": pretty_midi.PrettyMIDI,
        "audio": tuple,
    }

    # Internal path index pairs each relative path with its checksum.
    assert track._track_paths == {
        "audio": [rel_audio, "7f94a191506f70ac9d313b7978203c3c"],
        "midi": [rel_midi, "b01a609cee84cfbc2c154bb9b6566955"],
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # Audio fixture: mono, 22.05 kHz.
    audio, sr = track.audio
    assert sr == 22050
    assert audio.shape == (613566,)

    # MIDI fixture: a single drum instrument with 410 notes.
    midi_data = track.midi
    assert len(midi_data.instruments) == 1
    assert len(midi_data.instruments[0].notes) == 410
    assert midi_data.estimate_tempo() == 198.7695135305443
    assert midi_data.get_piano_roll().shape == (128, 2787)
Esempio n. 28
0
def test_track():
    """Check an iKala track: attributes, annotation types, and channel loading."""
    data_home = 'tests/resources/mir_datasets/iKala'
    track = ikala.Track('10161_chorus', data_home=data_home)

    expected_attributes = {
        'track_id': '10161_chorus',
        'audio_path': 'tests/resources/mir_datasets/iKala/Wavfile/10161_chorus.wav',
        'song_id': '10161',
        'section': 'chorus',
        'singer_id': '1',
        'f0_path': 'tests/resources/mir_datasets/iKala/PitchLabel/10161_chorus.pv',
        'lyrics_path': 'tests/resources/mir_datasets/iKala/Lyrics/10161_chorus.lab',
    }

    expected_property_types = {
        'f0': utils.F0Data,
        'lyrics': utils.LyricData,
    }

    # Internal path index: relative path + checksum per annotation/audio file.
    assert track._track_paths == {
        'audio': ['Wavfile/10161_chorus.wav', '278ae003cb0d323e99b9a643c0f2eeda'],
        'pitch': ['PitchLabel/10161_chorus.pv', '0d93a011a9e668fd80673049089bbb14'],
        'lyrics': ['Lyrics/10161_chorus.lab', '79bbeb72b422056fd43be4e8d63319ce'],
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # Each channel of the 2-second, 44.1 kHz fixture loads separately.
    vocal, sr_vocal = track.vocal_audio
    assert sr_vocal == 44100
    assert vocal.shape == (44100 * 2, )

    instrumental, sr_instrumental = track.instrumental_audio
    assert sr_instrumental == 44100
    assert instrumental.shape == (44100 * 2, )

    # Channel sanity check: the first quarter second of this fixture is
    # instrumental-only, so the vocal channel must be quieter there.
    quarter_second = 8820
    vocal_level = np.mean(np.abs(vocal[:quarter_second]))
    instrumental_level = np.mean(np.abs(instrumental[:quarter_second]))
    assert vocal_level < instrumental_level

    # The mix is the sample-wise sum of the two stems.
    mix, sr_mix = track.mix_audio
    assert sr_mix == 44100
    assert mix.shape == (44100 * 2, )
    assert np.array_equal(mix, instrumental + vocal)
Esempio n. 29
0
def test_track():
    """Check IRMAS tracks via the legacy Track constructor (test and train splits)."""
    data_home = "tests/resources/mir_datasets/irmas"
    test_split_track = irmas.Track('1', data_home=data_home)
    train_split_track = irmas.Track('0189__2', data_home=data_home)

    # The testing-data fixture has a separate .txt annotation file.
    testing_prefix = (
        "tests/resources/mir_datasets/irmas/IRMAS-TestingData-Part1/Part1/"
        "02 - And The Body Will Die-8"
    )
    # Training-data annotations live in the filename, so annotation_path
    # points at the .wav file.
    training_prefix = (
        "tests/resources/mir_datasets/irmas/IRMAS-TrainingData/cla/"
        "[cla][cla]0189__2"
    )

    expected_test_attributes = {
        'annotation_path': testing_prefix + ".txt",
        'audio_path': testing_prefix + ".wav",
        'track_id': '1',
        'predominant_instrument': None,
        'genre': None,
        'drum': None,
        'train': False,
    }
    expected_train_attributes = {
        'annotation_path': training_prefix + ".wav",
        'audio_path': training_prefix + ".wav",
        'track_id': '0189__2',
        'predominant_instrument': 'cla',
        'genre': 'cla',
        'drum': None,
        'train': True,
    }

    expected_property_types = {'instrument': list}

    run_track_tests(test_split_track, expected_test_attributes, expected_property_types)
    run_track_tests(
        train_split_track, expected_train_attributes, expected_property_types
    )

    # Audio fixture is stereo, 20 seconds at 44.1 kHz.
    audio, sr = test_split_track.audio
    assert sr == 44100
    assert len(audio) == 2
    assert len(audio[1, :]) == 882000
Esempio n. 30
0
def test_track():
    """Check a MAESTRO track: attributes, file paths, property types, and audio."""
    data_home = "tests/resources/mir_datasets/maestro"
    dataset = maestro.Dataset(data_home)

    track_id = "2018/MIDI-Unprocessed_Chamber3_MID--AUDIO_10_R3_2018_wav--1"
    track = dataset.track(track_id)

    rel_midi = "2018/MIDI-Unprocessed_Chamber3_MID--AUDIO_10_R3_2018_wav--1.midi"
    rel_audio = "2018/MIDI-Unprocessed_Chamber3_MID--AUDIO_10_R3_2018_wav--1.wav"

    expected_attributes = {
        "track_id": track_id,
        "midi_path": os.path.join(data_home, rel_midi),
        "audio_path": os.path.join(data_home, rel_audio),
        "canonical_composer": "Alban Berg",
        "canonical_title": "Sonata Op. 1",
        "year": 2018,
        "duration": 698.661160312,
        "split": "train",
    }

    expected_property_types = {
        "notes": annotations.NoteData,
        "midi": pretty_midi.PrettyMIDI,
        "audio": tuple,
    }

    # Internal path index pairs each relative path with its checksum.
    assert track._track_paths == {
        "audio": [rel_audio, "1694d8431f01eeb2a18444196550b99d"],
        "midi": [rel_midi, "4901b1578ee4fe8c1696e02f60924949"],
    }

    run_track_tests(track, expected_attributes, expected_property_types)

    # Audio fixture: mono, 2 seconds at 48 kHz.
    audio, sr = track.audio
    assert sr == 48000
    assert audio.shape == (48000 * 2, )