Example #1
0
    def test_read_chord_annotations(self):
        """Chord segments loaded from detection files match the references."""
        # (detection file, expected (start, end, label) segments)
        cases = [
            ('sample2.dc_chord_recognition.txt',
             [(0.0, 1.6, 'F:maj'),
              (1.6, 2.5, 'A:maj'),
              (2.5, 4.1, 'D:maj')]),
            ('sample.dc_chord_recognition.txt',
             [(0.0, 2.9, 'G#:maj')]),
        ]
        for det_file, reference in cases:
            chords = load_chords(pj(DETECTIONS_PATH, det_file))
            _compare_labels(self, chords,
                            np.array(reference, dtype=SEGMENT_DTYPE))
Example #2
0
 def test_txt(self):
     """Activations survive a text-file round trip (save, reload, process)."""
     for audio, reference_act, reference_res in zip(
             [sample_file, sample2_file], self.activations, self.results):
         # write the activations out as a space-separated text file
         run_save(self.bin, audio, tmp_act, args=['--sep', ' '])
         loaded = Activations(tmp_act, sep=' ', fps=100)
         self.assertTrue(np.allclose(loaded, reference_act, atol=1e-5))
         # process the saved activations and compare the chord output
         run_load(self.bin, tmp_act, tmp_result, args=['--sep', ' '])
         self._check_results(load_chords(tmp_result), reference_res)
Example #3
0
 def test_binary(self):
     """Activations survive a binary round trip, including the frame rate."""
     for audio, reference_act, reference_res in zip(
             [sample_file, sample2_file], self.activations, self.results):
         # dump the activations in the default binary format
         run_save(self.bin, audio, tmp_act)
         loaded = Activations(tmp_act)
         self.assertTrue(np.allclose(loaded, reference_act, atol=1e-5))
         # binary files carry the frame rate, so it must round-trip too
         self.assertEqual(loaded.fps, reference_act.fps)
         # process the saved activations and compare the chord output
         run_load(self.bin, tmp_act, tmp_result)
         self._check_results(load_chords(tmp_result), reference_res)
Example #4
0
 def setUp(self):
     """Locate the program and load reference activations and results."""
     self.bin = pj(program_path, "DCChordRecognition")
     # reference deep-chroma activations and chord detections, one per sample
     act_files = ['sample.deep_chroma.npz', 'sample2.deep_chroma.npz']
     det_files = ['sample.dc_chord_recognition.txt',
                  'sample2.dc_chord_recognition.txt']
     self.activations = [Activations(pj(ACTIVATIONS_PATH, f))
                         for f in act_files]
     self.results = [load_chords(pj(DETECTIONS_PATH, f))
                     for f in det_files]
Example #5
0
 def test_run(self):
     """Running the program on audio reproduces the reference chords."""
     for audio, reference in zip([sample_file, sample2_file], self.results):
         run_single(self.bin, audio, tmp_result)
         self._check_results(load_chords(tmp_result), reference)
Example #6
0
import unittest
from os.path import join as pj

from madmom.features import Activations
from madmom.features.chords import *
from madmom.io import load_chords
from . import ACTIVATIONS_PATH, AUDIO_PATH, DETECTIONS_PATH

# The two test samples; every fixture below is derived from these base names.
_NAMES = ('sample', 'sample2')

sample_files = [pj(AUDIO_PATH, name + '.wav') for name in _NAMES]

# reference activations and chord labels for the CNN-based recogniser
sample_cnn_acts = [
    Activations(pj(ACTIVATIONS_PATH, name + '.cnn_chord_features.npz'))
    for name in _NAMES]

sample_cnn_labels = [
    load_chords(pj(DETECTIONS_PATH, name + '.cnn_chord_recognition.txt'))
    for name in _NAMES]

# reference activations and chord labels for the deep-chroma recogniser
sample_deep_chroma_acts = [
    Activations(pj(ACTIVATIONS_PATH, name + '.deep_chroma.npz'))
    for name in _NAMES]

sample_deep_chroma_labels = [
    load_chords(pj(DETECTIONS_PATH, name + '.dc_chord_recognition.txt'))
    for name in _NAMES]


def _compare_labels(test_case, labels, reference_labels):
    test_case.assertTrue(
        np.allclose(labels['start'], reference_labels['start']))