Example #1
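The test methods below are shown without their surrounding module. A minimal sketch of the assumed scaffolding follows (the class name is hypothetical; the import mirrors the silent_frames_evaluation module used in Example #6):

import unittest

import numpy as np

# assumed import path; Example #6 calls silent_frames_evaluation.eval_silent_frames
from silent_frames_evaluation import eval_silent_frames


class TestSilentFramesEvaluation(unittest.TestCase):  # hypothetical class name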
    def test_without_last_frame(self):
        true = np.ones((103,)) * 2
        true[10:20] = np.zeros((10,))
        true[45:55] = np.zeros((10,))
        predicted = np.ones((103,)) * (-2)
        predicted[50:60] = np.zeros((10,))
        predicted[70:80] = np.zeros((10,))
        predicted[95:103] = np.zeros((8,))
        window_size = 10
        hop_size = 5

        pes, eps, silent_true_source_frames, silent_prediction_frames = eval_silent_frames(true, predicted, window_size,
                                                                                           hop_size, False, False)

        silent_true_source_frames_expected = [2, 9]
        silent_prediction_frames_expected = [10, 14]

        pes_expected = np.array([10 * np.log10(10 * 4 + 10 ** (-12)), 10 * np.log10(5 * 4 + 10 ** (-12))])
        eps_expected = np.array([10 * np.log10(5 * 4 + 10 ** (-12)), 10 * np.log10(10 * 4 + 10 ** (-12))])
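        # (Inferred from these expected values: PES measures the predicted signal's energy, in dB,
        # over each frame where the true source is silent, and EPS measures the true signal's
        # energy over each frame where the prediction is silent: 10 * log10(sum of squares + 1e-12).)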

        correct_pes = np.array_equal(pes, pes_expected)
        correct_eps = np.array_equal(eps, eps_expected)

        with self.subTest():
            self.assertEqual(correct_pes, True, "PES is not as expected")
        with self.subTest():
            self.assertEqual(correct_eps, True, "EPS is not as expected")
        with self.subTest():
            self.assertEqual(silent_true_source_frames, silent_true_source_frames_expected, "silent true source frames "
                                                                                            "not correctly detected")
        with self.subTest():
            self.assertEqual(silent_prediction_frames, silent_prediction_frames_expected, "Silent prediction frames not"
                                                                                          " correctly detected")
Example #2
    def test_eps_for_silent_target(self):
        true = np.ones((100,)) * 2
        true[50:60] = np.zeros((10,))
        predicted = np.ones((100,)) * 2
        predicted[50:60] = np.zeros((10,))

        window_size = 10
        hop_size = 5

        eps_expected = -120
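        # -120 dB corresponds to 10 * log10(1e-12): the prediction is silent wherever the target is silent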

        pes, eps, silent_true_source_frames, silent_prediction_frames = eval_silent_frames(true, predicted, window_size,
                                                                                           hop_size, False, True)

        self.assertEqual(eps, eps_expected, "EPS does not take silent target into account but it should")
Example #3
    def test_pes_simple(self):
        true = np.ones((40,)) * 2
        true[10:20] = np.zeros((10,))
        true[25:35] = np.zeros((10,))
        predicted = np.ones((40,)) * (-3)

        window_size = 10
        hop_size = 5

        pes, eps, silent_true_source_frames, silent_prediction_frames = eval_silent_frames(true, predicted, window_size,
                                                                                           hop_size,
                                                                                           eval_incomplete_last_frame=False,
                                                                                           eps_for_silent_target=False)
        pes_expected = np.array([10 * np.log10(10 * (-3)**2 + 10**(-12)), 10 * np.log10(10 * (-3)**2 + 10**(-12))])

        correct_pes = np.array_equal(pes, pes_expected)

        self.assertEqual(correct_pes, True, "PES array not as expected")
Example #4
    def test_eps_silent_target_last_frame(self):
        true = np.ones((43,)) * 2
        true[30:43] = np.zeros((13,))
        predicted = np.ones((43,)) * 4
        predicted[5:15] = np.zeros((10,))
        predicted[30:43] = np.zeros((13,))

        window_size = 10
        hop_size = 5

        pes, eps, silent_true_source_frames, silent_prediction_frames = eval_silent_frames(true, predicted, window_size,
                                                                                           hop_size, True, True)

        eps_expected = np.array([10 * np.log10(10 * 2**2 + 10**(-12)), -120, -120])

        correct_eps = np.array_equal(eps, eps_expected)

        self.assertEqual(correct_eps, True, "EPS does not take the last frame into account but it should")
Example #5
    def test_eps_simple(self):
        true = np.ones((43,)) * 2
        predicted = np.ones((43,)) * 4
        predicted[5:15] = np.zeros((10,))
        predicted[30:43] = np.zeros((13,))

        window_size = 10
        hop_size = 5

        pes, eps, silent_true_source_frames, silent_prediction_frames = eval_silent_frames(true, predicted, window_size,
                                                                                           hop_size,
                                                                                           eval_incomplete_last_frame=False,
                                                                                           eps_for_silent_target=False)
        eps_expected = np.array(
            [10 * np.log10(10 * 2 ** 2 + 10 ** (-12)), 10 * np.log10(10 * 2 ** 2 + 10 ** (-12))])

        correct_eps = np.array_equal(eps, eps_expected)

        self.assertEqual(correct_eps, True, "EPS array not as expected")
Example #6
import os

import museval
import numpy as np
import pandas as pd

import silent_frames_evaluation


def evaluate(references,
             estimates,
             output_dir,
             track_name,
             sample_rate,
             win=1.0,
             hop=1.0,
             mode='v4'):
    """
    Compute the BSS_eval metrics as well as PES and EPS. It is following the design concept of museval.eval_mus_track
    :param references: dict of reference sources {target_name: signal}, signal has shape: (nb_timesteps, np_channels)
    :param estimates: dict of user estimates {target_name: signal}, signal has shape: (nb_timesteps, np_channels)
    :param output_dir: path to output directory used to save evaluation results
    :param track_name: name that is assigned to TrackStore object for evaluated track
    :param win: evaluation window length in seconds, default 1
    :param hop: evaluation window hop length in second, default 1
    :param sample_rate: sample rate of test tracks (should be same as rate the model has been trained on)
    :param mode: BSSEval version, default to `v4`
    :return:
        bss_eval_data: museval.TrackStore object containing bss_eval evaluation scores
        silent_frames_data: Pandas data frame containing EPS and PES scores
    """

    eval_targets = list(estimates.keys())

    estimates_list = []
    references_list = []
    for target in eval_targets:
        estimates_list.append(estimates[target])
        references_list.append(references[target])

    # evaluate bss_eval and the PES, EPS metrics
    # save in a TrackStore object
    bss_eval_data = museval.TrackStore(win=win, hop=hop, track_name=track_name)

    # skip examples with a silent source because BSSeval metrics are not defined in this case
    skip = False
    for target in eval_targets:
        reference_energy = np.sum(references[target]**2)
        estimate_energy = np.sum(estimates[target]**2)
        if reference_energy == 0 or estimate_energy == 0:
            skip = True
            SDR = ISR = SIR = SAR = (np.ones((1,)) * (-np.inf),
                                     np.ones((1,)) * (-np.inf))
            print("skip {}, {} source is all zero".format(track_name, target))

    if not skip:

        SDR, ISR, SIR, SAR = museval.evaluate(references_list,
                                              estimates_list,
                                              win=int(win * sample_rate),
                                              hop=int(hop * sample_rate),
                                              mode=mode,
                                              padding=True)

    # add evaluation of PES and EPS
    PES, EPS, _, _ = silent_frames_evaluation.eval_silent_frames(
        true_source=np.array(references_list),
        predicted_source=np.array(estimates_list),
        window_size=int(win * sample_rate),
        hop_size=int(hop * sample_rate))

    # iterate over all targets
    for i, target in enumerate(eval_targets):
        values = {
            "SDR": SDR[i].tolist(),
            "SIR": SIR[i].tolist(),
            "ISR": ISR[i].tolist(),
            "SAR": SAR[i].tolist(),
        }

        bss_eval_data.add_target(target_name=target, values=values)

    # collect per-target PES and EPS scores in a data frame
    silent_frames_data = pd.DataFrame(
        [{
            'target': target,
            'PES': PES[i],
            'EPS': EPS[i],
            'track': track_name
        } for i, target in enumerate(eval_targets)],
        columns=['target', 'PES', 'EPS', 'track'])

    # save evaluation results if output directory is defined
    if output_dir:
        # validate against the schema
        bss_eval_data.validate()

        try:
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            with open(
                    os.path.join(output_dir, track_name.replace('/', '_')) +
                    '.json', 'w+') as f:
                f.write(bss_eval_data.json)
        except IOError:
            pass

    return bss_eval_data, silent_frames_data
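
A possible way to call evaluate, sketched under assumptions: two stereo targets with made-up names ("vocals", "accompaniment"), random noise standing in for real audio, and museval, pandas, and silent_frames_evaluation importable:

import numpy as np

sr = 44100  # assumed sample rate
rng = np.random.default_rng(0)

# signals follow the documented shape (nb_timesteps, nb_channels); 10 seconds of stereo noise as stand-ins
references = {
    "vocals": rng.standard_normal((10 * sr, 2)),
    "accompaniment": rng.standard_normal((10 * sr, 2)),
}
# pretend the estimates are slightly perturbed copies of the references
estimates = {name: sig + 0.1 * rng.standard_normal(sig.shape) for name, sig in references.items()}

bss_eval_data, silent_frames_data = evaluate(references,
                                             estimates,
                                             output_dir="eval_results",   # hypothetical output path
                                             track_name="demo/track_01",  # hypothetical track name
                                             sample_rate=sr)
print(silent_frames_data)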