Code example #1
import os

import numpy as np
import tqdm

import util  # project-local helper module (rms, snr_db, write_wav)


def explain(model, input, output_filename_prefix, sample_rate, output_path):
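    """Denoise input['noisy'] fragment by fragment with the given model.

    The denoised signal, the trimmed noisy signal, and the estimated noise are
    written as WAV files under output_path; when input['clean'] is provided,
    the input and output SNR in dB are embedded in the output filenames.
    Intermediate layer activations are also plotted for every batch.
    """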

    batch_size = 1
    if len(input['noisy']) < model.receptive_field_length:
        raise ValueError(
            'Input is not long enough to be used with this model.')

    num_output_samples = input['noisy'].shape[0] - (
        model.receptive_field_length - 1)
    num_fragments = int(np.ceil(num_output_samples /
                                model.target_field_length))
    num_batches = int(np.ceil(num_fragments / batch_size))

    denoised_output = []
    noise_output = []
    num_pad_values = 0
    fragment_i = 0
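    # Process the signal batch by batch; each batch holds batch_size fragments
    # of length model.input_length.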
    for batch_i in tqdm.tqdm(range(0, num_batches)):

        if batch_i == num_batches - 1:  # The last batch may hold fewer fragments
            batch_size = num_fragments - batch_i * batch_size

        input_batch = np.zeros((batch_size, model.input_length))

        #Assemble batch
        for batch_fragment_i in range(0, batch_size):

            if fragment_i + model.target_field_length > num_output_samples:
                remainder = input['noisy'][fragment_i:]
                current_fragment = np.zeros((model.input_length, ))
                current_fragment[:remainder.shape[0]] = remainder
                num_pad_values = model.input_length - remainder.shape[0]
            else:
                current_fragment = input['noisy'][fragment_i:fragment_i +
                                                  model.input_length]

            input_batch[batch_fragment_i, :] = current_fragment
            fragment_i += model.target_field_length

        denoised_output_fragments = model.denoise_batch(
            {'data_input': input_batch})
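        # Plot the intermediate layer activations for this batch
        # (plot_layer_outputs is assumed to be defined elsewhere in this module).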
        layer_outputs = model.get_layer_outputs(input_batch)
        plot_layer_outputs(layer_outputs, 2, output_path)
        # A two-output model returns [denoised, noise]; otherwise a single array.
        if type(denoised_output_fragments) is list:
            noise_output_fragment = denoised_output_fragments[1]
            denoised_output_fragment = denoised_output_fragments[0]
        else:
            denoised_output_fragment = denoised_output_fragments
            noise_output_fragment = None

        denoised_output_fragment = denoised_output_fragment[
            :, model.target_padding:model.target_padding + model.target_field_length]
        denoised_output_fragment = denoised_output_fragment.flatten().tolist()

        if noise_output_fragment is not None:
            noise_output_fragment = noise_output_fragment[
                :, model.target_padding:model.target_padding + model.target_field_length]
            noise_output_fragment = noise_output_fragment.flatten().tolist()

        if type(denoised_output_fragment) is float:
            denoised_output_fragment = [denoised_output_fragment]
        if type(noise_output_fragment) is float:
            noise_output_fragment = [noise_output_fragment]

        denoised_output = denoised_output + denoised_output_fragment
        if noise_output_fragment is not None:
            noise_output = noise_output + noise_output_fragment

    denoised_output = np.array(denoised_output)
    noise_output = np.array(noise_output)

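    # Drop the zero padding that was appended to the final fragment.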
    if num_pad_values != 0:
        denoised_output = denoised_output[:-num_pad_values]
        noise_output = noise_output[:-num_pad_values]

    valid_noisy_signal = input['noisy'][
        model.half_receptive_field_length:
        model.half_receptive_field_length + len(denoised_output)]

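    # With a clean reference available, derive the noise and the input/output SNR.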
    if input['clean'] is not None:
        input['noise'] = input['noisy'] - input['clean']

        valid_clean_signal = input['clean'][
            model.half_receptive_field_length:
            model.half_receptive_field_length + len(denoised_output)]

        noise_in_denoised_output = denoised_output - valid_clean_signal

        rms_clean = util.rms(valid_clean_signal)
        rms_noise_out = util.rms(noise_in_denoised_output)
        rms_noise_in = util.rms(input['noise'])

        new_snr_db = int(np.round(util.snr_db(rms_clean, rms_noise_out)))
        initial_snr_db = int(np.round(util.snr_db(rms_clean, rms_noise_in)))

        output_clean_filename = output_filename_prefix + 'clean.wav'
        output_clean_filepath = os.path.join(output_path,
                                             output_clean_filename)
        util.write_wav(valid_clean_signal, output_clean_filepath, sample_rate)

        output_denoised_filename = output_filename_prefix + 'denoised_%ddB.wav' % new_snr_db
        output_noisy_filename = output_filename_prefix + 'noisy_%ddB.wav' % initial_snr_db
    else:
        output_denoised_filename = output_filename_prefix + 'denoised.wav'
        output_noisy_filename = output_filename_prefix + 'noisy.wav'

    output_noise_filename = output_filename_prefix + 'noise.wav'

    output_denoised_filepath = os.path.join(output_path,
                                            output_denoised_filename)
    output_noisy_filepath = os.path.join(output_path, output_noisy_filename)
    output_noise_filepath = os.path.join(output_path, output_noise_filename)

    util.write_wav(denoised_output, output_denoised_filepath, sample_rate)
    util.write_wav(valid_noisy_signal, output_noisy_filepath, sample_rate)
    util.write_wav(noise_output, output_noise_filepath, sample_rate)
Code example #2
File: test.py  Project: yarinbar/sta_project
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn.functional as F

import util  # project-local helper module providing snr_db


def save_result(exp_path, model_dict, dataloader):
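    """Evaluate each model in model_dict on dataloader and log the results.

    Average L2 loss, L1 loss, and SNR (dB) are computed per model, plus an
    'out_of_the_box' baseline of the noisy input against the clean target.
    One example from the first batch is plotted per model; the figure is saved
    as PDF/SVG and the metrics as a CSV table under exp_path.
    """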

    rows = 2
    cols = len(model_dict) // rows

    fig, my_plots = plt.subplots(rows, cols)

    fig.set_figheight(8)
    fig.set_figwidth(16)
    fig.tight_layout(pad=6)

    data = {
        "model": ["out_of_the_box"],
        "l2_loss": [],
        "l1_loss": [],
        "snr_db": []
    }

    l2 = 0
    l1 = 0
    snr_db = 0
    n_batches = len(dataloader)

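    # Baseline: score the raw noisy input against the clean target.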
    for batch, (clean, noisy) in enumerate(dataloader):

        if batch == 0:

            clean = clean.unsqueeze(dim=1)
            noisy = noisy.unsqueeze(dim=1)

            plot_clean = clean[0, 0, :]
            plot_noisy = noisy[0, 0, :]

        l2 += float(F.mse_loss(noisy.float(), clean.float()).data)
        l1 += float(F.l1_loss(noisy.float(), clean.float()).data)
        snr_db += float(util.snr_db(clean.cpu(), noisy.cpu()))

    # Record the out-of-the-box (noisy vs. clean) loss and SNR
    data["l2_loss"].append(l2 / n_batches)
    data["l1_loss"].append(l1 / n_batches)
    data["snr_db"].append(snr_db / n_batches)

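    # Score each model and plot its output on one example from the first batch.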
    curr_row = 0
    for i, (model_name, model) in enumerate(model_dict.items()):

        l2 = 0
        l1 = 0
        snr_db = 0

        for batch, (clean, noisy) in enumerate(dataloader):

            clean = clean.unsqueeze(dim=1)
            noisy = noisy.unsqueeze(dim=1)

            curr_denoised = model(noisy.float())

            if batch == 0:
                plot_denoised = model(plot_noisy.view((1, 1, -1)).float())

                curr_loss = F.mse_loss(
                    plot_denoised.view(-1).float(), plot_clean.float())
                curr_plt = my_plots[curr_row][i % cols]

                axis = torch.arange(0, plot_clean.shape[-1], 1)

                curr_plt.plot(axis,
                              plot_noisy.cpu(),
                              label="noisy",
                              color="lightsteelblue")
                curr_plt.plot(axis,
                              plot_clean.cpu(),
                              label="clean",
                              color="yellowgreen")
                curr_plt.plot(axis,
                              plot_denoised.view(-1).detach().cpu(),
                              label=model_name,
                              color="salmon")

                curr_plt.legend()
                curr_plt.axis(xmin=0, xmax=128)
                curr_plt.axis(ymin=-1, ymax=2)

                exp_data = str(exp_path.name).split("_")

                curr_plt.set_title(
                    "{}\ntested on {}, noise std {}\nL2 Loss {:.6f}".format(
                        model_name, exp_data[0], exp_data[-1], curr_loss),
                    fontsize=12)

            l2 += float(F.mse_loss(curr_denoised, clean.float()).data)
            l1 += float(F.l1_loss(curr_denoised, clean.float()).data)
            snr_db += float(util.snr_db(clean.cpu(), curr_denoised.cpu()))

        data["model"].append(model_name)
        data["l2_loss"].append(l2 / n_batches)
        data["l1_loss"].append(l1 / n_batches)
        data["snr_db"].append(snr_db / n_batches)

        if (i + 1) % cols == 0:
            curr_row += 1

    #-------------- DATA LOGGING ------------

    fig_path = exp_path / "result_figure.pdf"
    fig.savefig(fig_path)

    fig_path = exp_path / "result_figure.svg"
    fig.savefig(fig_path)

    csv_path = exp_path / "result_table.csv"
    df = pd.DataFrame(data)
    df.to_csv(csv_path)