Beispiel #1
0
    def save(self,
             save_audio=False,
             save_array=True,
             save_image=True,
             save_info=True,
             naming_by="midi_number"):
        """Write this sample's data products to the configured output folders.

        :param save_audio: if True, write the raw signal as a .wav to
            SAMPLES_AUDIO_PATH (sampled at FS).
        :param save_array: if True, save the log-spectrogram as .npy to
            SAMPLES_ARRAYS_PATH.
        :param save_image: if True, render the CQT plot and save it as .png to
            SAMPLES_IMAGES_PATH.
        :param save_info: if True, save partials info (distribution linear
            regressions, bins, amplitudes) as .npy files to SAMPLES_INFO_PATH.
        :param naming_by: file-stem scheme, either "midi_number" (MIDI note
            number) or "nameWithOctave" (pitch name, e.g. "A4").
        :raises ValueError: if naming_by is not a supported scheme.
        """
        # File stem shared by every output file:
        # "<note>_<duration>_<velocity>", duration rounded to 3 decimals.
        if naming_by == "midi_number":
            output_name = str(self.note_number) + "_" + str(round(self.duration, 3)) \
                          + "_" + str(self.velocity)
        elif naming_by == "nameWithOctave":
            output_name = self.pitch.nameWithOctave + "_" + str(round(self.duration, 3)) \
                          + "_" + str(self.velocity)
        else:
            # ValueError is the conventional type for a bad argument value and
            # stays compatible with callers that catch the broader Exception.
            raise ValueError("Parameter naming_by not understood.")

        # Save the audio
        if save_audio:
            wav.write(
                Path(SAMPLES_AUDIO_PATH) / Path(output_name + '.wav'), FS,
                self.signal)

        # Save array
        if save_array:
            np.save(Path(SAMPLES_ARRAYS_PATH) /
                    Path(output_name + '_spectrogram' + '.npy'),
                    self.spectrogram_log,
                    allow_pickle=True)

        # Save image
        if save_image:
            plot_cqt(self.spectrogram_log,
                     self.time_vector,
                     fig_title="Sample " + str(self),
                     show=False)
            plt.savefig(Path(SAMPLES_IMAGES_PATH) / Path(output_name + '.png'),
                        dpi=DPI,
                        format='png')
            # Close the figure so repeated saves don't accumulate open figures.
            plt.close()

        # Save info
        if save_info:
            np.save(Path(SAMPLES_INFO_PATH) /
                    Path(output_name + '_distribution' + '.npy'),
                    self.partials_distribution.linear_regressions,
                    allow_pickle=True)
            np.save(Path(SAMPLES_INFO_PATH) /
                    Path(output_name + '_bins' + '.npy'),
                    self.partials_bins,
                    allow_pickle=True)
            np.save(Path(SAMPLES_INFO_PATH) /
                    Path(output_name + '_amplitudes' + '.npy'),
                    self.partials_amplitudes,
                    allow_pickle=True)
Beispiel #2
0
def example_1():
    """Load a signal, compute its CQT, and plot it, logging each stage's duration."""
    def _timed(label, func, *args):
        # Run func(*args), log the wall-clock time it took, return the result.
        sta = time.time()
        result = func(*args)
        log.info(label + " " + str(round(time.time() - sta, 3)) + " seconds.")
        return result

    _signal = _timed("Time to recover signal:", signal_from_file, 'anastasia')
    # Original message read "Time compute the CQT" — typo fixed.
    _spectrogram, _time_vector = _timed("Time to compute the CQT:", cqt, _signal)
    _timed("Time to plot:", plot_cqt, _spectrogram, _time_vector)
Beispiel #3
0
            harmonic=True,
            frequency_decay_dependency=decay)
        samples_set = SamplesSet.from_synthesis(partials_distribution)

        return samples_set
    else:
        raise ValueError('Invalid parameter samples_type.')


if __name__ == '__main__':
    # Synthesize a test piece with the basic samples set and show its CQT.
    _samples = get_samples_set('basic')
    _score = midi2piece('tempest_3rd-start')
    _audio = _samples.synthesize(_score)

    _spec, _times = cqt(_audio)
    plot_cqt(_spec, _times)

    # Flip to True to also hear the synthesized signal.
    play = False
    if play:
        sd.play(_audio, FS)

    # Alternative sample-set constructors, kept for reference:
    # _samples_name = 'samples'
    # _instrument = "MyPiano"
    #
    # # _samples_set = SamplesSet.from_directory("MyPiano", "samples", load_all=LOAD_ALL)
    # _samples_set = SamplesSet.from_midi_file("MyPiano", "samples", save_audio=True, save_array=True,
    #                                          save_image=True, save_info=True)
def cqt(signal, numpy=True, db=True):
    """Compute the constant-Q transform of a 1-D signal.

    Returns a (spectrogram, time_vector) pair. The time vector is always a
    NumPy array of frame times (one per HOP_LENGTH samples, in seconds).
    The spectrogram is the first batch element of cqt_layer's output: a NumPy
    array when numpy=True (with the CUDA cache cleared afterwards), otherwise
    a torch tensor. With db=True magnitudes are 20*log10(x + EPS).
    """
    n_frames = np.ceil(signal.size / HOP_LENGTH).astype(int)
    time_array = np.arange(n_frames) / (FS / HOP_LENGTH)

    tensor_in = torch.tensor(signal, device=DEVICE, dtype=torch.float)
    spectrogram = cqt_layer(tensor_in, normalization_type='wrap')

    if db:
        spectrogram = 20 * torch.log10(spectrogram + EPS)

    if not numpy:
        # time_tensor = torch.tensor(time_array)
        return spectrogram[0, :, :], time_array

    out = spectrogram.cpu().numpy()[0, :, :]
    torch.cuda.empty_cache()
    return out, time_array


if __name__ == '__main__':
    # Two-second test signal: a dirac comb (one-sample pulses every ~33 ms)
    # plus a faint 440 Hz sine.
    _signal = np.zeros(FS * 2)
    dirac_width = 1
    pulse_period = int(FS * 0.033)

    for offset in range(dirac_width):
        _signal[offset::pulse_period] = 1

    _signal = _signal + 0.1 * np.sin(2 * np.pi * 440 * np.arange(FS * 2) / FS)

    _spectrogram, _time_array = cqt(_signal)
    plot_cqt(_spectrogram, _time_array, v_min=-100)
Beispiel #5
0
sample_name = "A2_12.787_113"

# Partials: CQT bin positions of the harmonics, offset from the fundamental
# by log2 of the harmonic number (BINS_PER_OCTAVE bins per octave).
fundamental_bin = 2 * BINS_PER_OCTAVE

partials_pos = fundamental_bin + np.round(
    np.log2(np.arange(N_PARTIALS) + 1) * BINS_PER_OCTAVE).astype(int)

start = 2  # in seconds
end = 6  # in seconds
_signal = signal_from_file(sample_name, SAMPLES_AUDIO_PATH)

# BUG FIX: cqt() returns a (spectrogram, time_vector) tuple, and indexing the
# tuple itself with [:, a:b] raises TypeError. Unpack first, then crop both
# the spectrogram and the time vector to the [start, end) window.
_spectrogram, _time_vector = cqt(_signal, numpy=True)
_start_frame = np.floor(start / TIME_RESOLUTION).astype(int)
_end_frame = np.ceil(end / TIME_RESOLUTION).astype(int)
_spectrogram = _spectrogram[:, _start_frame:_end_frame]
_time_vector = _time_vector[_start_frame:_end_frame]
plot_cqt(_spectrogram, _time_vector, fig_title=sample_name)

a_erosion_1 = erode(_spectrogram, _strel_1, _origin_1)
plot_cqt(a_erosion_1,
         _time_vector,
         fig_title="Erosion - 1",
         v_min=-40,
         c_map='Greys')

# Spectrogram: amplitude of each partial over time, and the partials'
# initial amplitudes relative to the loudest one.
partials_amplitudes = _spectrogram[partials_pos, :]
partials_distribution = partials_amplitudes[:, 0] - np.max(
    partials_amplitudes[:, 0])

plt.figure()
for i in range(len(partials_pos)):
Beispiel #6
0

if __name__ == '__main__':
    # Parameters
    play = True

    # Create the signal, logging how long each stage takes.
    t0 = time.time()
    samples_set = get_samples_set('basic')
    log.info("Time to create samples set: " + str(round(time.time() - t0, 3)) + " seconds.")

    t0 = time.time()
    piece = midi2piece('prelude_em')
    signal = samples_set.synthesize(piece)
    log.info("Time to synthesize the signal: " + str(round(time.time() - t0, 3)) + " seconds.")

    # Time-frequency transform of the signal
    t0 = time.time()
    spectrogram, time_vector = cqt(signal)
    log.info("Time to compute the CQT of the signal: " + str(round(time.time() - t0, 3)) + " seconds.")

    if play:
        sd.play(signal, FS)
    plot_cqt(spectrogram, time_vector)

    # Morphological transform of the signal

Beispiel #7
0
from time_frequency import cqt
import sounddevice as sd
from parameters import *
from utils import to_db, db_to_velocity
from librosa import hz_to_midi
from tqdm import tqdm
from nnMorpho.operations import erosion, dilation, opening, closing

# Build a test signal: synthesize a MIDI piece with the basic samples set.
samples_set = get_samples_set('basic')

piece = midi2piece('prelude_em')

signal = samples_set.synthesize(piece)
# numpy=False keeps the CQT as a torch tensor (on DEVICE) so the nnMorpho
# operations below can consume it directly.
spectrogram, time_vector = cqt(signal, numpy=False)

plot_cqt(spectrogram, time_vector, numpy=False)

# sd.play(signal, FS)

# Leakage
# 3x1 structuring element spanning three adjacent frequency bins; origin
# (1, 0) places it on the centre bin, with negative weights on the outer
# bins — presumably matching the spectral-leakage profile of a peak in dB;
# TODO(review): confirm the -5.8/-6 values against the CQT kernel.
structural_element_leakage = torch.tensor([[-5.8], [0], [-6]], device=DEVICE)
origin_leakage = (1, 0)

# Greyscale erosion of the spectrogram with the leakage element.
erosion_leakage = erosion(spectrogram,
                          structural_element_leakage,
                          origin_leakage,
                          border_value='euclidean')
plot_cqt(erosion_leakage,
         time_vector,
         fig_title="Erosion of the leakage",
         numpy=False)