Example #1
cl_freq = 440
cl_pitch_label = "?"
cl_samp_rate = 44100

sine_samp_rate = 800

signal_params = {"duration": duration, "options": {"normalize": True}}

#
# Generate signals
#

# Clarinet signal using predefined amplitudes
cl_signal = sts.ClarinetApproxSignal(freq=cl_freq,
                                     samp_rate=cl_samp_rate,
                                     **signal_params)

# Superposition of two sines
sine_signal = sts.StationarySignal(sin_freqs=[25, 80],
                                   sin_coeffs=[1, 0.5],
                                   samp_rate=sine_samp_rate,
                                   **signal_params)

# FFT of clarinet
cl_fft = fft(cl_signal.data)
cl_recon = ifft(cl_fft)

# Only consider positive frequencies (since the spectrum is symmetric)
cl_slice_num = cl_signal.samp_nums.shape[0] // 2
# Nyquist: the maximum discernible frequency is the sampling rate / 2
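
The slice above works because the FFT of a real-valued signal is conjugate-symmetric, so only the first half of the bins carries independent information, up to the Nyquist limit of samp_rate / 2. Below is a minimal, self-contained sketch of the same idea using only NumPy/SciPy (the sts.* classes above belong to the surrounding project and are not needed for it):

import numpy as np
from scipy.fft import fft, fftfreq

samp_rate = 800
duration = 1.0
t = np.arange(int(samp_rate * duration)) / samp_rate
x = np.sin(2 * np.pi * 25 * t) + 0.5 * np.sin(2 * np.pi * 80 * t)

spectrum = fft(x)
freqs = fftfreq(x.shape[0], 1 / samp_rate)

half = x.shape[0] // 2        # keep only the positive-frequency bins
print(freqs[:half].max())     # 399.0, just under the Nyquist limit of 400 Hz
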
Example #2
short_right_sep_names = [n.split(" ")[0] for n in right_sep_names]
short_sep_names = short_left_sep_names + short_right_sep_names

# Generate signal
chords = [
    m21.chord.Chord((soprano[i], alto[i], tenor[i], bass[i]))
    for i in range(len(soprano))
]

# row = chord, column = melody
chord_freqs = np.array([[pitch.frequency for pitch in chord.pitches]
                        for chord in chords])

# One list of note signals per melody (i.e. per column of chord_freqs)
note_sigs = [[
    sts.ClarinetApproxSignal(freq=freq,
                             duration=note_duration,
                             samp_rate=samp_rate) for freq in chord_freqs.T[i]
] for i in range(chord_freqs.shape[1])]

melody_data = np.array([nst.NonStationarySignal(ns).data for ns in note_sigs])

left_signal = np.sum((left_intensities * melody_data.T).T, axis=0)
right_signal = np.sum(((1 - left_intensities) * melody_data.T).T, axis=0)
src = np.array((left_signal, right_signal))

_, left_recons, right_recons, extra = adress.adress(
    left_signal=left_signal,
    right_signal=right_signal,
    samp_rate=samp_rate,
    left_ds=left_ds,
    left_Hs=left_Hs,
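
The stereo mix above pans each melody by scaling it with a per-part intensity: the left channel gets intensity * melody, the right channel gets (1 - intensity) * melody, and the scaled melodies are summed per channel (adress.adress then separates sources from that mix; its remaining arguments are not shown in this snippet). A small sketch of just the panning step, using plain NumPy and hypothetical toy data:

import numpy as np

rng = np.random.default_rng(0)
melody_data = rng.standard_normal((4, 1000))       # 4 melodies, 1000 samples each
left_intensities = np.array([0.2, 0.4, 0.6, 0.8])  # pan position per melody

left = np.sum((left_intensities * melody_data.T).T, axis=0)
right = np.sum(((1 - left_intensities) * melody_data.T).T, axis=0)

# The double transpose is just broadcasting; this is an equivalent formulation:
assert np.allclose(left, np.sum(left_intensities[:, None] * melody_data, axis=0))
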
Example #3
#
# Computation
#

# Format names
short_sep_names = [n.split(" ")[0] for n in sep_names]

# Generate signal
chords = [m21.chord.Chord((soprano[i], alto[i], tenor[i], bass[i])) for i in range(len(soprano))]

# row = chord, column = melody
chord_freqs = np.array([[pitch.frequency for pitch in chord.pitches] for chord in chords])


# One list of note signals per melody (i.e. per column of chord_freqs)
note_sigs = [[sts.ClarinetApproxSignal(freq=freq,
                                       duration=note_duration,
                                       samp_rate=samp_rate)
              for freq in chord_freqs.T[i]]
             for i in range(chord_freqs.shape[1])]

melody_data = np.array([nst.NonStationarySignal(ns).data for ns in note_sigs])

left_signal = np.sum((left_intensities * melody_data.T).T, axis=0)
right_signal = np.sum(((1 - left_intensities) * melody_data.T).T, axis=0)
src = np.array((left_signal, right_signal))

# Compute the audio at each position
true_signals = {}
for li in set(left_intensities):
    part_args = np.where(left_intensities == li)   # Which parts have the same intensity?
    true_signals[li] = np.sum(melody_data[part_args], axis=0)
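
The loop above groups parts that share a left intensity (i.e. that sit at the same stereo position) and sums them into a reference signal for that position. The same logic with hypothetical toy numbers, to make the grouping concrete:

import numpy as np

melody_data = np.arange(12, dtype=float).reshape(4, 3)  # 4 parts, 3 samples each
left_intensities = np.array([0.3, 0.7, 0.3, 0.5])

true_signals = {}
for li in set(left_intensities):
    part_args = np.where(left_intensities == li)         # parts at this position
    true_signals[li] = np.sum(melody_data[part_args], axis=0)

# Parts 0 and 2 share intensity 0.3, so their rows are summed:
print(true_signals[0.3])                                  # [ 6.  8. 10.]
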
Example #4
signal_params = {
    "duration": duration,
    "samp_rate": samp_rate,
    # "freq": cl_freq,
    "options": {
        "normalize": True
    }
}

#
# Generate signals
#

# Generate clarinet signal using predefined amplitudes
cl_signal = sts.ClarinetApproxSignal(freq=cl_freq, **signal_params)

# Windowing the clarinet signal
w = window(cl_signal.samp_nums.shape[0], **window_params)
cl_window = cl_signal.data * w

# Generate clarinet Fourier Transform
cl_fft = fft(cl_signal.data)
cl_window_fft = fft(cl_window)

# Reconstructed signals from FFT
cl_ifft = ifft(cl_fft)
cl_window_ifft = ifft(cl_window_fft)

# Only consider positive frequencies (since the spectrum is symmetric)
cl_slice_num = cl_signal.samp_nums.shape[0] // 2
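
Here `window` and `window_params` are defined elsewhere in the original script. A self-contained sketch of the same windowing-plus-FFT round trip, assuming scipy.signal.get_window with a Hann window as a stand-in taper:

import numpy as np
from scipy.fft import fft, ifft
from scipy.signal import get_window

samp_rate = 44100
duration = 0.01
t = np.arange(int(samp_rate * duration)) / samp_rate
x = np.sin(2 * np.pi * 233.08 * t)     # stand-in for cl_signal.data

w = get_window("hann", x.shape[0])     # taper the signal edges towards zero
xw = x * w

spectrum = fft(xw)
recon = ifft(spectrum).real            # imaginary part is round-off only

assert np.allclose(recon, xw)
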
Example #5
freqs = notes.major_scale(scale_name=scale_name,
                          n_notes=n_notes,
                          output="freq")
durations = [0.25] * n_notes
chop_ranges = [None] * n_notes

window = "blackmanharris"

#
# Generate data
#

# Generate signal
signals = [
    sts.ClarinetApproxSignal(freq=freqs[i],
                             duration=durations[i],
                             samp_rate=samp_rate,
                             chop_range=chop_ranges[i])
    for i in range(len(freqs))
]
nst = nsts.NonStationarySignal(signals)

# Compute FFT to demonstrate that FFT does not retain time information.
nst_fft = fft(nst.data)
nst_fft_x = fftfreq(nst.samp_nums.shape[0], 1 / nst.samp_rate)

# Only consider positive frequencies (since the spectrum is symmetric)
nst_slice_num = nst.samp_nums.shape[0] // 2

# Index of the max frequency present in the signal (less than some tolerance)
# cond = np.abs(nst_fft)[:nst_slice_num] / nst.samp_rate < freq_absence_tol
# fft_max_present_freq_idx = len(cond) - np.where(cond == False)[0][-1] - 1
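
The comment above makes a general point: the magnitude spectrum of a concatenated signal does not reveal the order of its notes. For two equal-length segments, swapping them is a circular shift of the whole signal, which changes only the phase of the DFT, not its magnitude. A minimal demonstration with sine "notes" standing in for the clarinet signals:

import numpy as np
from scipy.fft import fft

samp_rate = 8000
t = np.arange(samp_rate // 4) / samp_rate    # 0.25 s per note
note_a = np.sin(2 * np.pi * 440 * t)
note_b = np.sin(2 * np.pi * 660 * t)

spec_ab = np.abs(fft(np.concatenate([note_a, note_b])))
spec_ba = np.abs(fft(np.concatenate([note_b, note_a])))

# Essentially zero: the plain FFT cannot tell which note came first.
print(np.max(np.abs(spec_ab - spec_ba)))
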
Example #6
duration = 0.01
samp_rate = 44100

cl_f = 466.16 / 2  # 233.08 Hz: sounding pitch Bb3 for written pitch C4 on a Bb clarinet
cl_pitch_label = "C4"

signal_params = {
    "duration": duration,
    "samp_rate": samp_rate,
    "freq": cl_f,
    "options": {
        "normalize": True
    }
}

# Generate clarinet signal using predefined amplitudes
cl_signal = pcs.ClarinetApproxSignal(**signal_params)

# Figure options
cl_fig = FigData(xs=cl_signal.samp_nums,
                 ys=[cl_signal.data],
                 line_options=[{"label": f"f0 = {cl_f}Hz"}],
                 xlabel="Time (seconds)",
                 ylabel="Amplitude",
                 title=f"Synthesized clarinet (written pitch {cl_pitch_label})",
                 figsize=(5, 3)
                 )

# Plot signals
aplot.single_plot(cl_fig)
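
For reference, a roughly equivalent plot with plain Matplotlib; FigData and aplot.single_plot are wrappers from the surrounding project, and this sketch only assumes that cl_signal exposes .samp_nums and .data as used above:

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(5, 3))
ax.plot(cl_signal.samp_nums, cl_signal.data, label=f"f0 = {cl_f}Hz")
ax.set_xlabel("Time (seconds)")
ax.set_ylabel("Amplitude")
ax.set_title(f"Synthesized clarinet (written pitch {cl_pitch_label})")
ax.legend()
plt.show()
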