Code Example #1
def plot():
    # plot onset detection results
    fig = plt.figure(1, figsize=(12, 12))
    plt.subplot(3, 1, 1)
    plt.title('Onset detection with ' + odf.__class__.__name__)
    plt.plot(audio, '0.4')
    plt.subplot(3, 1, 2)
    trplot.plot_detection_function(onset_det.odf, hop_size)
    trplot.plot_detection_function(onset_det.threshold, hop_size, "green")
    plt.subplot(3, 1, 3)
    trplot.plot_onsets(onsets, 1.0)
    plt.plot(audio, '0.4')
    plt.show()
Code Example #2
File: ModalPeaks.py, Project: EQ4/EvalOnsets
def plot():
    # plot onset detection results
    fig = plt.figure(1, figsize=(12, 12))
    plt.subplot(3, 1, 1)
    plt.title('Onset detection with ' + odf.__class__.__name__)
    plt.plot(audio, '0.4')
    plt.subplot(3, 1, 2)
    trplot.plot_detection_function(onset_det.odf, hop_size)
    trplot.plot_detection_function(onset_det.threshold, hop_size, "green")
    plt.subplot(3, 1, 3)
    trplot.plot_onsets(onsets, 1.0)
    plt.plot(audio, '0.4')
    plt.show()
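
The plot() helper shown in Examples #1 and #2 is an excerpt and is not self-contained: it reads audio, odf, onset_det, onsets and hop_size from the enclosing module, and expects plt and trplot to be bound to plotting modules. A minimal sketch of the context the helper appears to assume, inferred from the other examples on this page (the module aliases are an assumption, not part of the original file):

# Context assumed by the plot() helper above (a sketch, not the original file).
import matplotlib.pyplot as plt
import modal.ui.plot as trplot   # ASSUMED alias; provides plot_detection_function / plot_onsets

# Module-level names the helper closes over:
#   audio      - 1-D numpy array of audio samples
#   odf        - a modal detection function object, e.g. modal.ComplexODF()
#   onset_det  - the onset detection object whose .odf and .threshold are plotted
#   onsets     - detected onset positions, in samples
#   hop_size   - hop size in samples used when computing the detection function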
Code Example #3
i = 0
audio_pos = 0
# step through the audio one hop at a time, computing the ODF frame by frame
while audio_pos <= len(audio) - odf.get_frame_size():
    frame = audio[audio_pos:audio_pos + odf.get_frame_size()]
    odf_value = odf.process_frame(frame)
    odf_values.append(odf_value)
    # is_onset returns (onset_flag, threshold) when return_threshold=True
    det_results = onset_det.is_onset(odf_value, return_threshold=True)
    if det_results[0]:
        onsets.append(i * odf.get_hop_size())  # onset position in samples
    threshold.append(det_results[1])
    audio_pos += odf.get_hop_size()
    i += 1
run_time = time.time() - start_time

print "Number of onsets detected:", len(onsets)
print "Running time:", run_time, "seconds"
print "Seconds per frame:", run_time / i

# plot onset detection results
fig = plt.figure(1, figsize=(12, 12))
plt.subplot(3, 1, 1)
plt.title('Real-time onset detection with ' + odf.__class__.__name__)
plt.plot(audio, '0.4')
plt.subplot(3, 1, 2)
trplot.plot_detection_function(odf_values, odf.get_hop_size())
trplot.plot_detection_function(threshold, odf.get_hop_size(), "green")
plt.subplot(3, 1, 3)
trplot.plot_onsets(onsets)
plt.plot(audio, '0.4')
plt.show()
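
Example #3 shows only the frame-by-frame (real-time style) loop and its plotting code; the setup it depends on is omitted. Below is a sketch of that setup, assembled from calls that appear elsewhere on this page; the input file name and the module paths marked ASSUMED are assumptions:

# Setup assumed by the real-time loop in Example #3 (a sketch, not the original file).
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
import modal
import modal.ui.plot as trplot            # ASSUMED alias for modal's plotting helpers
import modal.onsetdetection as od         # ASSUMED module path for OnsetDetection

sampling_rate, audio = wavfile.read("test.wav")   # hypothetical input file
audio = np.asarray(audio, dtype=np.double)
audio /= np.max(audio)

frame_size = 2048
hop_size = 512
odf = modal.ComplexODF()
odf.set_frame_size(frame_size)
odf.set_hop_size(hop_size)
odf.set_sampling_rate(sampling_rate)

onset_det = od.OnsetDetection()

odf_values = []
threshold = []
onsets = []
start_time = time.time()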
Code Example #4
File: example.py, Project: junk16/modal
sampling_rate, audio = wavfile.read(file_name)
audio = np.asarray(audio, dtype=np.double)
audio /= np.max(audio)

frame_size = 2048
hop_size = 512

odf = modal.ComplexODF()
odf.set_hop_size(hop_size)
odf.set_frame_size(frame_size)
odf.set_sampling_rate(sampling_rate)
odf_values = np.zeros(len(audio) // hop_size, dtype=np.double)  # // so the length is an integer
odf.process(audio, odf_values)

onset_det = od.OnsetDetection()
onset_det.peak_size = 3
onsets = onset_det.find_onsets(odf_values) * odf.get_hop_size()

# plot onset detection results
fig = plt.figure(1, figsize=(12, 12))
plt.subplot(3, 1, 1)
plt.title("Onset detection with " + odf.__class__.__name__)
plt.plot(audio, "0.4")
plt.subplot(3, 1, 2)
trplot.plot_detection_function(onset_det.odf, hop_size)
trplot.plot_detection_function(onset_det.threshold, hop_size, "green")
plt.subplot(3, 1, 3)
trplot.plot_onsets(onsets, 1.0)
plt.plot(audio, "0.4")
plt.show()
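
The onsets computed in Example #4 are sample positions (peak index times hop size). A small follow-up sketch, using only names already defined in the example, converts them to times in seconds:

# Convert detected onset positions from samples to seconds
# (uses onsets and sampling_rate from Example #4 above).
onset_times = np.asarray(onsets, dtype=np.double) / sampling_rate
for t in onset_times:
    print("onset at %.3f s" % t)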
Code Example #5
from modal.ui.plot import (plot_detection_function, plot_onsets)
import matplotlib.pyplot as plt
from scipy.io.wavfile import read

# read audio file
audio = read("c:/temp/test.wav")[1]
# values between -1 and 1
audio = audio / 32768.0
# create detection function
codf = ComplexODF()
odf = codf.process(audio)
# create onset detection object
od = OnsetDetection()
hop_size = codf.get_hop_size()
onsets = od.find_onsets(odf) * hop_size
# plot onset detection results
plt.subplot(2, 1, 1)
plt.title("Audio And Detected Onsets")
plt.ylabel("Sample Value")
plt.xlabel("Sample Number")
plt.plot(audio, "0.4")
plot_onsets(onsets)
plt.subplot(2, 1, 2)
plt.title("Detection Function And Threshold")
plt.ylabel("Detection Function Value")
plt.xlabel("Sample Number")
plot_detection_function(odf, hop_size)
thresh = od.threshold
plot_detection_function(thresh, hop_size, "green")
plt.show()
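
Example #5 uses ComplexODF and OnsetDetection without showing their imports, and calls codf.process(audio) with a single argument, unlike the two-argument process(audio, odf_values) form in Examples #4 and #6; this may reflect a different version of the library. A sketch of the imports the snippet appears to need follows; the module paths marked ASSUMED are assumptions and may differ between modal versions:

# Imports assumed by Example #5 (a sketch).
from modal import ComplexODF                       # modal.ComplexODF is used directly in Examples #4 and #6
from modal.onsetdetection import OnsetDetection    # ASSUMED module path
from modal.ui.plot import plot_detection_function, plot_onsets
import matplotlib.pyplot as plt
from scipy.io.wavfile import read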
Code Example #6
sampling_rate, audio = wavfile.read(file_name)
audio = np.asarray(audio, dtype=np.double)
audio /= np.max(audio)

frame_size = 2048
hop_size = 512

odf = modal.ComplexODF()
odf.set_hop_size(hop_size)
odf.set_frame_size(frame_size)
odf.set_sampling_rate(sampling_rate)
odf_values = np.zeros(len(audio) // hop_size, dtype=np.double)  # // so the length is an integer
odf.process(audio, odf_values)

onset_det = od.OnsetDetection()
onset_det.peak_size = 3
onsets = onset_det.find_onsets(odf_values) * odf.get_hop_size()

# plot onset detection results
fig = plt.figure(1, figsize=(12, 12))
plt.subplot(3, 1, 1)
plt.title('Onset detection with ' + odf.__class__.__name__)
plt.plot(audio, '0.4')
plt.subplot(3, 1, 2)
trplot.plot_detection_function(onset_det.odf, hop_size)
trplot.plot_detection_function(onset_det.threshold, hop_size, "green")
plt.subplot(3, 1, 3)
trplot.plot_onsets(onsets, 1.0)
plt.plot(audio, '0.4')
plt.show()
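
Example #2 is attributed to an onset-evaluation project (EQ4/EvalOnsets), so a natural follow-up to these snippets is scoring the detected onsets against hand-labelled reference onsets. Below is a minimal sketch in plain numpy; the reference annotations, the greedy matching scheme and the 50 ms tolerance window are illustrative assumptions, not part of modal:

# Score detected onsets against reference annotations (a sketch).
import numpy as np

def evaluate_onsets(detected, reference, sampling_rate, tolerance=0.05):
    """Return (precision, recall, f_measure).

    detected and reference are onset positions in samples; a detection counts
    as a hit if it lies within tolerance seconds of an unmatched reference onset.
    """
    detected = np.sort(np.asarray(detected, dtype=np.double)) / sampling_rate
    reference = np.sort(np.asarray(reference, dtype=np.double)) / sampling_rate
    matched = np.zeros(len(reference), dtype=bool)
    hits = 0
    for d in detected:
        # unmatched reference onsets within the tolerance window of this detection
        candidates = np.where(~matched & (np.abs(reference - d) <= tolerance))[0]
        if len(candidates):
            matched[candidates[0]] = True
            hits += 1
    precision = hits / float(len(detected)) if len(detected) else 0.0
    recall = hits / float(len(reference)) if len(reference) else 0.0
    if precision + recall:
        f_measure = 2 * precision * recall / (precision + recall)
    else:
        f_measure = 0.0
    return precision, recall, f_measure

# Hypothetical usage with the variables from Example #6:
# reference_onsets = np.loadtxt("reference_onsets.txt") * sampling_rate
# print(evaluate_onsets(onsets, reference_onsets, sampling_rate))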