Example #1
import timeit

import librosa

import klassifier  # project-local classifier module
import segmentr    # project-local segmentation module


def timeMain(filename, kit):
    # Benchmark each stage of the pipeline: load, segment, classify,
    # and quantize. `kit` is accepted for API compatibility but unused here.
    start_time = timeit.default_timer()
    y, sr = librosa.load(filename, sr=None)
    print("librosa.load:")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    segments = segmentr.segment_audio(y, sr)
    print("segmenter:")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    model = klassifier.load_classifier()
    print("load klassifier:")
    print(timeit.default_timer() - start_time)

    # Each segment is a (samples, onset_time) pair.
    samples = [s[0] for s in segments]
    times = [s[1] for s in segments]
    labels = []
    start_time = timeit.default_timer()
    for seg in samples:
        labels.append(klassifier.use_classifier(model, seg))

    print("all classifications:")
    print(timeit.default_timer() - start_time)

    # quantize_times is defined elsewhere in this module.
    start_time = timeit.default_timer()
    quantized_times = quantize_times(y, sr, times)
    print("quantize_times:")
    print(timeit.default_timer() - start_time)
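The start/stop bookkeeping in timeMain repeats for every stage. A small context manager keeps the same measurement with less repetition; this is a sketch alongside the original code, not part of it:

import timeit
from contextlib import contextmanager


@contextmanager
def timed(label):
    # Print the wall-clock time taken by the enclosed block.
    start = timeit.default_timer()
    yield
    print(f"{label}: {timeit.default_timer() - start:.4f} s")


# Usage, e.g. for the load stage:
# with timed("librosa.load"):
#     y, sr = librosa.load(filename, sr=None)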
Example #2
import librosa

import klassifier  # project-local classifier module
import segmentr    # project-local segmentation module


def quantize_and_classify(filename, model, quantized=False):
    # Load and segment the audio signal; each segment is a
    # (samples, onset_time) pair.
    y, sr = librosa.load(filename, sr=None)
    segments = segmentr.segment_audio(y, sr)
    samples = [s[0] for s in segments]
    times = [s[1] for s in segments]

    # Classify every segment with the pre-loaded model.
    labels = [klassifier.use_classifier(model, seg) for seg in samples]

    # Optionally quantize onset times to the estimated tempo.
    if quantized:
        quantized_times = quantize_times(y, sr, times)
    else:
        quantized_times = times

    return times, quantized_times, labels, len(y)
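quantize_times itself does not appear in these examples. As a rough sketch of what such a helper might do, assuming the intent is to snap onset times onto a beat grid derived from librosa's tempo estimate (the sixteenth-note grid below is an illustrative choice, not the original logic):

import librosa


def quantize_times(y, sr, times):
    # Estimate a global tempo, then snap each onset time to the nearest
    # sixteenth-note position on a grid at that tempo.
    tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
    grid = 60.0 / float(tempo) / 4.0  # sixteenth-note spacing in seconds
    return [round(t / grid) * grid for t in times]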
Example #3
import klassifier  # project-local classifier module


def main():
    # listen_for_speech and play_wav are helpers defined elsewhere in this module.
    model = klassifier.load_classifier()
    new_sample = listen_for_speech()  # capture a new audio sample
    play_wav(new_sample)              # play it back for confirmation
    mfcc = klassifier.get_mfcc(new_sample, 44100)  # 44.1 kHz sample rate
    new_features = klassifier.get_feature_from_mfcc(mfcc)
    klassifier.use_classifier(model, new_features)
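As with any script entry point, main() would typically be run behind a __main__ guard:

if __name__ == "__main__":
    main()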