Example #1
import timeit

import librosa

# segmentr, klassifier and quantize_times are project-local helpers used below.
def timeMain(filename, kit):
    start_time = timeit.default_timer()
    y, sr = librosa.load(filename, sr=None)
    print("librosa.load: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    segments = segmentr.segment_audio(y, sr)
    print("segmenter: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    model = klassifier.load_classifier()
    print("Load Klassifier: ")
    print(timeit.default_timer() - start_time)

    samples = [s[0] for s in segments]
    times = [s[1] for s in segments]
    labels = []
    # Time classifying every segment.
    start_time = timeit.default_timer()
    for seg in samples:
        label = klassifier.use_classifier(model, seg)
        labels.append(label)

    print("all classifications: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    quantized_times = quantize_times(y, sr, times)
    print("Quantize_times: ")
    print(timeit.default_timer() - start_time)
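
Each example in this listing calls segmentr.segment_audio(y, sr) and treats the result as a list of (segment_samples, onset_time) pairs. The segmenter itself is not part of the listing; the following is only a rough sketch of such an interface, assuming plain onset-based splitting with librosa rather than the project's actual implementation.

import librosa

def segment_audio(y, sr):
    # Detect onsets in sample units and cut the signal at each onset.
    onsets = librosa.onset.onset_detect(y=y, sr=sr, units='samples')
    boundaries = list(onsets) + [len(y)]
    segments = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        # Pair each slice of samples with its onset time in seconds.
        segments.append((y[start:end], start / float(sr)))
    return segments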
Example #2
import os

import librosa

# sickBeetz and segmentr are project-local modules; load_pickle and save_pickle
# are project-local helper functions.
def load_samples():
    result = load_pickle('samples.p')
    table_of_contents = load_pickle('toc.p')
    samples_dir = sickBeetz.relative_path('samples/unparsed-samples')
    for filename in os.listdir(samples_dir):
        filetype = filename.split('.')[-1]
        if filetype != 'wav':
            continue
        if filename in table_of_contents:
            continue
        full_filename = sickBeetz.relative_path(os.path.join('samples/unparsed-samples', filename))
        if not os.path.isfile(full_filename):
            continue
        # The label is everything before the final hyphen-separated token
        # (e.g. 'snare-hard-01.wav' -> 'snare-hard').
        label_toks = filename.split('-')
        label = '-'.join(label_toks[:-1])
        y, sr = librosa.load(full_filename, sr=None)

        segments = segmentr.segment_audio(y, sr)
        print(filename + ' - ' + str(len(segments)))
        for segment in segments:
            result.append((segment[0], label, sr))
        table_of_contents.append(filename)
    save_pickle(result, 'samples.p')
    save_pickle(table_of_contents, 'toc.p')
    return result
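
load_pickle and save_pickle are not shown in this listing. Judging by how load_samples uses them, load_pickle needs to return an empty list when the cache file does not exist yet; a minimal sketch under that assumption:

import os
import pickle

def load_pickle(filename):
    # Return the cached object, or an empty list if no cache exists yet.
    if not os.path.isfile(filename):
        return []
    with open(filename, 'rb') as f:
        return pickle.load(f)

def save_pickle(obj, filename):
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)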
Example #3
import librosa

# segmentr, klassifier and quantize_times are project-local helpers used below.
def quantize_and_classify(filename, model, quantized=False):
    # load and segment audio signal
    y, sr = librosa.load(filename, sr=None)
    segments = segmentr.segment_audio(y, sr)
    samples = [s[0] for s in segments]
    times = [s[1] for s in segments]
    labels = []
    for seg in samples:
        label = klassifier.use_classifier(model, seg)
        labels.append(label)

    # quantize onset times to estimated tempo
    if quantized:
        quantized_times = quantize_times(y, sr, times)
    else:
        quantized_times = times

    return times, quantized_times, labels, len(y)
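
quantize_times is used in Examples #1 and #3 but not defined in this listing. The comment in Example #3 says it quantizes onset times to the estimated tempo; a hypothetical sketch of that idea, snapping each onset to the nearest subdivision of a beat grid derived from librosa's tempo estimate:

import librosa

def quantize_times(y, sr, times, subdivisions=4):
    # Estimate the tempo (beats per minute) of the signal.
    tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
    # Length of one grid step in seconds (a subdivision of a beat).
    step = 60.0 / (float(tempo) * subdivisions)
    # Snap every onset time to the nearest grid point.
    return [round(t / step) * step for t in times]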