def timeMain(filename, kit):
    """Run the full audio pipeline on *filename*, printing per-stage timings.

    Stages: load audio, segment it, load the classifier, classify every
    segment, quantize the segment times.  Each stage's wall-clock duration
    is printed to stdout via ``timeit.default_timer``.

    NOTE(review): *kit* is accepted but never used here, and neither
    ``labels`` nor ``quantized_times`` is returned — this function appears
    to exist purely as a timing harness.  Confirm before changing.
    """
    start_time = timeit.default_timer()
    y, sr = librosa.load(filename, sr=None)  # sr=None: keep native sample rate
    print("librosa.load: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    segments = segmentr.segment_audio(y, sr)
    print("segmenter: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    model = klassifier.load_classifier()
    print("Load Klassifier: ")
    print(timeit.default_timer() - start_time)

    # Each segment is a (sample, time) pair.
    samples = [s[0] for s in segments]
    times = [s[1] for s in segments]

    start_time = timeit.default_timer()
    # Fixed: dropped the dead counter ``i`` (incremented but never read)
    # and replaced the manual append loop with a comprehension.
    labels = [klassifier.use_classifier(model, seg) for seg in samples]
    print("all classifications: ")
    print(timeit.default_timer() - start_time)

    start_time = timeit.default_timer()
    quantized_times = quantize_times(y, sr, times)
    print("Quantize_times: ")
    print(timeit.default_timer() - start_time)
def get_model(self, button):
    """Load the classifier into the module-level ``model`` global, then
    re-enable *button* and update the status line.

    Presumably run off the main thread so the GUI stays responsive while
    the classifier loads, with *button* disabled until loading finishes —
    TODO confirm against the caller.
    """
    # Publish the loaded classifier through the module-level global so
    # other handlers can reach it.
    global model
    model = klassifier.load_classifier()
    # Model is ready: let the user interact with the button again.
    button.config(state=Tkinter.NORMAL)
    self.status.set("Waiting for user")
def main(file_path, kit): time, quantized, labels, inputLength = quantize_and_classify(file_path, klassifier.load_classifier(), False) print build_output(time, quantized, labels, kit, file_path, inputLength, False)
def main():
    """Record one sample from the microphone, play it back, and classify it."""
    model = klassifier.load_classifier()
    # Capture a fresh recording and echo it to the speakers.
    recording = listen_for_speech()
    play_wav(recording)
    # Extract MFCC-based features (44100 Hz sample rate), then classify.
    mfcc = klassifier.get_mfcc(recording, 44100)
    features = klassifier.get_feature_from_mfcc(mfcc)
    klassifier.use_classifier(model, features)