def plot_audio(full_file_path, plot_type, radioIntVar):
    """Load a mono WAV file and dispatch to the selected plotting routine.

    Parameters:
        full_file_path -- path of the .wav file to open
        plot_type      -- "raw", "fft", or "spectrogram"
        radioIntVar    -- Tkinter-style IntVar; 1 selects the 2D plots,
                          2 selects the multi-dimensional variants
    Side effects:
        Sets the module-level global ``fs`` to the file's frame rate.
        Calls sys.exit(0) for non-mono input.
    """
    spf = wave.open(full_file_path, 'r')

    # Extract raw audio from the WAV file.  np.fromstring is deprecated
    # (removed in modern NumPy); frombuffer is the supported equivalent
    # for decoding binary frame data.
    opened_signal = spf.readframes(-1)
    opened_signal = np.frombuffer(opened_signal, dtype=np.int16)

    global fs
    fs = spf.getframerate()

    # Only mono input is supported.  The original test was `== 2`, which
    # let files with more than two channels slip through; reject anything
    # that is not single-channel.
    if spf.getnchannels() != 1:
        print('Just mono files')
        sys.exit(0)

    if radioIntVar.get() == 1:  # 2D plot
        if plot_type == "raw":
            Plot.plot_raw_audio2D(opened_signal)
        elif plot_type == "fft":
            Plot.plot_fft_audio2D(opened_signal)
        elif plot_type == "spectrogram":
            plotstft(full_file_path, generatefig=False)
    elif radioIntVar.get() == 2:  # multi-dimensional plot
        opened_signal = np.array_split(opened_signal, Plot.sub_arrays)
        if plot_type == "raw":
            Plot.plot_raw_audio(opened_signal)
        elif plot_type == "fft":
            Plot.plot_fft_audio(opened_signal)
def plot_audio(full_file_path, plot_type, radioIntVar):
    """Open a mono WAV file and route it to the requested plot.

    Parameters:
        full_file_path -- path of the .wav file
        plot_type      -- "raw", "fft", or "spectrogram"
        radioIntVar    -- Tkinter-style IntVar; 1 -> 2D plot, 2 -> the
                          multi-dimensional (split) plot
    Side effects:
        Updates the module-level global ``fs`` with the frame rate and
        terminates the process (sys.exit(0)) for non-mono files.
    """
    wav = wave.open(full_file_path, 'r')

    # Decode the raw frames into 16-bit samples.  frombuffer replaces the
    # deprecated np.fromstring for binary input.
    samples = np.frombuffer(wav.readframes(-1), dtype=np.int16)

    global fs
    fs = wav.getframerate()

    # Mono only: reject any multi-channel file (the old `== 2` check
    # missed files with more than two channels).
    if wav.getnchannels() != 1:
        print('Just mono files')
        sys.exit(0)

    mode = radioIntVar.get()
    if mode == 1:  # 2D plot
        if plot_type == "spectrogram":
            plotstft(full_file_path, generatefig=False)
        else:
            # Dispatch table keeps the raw/fft branches symmetrical.
            handlers = {
                "raw": Plot.plot_raw_audio2D,
                "fft": Plot.plot_fft_audio2D,
            }
            if plot_type in handlers:
                handlers[plot_type](samples)
    elif mode == 2:  # multi-dimensional plot
        chunks = np.array_split(samples, Plot.sub_arrays)
        if plot_type == "raw":
            Plot.plot_raw_audio(chunks)
        elif plot_type == "fft":
            Plot.plot_fft_audio(chunks)
def predict_results():
    """Classify the recorded 'test.wav' and print the winner + probabilities.

    Pipeline: render the recording's spectrogram to 'test.png', reload it
    as a grayscale image, feed it to the trained network, and print the
    predicted class with per-class percentages.

    Depends on module-level collaborators: ``spectogram.plotstft``,
    ``NeuralNetwork.ann`` (trained Keras model) and
    ``NeuralNetwork.alphabet`` (class labels).
    """
    # Generate the spectrogram figure and save it as test.png.
    spectogram.plotstft('test.wav', generatefig=True)

    # Reload the saved plot as a grayscale image and display it.
    # NOTE(review): cv2.imshow with no cv2.waitKey here — presumably the
    # GUI event loop is pumped elsewhere; confirm the window actually shows.
    test_img = cv2.imread('test.png', 0)
    cv2.imshow('test.png', test_img)

    # Shape the single image into the (samples, channels, rows, cols)
    # layout the network expects: 33 rows x 70 columns.
    X_test = np.array([test_img])
    X_test = X_test.reshape(X_test.shape[0], 1, 33, 70)

    results_test = NeuralNetwork.ann.predict_classes(X_test, batch_size=1)
    result_probability = NeuralNetwork.ann.predict_proba(X_test, batch_size=1)
    result_probability *= 100  # convert to percent

    # Parenthesized single-argument prints work identically under
    # Python 2 and Python 3.
    print("[Winner]: " + NeuralNetwork.alphabet[results_test])
    print("[Probabilities:] "
          + "ASC:" + str(result_probability[0, 0]) + "%"
          + ", DESC:" + str(result_probability[0, 1]) + "%"
          + ", FLAT:" + str(result_probability[0, 2]) + "%"
          + ", SOY: " + str(result_probability[0, 3]) + "%")
def predict_results():
    """Run the trained network on 'test.wav' and report the result.

    Renders the recording's spectrogram to 'test.png', loads it back as a
    grayscale image, shows it, and prints the predicted class together
    with the per-class probabilities (as percentages).

    Uses module-level collaborators: ``spectogram.plotstft``,
    ``NeuralNetwork.ann`` and ``NeuralNetwork.alphabet``.
    """
    # Render and save the spectrogram of the user's recording.
    spectogram.plotstft('test.wav', generatefig=True)

    # Grayscale load (flag 0) of the freshly written plot, then display.
    # NOTE(review): no cv2.waitKey follows — assumes the event loop runs
    # elsewhere; verify the window is actually drawn.
    test_img = cv2.imread('test.png', 0)
    cv2.imshow('test.png', test_img)

    # One sample, one channel, 33 rows x 70 columns — the input layout
    # the network was trained on.
    X_test = np.array([test_img]).reshape(1, 1, 33, 70)

    predicted = NeuralNetwork.ann.predict_classes(X_test, batch_size=1)
    probs = NeuralNetwork.ann.predict_proba(X_test, batch_size=1)
    probs *= 100  # scale to percent

    # %-formatting keeps these prints valid on both Python 2 and 3
    # (single parenthesized argument).
    print("[Winner]: " + NeuralNetwork.alphabet[predicted])
    print("[Probabilities:] ASC:%s%%, DESC:%s%%, FLAT:%s%%, SOY: %s%%" % (
        probs[0, 0], probs[0, 1], probs[0, 2], probs[0, 3]))