def prediction_mode(in_dir, file_name, out_dir):
    """Denoise a single audio file with the pre-trained U-Net model.

    Example: python main.py --mode="prediction"

    Parameters
    ----------
    in_dir : str
        Directory containing the noisy input file.
    file_name : str
        Name of the noisy sound file to denoise; the denoised output is
        saved under the same name.
    out_dir : str
        Directory where the denoised file is written.

    NOTE(review): the original line was collapsed onto a single physical
    line, so everything after the first ``#`` was parsed as a comment and
    the function had no body. This restores the intended statements with
    proper indentation; no logic was changed.
    """
    # Path to find pre-trained weights / save models
    weights_path = './weights'
    # Pre-trained model name
    name_model = 'model_unet'
    # Directory where the noisy sound to denoise is read from
    audio_dir_prediction = in_dir
    # Directory to save the denoised sound
    dir_save_prediction = out_dir
    # Name of noisy sound file to denoise (prediction() expects a list)
    audio_input_prediction = [file_name]
    # Name of denoised sound file to save
    audio_output_prediction = file_name
    # Sample rate to read audio
    sample_rate = 8000
    # Minimum duration (seconds, presumably — TODO confirm against
    # prediction()) of audio files to consider
    min_duration = 1.0
    # Frame length for training data
    frame_length = 8064
    # Hop length for sound files
    hop_length_frame = 8064
    # Number of points for FFT (for spectrogram computation)
    n_fft = 255
    # Hop length for FFT
    hop_length_fft = 63

    prediction(weights_path, name_model, audio_dir_prediction,
               dir_save_prediction, audio_input_prediction,
               audio_output_prediction, sample_rate, min_duration,
               frame_length, hop_length_frame, n_fft, hop_length_fft)
# NOTE(review): this is a fragment of a CLI dispatch chain — the opening `if`
# branch is not visible in this chunk, so the `elif` cannot stand alone.
# The line has also been collapsed onto a single physical line: everything
# after the first `#` below is parsed as a comment, so the branch body
# (unpacking argparse `args.*` values and calling prediction(...)) never
# executes. It needs to be re-split onto indented lines under the `elif`;
# confirm against the original main.py before reformatting.
elif prediction_mode: #Example: python main.py --mode="prediction" #path to find pre-trained weights / save models weights_path = args.weights_folder #pre trained model name_model = args.name_model #directory where read noisy sound to denoise audio_dir_prediction = args.audio_dir_prediction #directory to save the denoise sound dir_save_prediction = args.dir_save_prediction #Name noisy sound file to denoise audio_input_prediction = args.audio_input_prediction #Name of denoised sound file to save audio_output_prediction = args.audio_output_prediction # Sample rate to read audio sample_rate = args.sample_rate # Minimum duration of audio files to consider min_duration = args.min_duration #Frame length for training data frame_length = args.frame_length # hop length for sound files hop_length_frame = args.hop_length_frame #nb of points for fft(for spectrogram computation) n_fft = args.n_fft #hop length for fft hop_length_fft = args.hop_length_fft prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction, audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft, hop_length_fft)
# NOTE(review): the line below begins with `#`, so Python parses the ENTIRE
# line as one comment — yet it clearly contains flattened code from a
# different revision of the main-script dispatch chain: a commented-out
# create_data(...) call, a live training(...) call, an
# `elif prediction_mode:` branch (note: this variant passes 9 arguments to
# prediction() and omits min_duration, unlike the 12-argument call on the
# other branch — the two revisions disagree), and an `elif plot_mode:`
# branch calling plot_out(...). The enclosing `if` is not visible in this
# chunk. Recover the intended version from history and re-indent it onto
# separate lines; do not leave both revisions in the file.
# create_data(noise_dir, voice_dir, dataset_noise, dataset_voice, time_wave_dir, sound_dir, # spectrogram_dir, sample_rate, min_duration, frame_length, hop_length_frame, # hop_length_frame_noise, nb_samples, n_fft, hop_length_fft, list_noise_files, # list_voice_files) training(dataset_noise, dataset_voice, spectrogram_dir, weights_dir, model_name, training_from_scratch, epochs, batch_size) elif prediction_mode: weights_dir = args.weights_dir model_name = args.model_name input_dir = args.input_dir output_dir = args.output_dir sample_rate = args.sample_rate_predict min_duration = args.min_duration frame_length = args.frame_length hop_length_frame = args.hop_length_frame n_fft = args.n_fft hop_length_fft = args.hop_length_fft prediction(weights_dir, model_name, input_dir, output_dir, sample_rate, frame_length, hop_length_frame, n_fft, hop_length_fft) elif plot_mode: temp_dir = args.temp_dir sample_rate = args.sample_rate_plot hop_length_fft = args.hop_length_fft plot_out(temp_dir, sample_rate, hop_length_fft)