# Assumed module-level imports for the excerpts below; the project modules
# are referenced as alysis, prep and synth throughout this listing.
import numpy as np

import alysis
import prep
import synth


def batch_synthesis(fs, CONSECUTIVE_BLOCKS, TIME_STAMPS,
                    selected_inflect_block_new, typeOfEmotion):
    """
    batch_synthesis(fs, CONSECUTIVE_BLOCKS, TIME_STAMPS,
                    selected_inflect_block_new, typeOfEmotion)

    Synthesis stage: applies one of the emotions "happy", "happy_tensed",
    "sad" or "afraid" to the wavefile, using the process variables and the
    selected inflect blocks.

    Parameters:
        fs
        CONSECUTIVE_BLOCKS
        TIME_STAMPS
        selected_inflect_block_new
        typeOfEmotion

    Returns:
        output - modified/synthesised wavefile
    """
    # synth.FILE_NAME_PATH = rospy.get_param('file_path')
    if typeOfEmotion == "happy":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            CONSECUTIVE_BLOCKS, TIME_STAMPS, selected_inflect_block)
        output = synth.happy_patch(fs, utterance_time_stamps)
    elif typeOfEmotion == "happy_tensed":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            CONSECUTIVE_BLOCKS, TIME_STAMPS, selected_inflect_block)
        output = synth.happy_tensed_patch(fs, utterance_time_stamps)
    elif typeOfEmotion == "sad":
        output = synth.sad_patch(fs)
    elif typeOfEmotion == "afraid":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            CONSECUTIVE_BLOCKS, TIME_STAMPS, selected_inflect_block)
        output = synth.afraid_patch(fs, utterance_time_stamps)
    else:
        # Without this guard, an unknown emotion left `output` undefined.
        raise ValueError("unknown emotion: %s" % typeOfEmotion)
    return output
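# A minimal sketch (not part of the original module) of an alternative
# dispatch for the branching above: a table mapping emotion names to the
# synth patch functions removes the repeated if-blocks. Names below mirror
# batch_synthesis; the sad patch is special-cased because it takes no
# utterance time stamps. batch_synthesis_dispatch is a hypothetical name.
INFLECTED_PATCHES = {
    "happy": synth.happy_patch,
    "happy_tensed": synth.happy_tensed_patch,
    "afraid": synth.afraid_patch,
}


def batch_synthesis_dispatch(fs, consecutive_blocks, time_stamps,
                             selected_inflect_block, typeOfEmotion):
    if typeOfEmotion == "sad":
        return synth.sad_patch(fs)
    patch = INFLECTED_PATCHES[typeOfEmotion]  # KeyError on unknown emotions
    utterance_time_stamps = synth.appended_utterance_time_stamps(
        consecutive_blocks, time_stamps, selected_inflect_block)
    return patch(fs, utterance_time_stamps)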
def batch_synthesis(fs, time_stamps, typeOfEmotion, semitones, cutfreq, gain,
                    qfactor, speed, depth, tempo, intensity,
                    parameter_control):
    """
    batch_synthesis(fs, time_stamps, typeOfEmotion, semitones, cutfreq, gain,
                    qfactor, speed, depth, tempo, intensity,
                    parameter_control)

    Synthesis stage: applies one of the emotions "happy", "happy_tensed",
    "sad" or "afraid" to the wavefile, using the given DSP parameters.
    Note: time_stamps is accepted for interface compatibility but is not
    used by this variant.

    Parameters:
        fs - sampling frequency
        time_stamps
        typeOfEmotion
        semitones
        cutfreq - cutoff frequency
        gain
        qfactor
        speed
        depth
        tempo
        intensity
        parameter_control

    Returns:
        output - modified/synthesised wavefile
    """
    if typeOfEmotion == "happy":
        output = synth.happy_patch(fs, semitones, cutfreq, gain, qfactor,
                                   tempo, intensity, parameter_control)
    elif typeOfEmotion == "happy_tensed":
        output = synth.happy_tensed_patch(fs, semitones, cutfreq, gain,
                                          qfactor, tempo, intensity,
                                          parameter_control)
    elif typeOfEmotion == "sad":
        output = synth.sad_patch(fs, semitones, cutfreq, gain, tempo,
                                 parameter_control)
    elif typeOfEmotion == "afraid":
        output = synth.afraid_patch(fs, speed, depth, tempo, intensity,
                                    parameter_control)
    else:
        raise ValueError("unknown emotion: %s" % typeOfEmotion)
    return output
def batch_synthesis(fs, consecutive_blocks, time_stamps,
                    selected_inflect_block_new, emotion, semitones, cutfreq,
                    gain, qfactor, speed, depth, tempo, intensity,
                    parameter_control):
    """
    batch_synthesis(fs, consecutive_blocks, time_stamps,
                    selected_inflect_block_new, emotion, semitones, cutfreq,
                    gain, qfactor, speed, depth, tempo, intensity,
                    parameter_control)

    Synthesis stage: applies one of the emotions "happy", "happy_tensed",
    "sad" or "afraid" to the wavefile, using the process variables, the
    selected inflect blocks and the given DSP parameters.

    Parameters:
        fs
        consecutive_blocks
        time_stamps
        selected_inflect_block_new
        emotion
        semitones, cutfreq, gain, qfactor, speed, depth, tempo, intensity,
        parameter_control - DSP parameters forwarded to the synth patches

    Returns:
        output - modified/synthesised wavefile
    """
    if emotion == "happy":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            consecutive_blocks, time_stamps, selected_inflect_block)
        output = synth.happy_patch(fs, utterance_time_stamps, semitones,
                                   cutfreq, gain, qfactor, tempo, intensity,
                                   parameter_control)
    elif emotion == "happy_tensed":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            consecutive_blocks, time_stamps, selected_inflect_block)
        output = synth.happy_tensed_patch(fs, utterance_time_stamps,
                                          semitones, cutfreq, gain, qfactor,
                                          tempo, intensity, parameter_control)
    elif emotion == "sad":
        output = synth.sad_patch(fs, semitones, cutfreq, gain, tempo,
                                 parameter_control)
    elif emotion == "afraid":
        selected_inflect_block = selected_inflect_block_new
        utterance_time_stamps = synth.appended_utterance_time_stamps(
            consecutive_blocks, time_stamps, selected_inflect_block)
        output = synth.afraid_patch(fs, utterance_time_stamps, speed, depth,
                                    tempo, intensity, parameter_control)
    else:
        raise ValueError("unknown emotion: %s" % emotion)
    return output
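# Hypothetical driver for the parameterised batch_synthesis above; not part
# of the original module. scipy is assumed only for loading samples;
# "speech.wav", the DSP values and the placeholder selected_inflect_block_new
# are illustrative, not tuned defaults. In the real pipeline these inputs
# come from the analysis and preprocess stages.
def demo_batch_synthesis(wav_path="speech.wav"):
    import scipy.io.wavfile
    fs, x = scipy.io.wavfile.read(wav_path)    # hypothetical input file
    chunk_size = 1024
    num_blocks = int(np.ceil(len(x) / float(chunk_size)))
    # Block start times, mirroring the TIME_STAMPS computed in emotive_speech.
    time_stamps = np.arange(0, num_blocks - 1) * (chunk_size / float(fs))
    consecutive_blocks = 1 + int(0.5 * fs / chunk_size)
    # Placeholder block selection; the preprocess stage supplies the real one.
    selected_inflect_block_new = np.arange(consecutive_blocks)
    return batch_synthesis(fs, consecutive_blocks, time_stamps,
                           selected_inflect_block_new, "happy",
                           semitones=0.5, cutfreq=4000.0, gain=3.0,
                           qfactor=1.0, speed=8.5, depth=50, tempo=1.0,
                           intensity=1.0, parameter_control=1.0)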
def emotive_speech(x, fs, typeOfEmotion):
    """
    Full pipeline: analysis, preprocessing and synthesis of one of the
    emotions "Happy", "HappyTensed", "Sad" or "Afraid" for the signal x.

    Returns:
        output - modified/synthesised wavefile
    """
    CHUNK_SIZE = 1024
    NUM_BLOCKS = int(np.ceil(len(x) / float(CHUNK_SIZE)))
    SAMPLE_PERIOD = CHUNK_SIZE / float(fs)  # seconds per analysis block
    TIME_STAMPS = np.arange(0, NUM_BLOCKS - 1) * (CHUNK_SIZE / float(fs))
    QFACTOR = 1
    # --------------------- Analysis --------------------- #
    data_in_blocks = alysis.data_blocks(x, CHUNK_SIZE)
    fundamental_frequency_in_blocks = alysis.pitch_detect(x, fs, CHUNK_SIZE)
    voiced_unvoiced_starting_info_object = alysis.starting_info(
        x, fundamental_frequency_in_blocks, fs, CHUNK_SIZE)
    voiced_samples = voiced_unvoiced_starting_info_object['VSamp']
    voiced_regions = alysis.voiced_regions(
        x, fundamental_frequency_in_blocks,
        voiced_unvoiced_starting_info_object, CHUNK_SIZE)
    # Number of analysis blocks spanning roughly 0.5 s of audio.
    consecutive_blocks = 1 + int(0.5 / SAMPLE_PERIOD)
    # --------------------- Preprocess --------------------- #
    inflection_voice_samples = prep.pre_process(voiced_samples)
    frequency_of_voiced_samples = fundamental_frequency_in_blocks[
        inflection_voice_samples]
    rms = prep.root_mean_square(x, CHUNK_SIZE, fs)[0]
    frequency_for_inflection = prep.potential_inflection_fundamental_frequency(
        frequency_of_voiced_samples)
    inflection_sample_numbers = prep.matrix_of_sample_numbers(
        rms, inflection_voice_samples)
    inflect_blocks = prep.consecutive_blocks_for_inflection(
        inflection_sample_numbers, consecutive_blocks)
    selected_inflect_block = prep.alteration_of_discrete_data(
        inflection_sample_numbers, consecutive_blocks, inflect_blocks)
    n = prep.consecutive_blocks_in_selected_blocks(
        selected_inflect_block, consecutive_blocks)
    reshaped_inflect_blocks = prep.reshaped_inflection_blocks(
        n, selected_inflect_block, consecutive_blocks)
    difference_arrays = prep.difference_arrays(
        NUM_BLOCKS, reshaped_inflect_blocks)
    # --------------------- Synthesis --------------------- #
    if typeOfEmotion == "Happy":
        consecutive_blocks = 1 + int(0.5 / SAMPLE_PERIOD)
        selected_inflect_block = prep.alteration_of_discrete_data(
            inflection_sample_numbers, consecutive_blocks, inflect_blocks)
        utterance_time_stamps = TIME_STAMPS[selected_inflect_block]
        gain = 3.0
        semitones = 0.5
        output = synth.happy_patch(fs, semitones, QFACTOR, gain,
                                   utterance_time_stamps)
    elif typeOfEmotion == "HappyTensed":
        # Recomputes the block selection with a slightly shorter window.
        consecutive_blocks = int(0.5 / SAMPLE_PERIOD)
        inflection_sample_numbers = prep.matrix_of_sample_numbers(
            rms, inflection_voice_samples)
        inflect_blocks = prep.consecutive_blocks_for_inflection(
            inflection_sample_numbers, consecutive_blocks)
        selected_inflect_block = prep.alteration_of_discrete_data(
            inflection_sample_numbers, consecutive_blocks, inflect_blocks)
        utterance_time_stamps = TIME_STAMPS[selected_inflect_block]
        gain = 3.0
        semitones = 1.0
        output = synth.happy_tensed_patch(fs, semitones, QFACTOR, gain,
                                          utterance_time_stamps)
    elif typeOfEmotion == "Sad":
        gain = 0.25
        semitones = -0.5
        output = synth.sad_patch(fs, semitones, QFACTOR, gain)
    elif typeOfEmotion == "Afraid":
        speed = 8.5
        depth = 50
        utterance_time_stamps = TIME_STAMPS[selected_inflect_block]
        output = synth.afraid_patch(fs, speed, depth, utterance_time_stamps)
    else:
        raise ValueError("unknown emotion: %s" % typeOfEmotion)
    # The original discarded the patch results; returning them makes the
    # function usable like the batch_synthesis variants above.
    return output
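# Hypothetical end-to-end call, not part of the original module. It assumes a
# mono 16-bit PCM wavefile; the path is illustrative, and the emotion string
# must match the branches in emotive_speech ("Happy", "HappyTensed", "Sad",
# "Afraid").
def demo_emotive_speech(wav_path="speech.wav", emotion="Happy"):
    import scipy.io.wavfile
    fs, data = scipy.io.wavfile.read(wav_path)
    x = data.astype(np.float64) / 32768.0  # scale int16 PCM to [-1.0, 1.0)
    return emotive_speech(x, fs, emotion)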