Code example #1
# Assumed context (not shown in the original snippet): a module-level
# boolean flag 'infant', Sound from brian.hears, and the project helpers
# get_abs_coord, parToWave, correct_initial, get_resampled, get_extended.
def relParToWave(x_rel):
    global infant

    x_abs = get_abs_coord(x_rel)        # convert relative articulatory coordinates to absolute ones

    # synthesize with the infant or the adult vocal tract, depending on the flag
    speaker = 'infant' if infant else 'adult'
    wavFile = parToWave(x_abs, speaker=speaker, simulation_name=speaker,
                        pitch_var=0.0, len_var=1.0, verbose=False, rank=1,
                        different_folder='aux_' + speaker + '.wav', monotone=False)

    sound = Sound(wavFile)
    sound = correct_initial(sound)                  # remove the initial burst
    sound_resampled = get_resampled(sound)          # adapt the generated sound to the AN model
    sound_extended = get_extended(sound_resampled)  # equalize the duration of all sounds
    sound_extended.save(wavFile)                    # overwrite the wav file with the processed sound

    return sound_extended
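
A minimal usage sketch (hypothetical: the length of the relative coordinate vector is a placeholder, and the module-level 'infant' flag must be set before the call):

import numpy as np

infant = True                       # module-level flag read by relParToWave
x_rel = np.zeros(16)                # placeholder: relative articulatory coordinates
sound = relParToWave(x_rel)         # synthesize, clean up, and save the vowel sound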
Code example #2
# Assumed context (not shown in the original snippet): import shutil, cPickle;
# import numpy as np; loadsound from brian.hears; the project helpers
# parToWave, correct_initial, get_resampled, get_extended, drnl,
# normalize_activity, get_confidences, plot_reservoir_states; and the
# module-level boolean flags 'output' and 'playback'.
def evaluate_environment(params, i_global, simulation_name, outputfolder, i_target=0, rank=1, speaker='adult', n_vow=5, normalize=False):

    folder = outputfolder

    ############### Sound generation

    if output:
        print 'simulating vocal tract'

    wavFile = parToWave(params, speaker, simulation_name,
                        verbose=output, rank=rank)      # generate the sound file
    if output:
        print 'wav file ' + str(wavFile) + ' produced'

    sound = loadsound(wavFile)          # load the sound file for brian.hears processing
    if output:
        print 'sound loaded'



    ############### Audio processing

    sound = correct_initial(sound)                  # remove the initial burst

    sound_resampled = get_resampled(sound)          # adapt the generated sound to the AN model
    sound_extended = get_extended(sound_resampled)  # equalize the duration of all sounds
    sound_extended.save(wavFile)                    # save the processed sound back to the wav file

    shutil.copy(wavFile, folder + 'data/vowel_' + str(i_target) + '_' + str(rank) + '.wav')
                                        # copy the sound into the output folder (portable replacement for os.system('cp ...'))

    if playback:
        print 'playing back...'
        sound_extended.play(sleep=True) # play back sound file

    if output:
        print 'sound acquired, preparing auditory processing'

    out = drnl(sound_extended)          # call drnl to get cochlear activation



    ############### Classifier evaluation

    flow_name = 'data/current_auditory_system.flow'
    with open(flow_name, 'rb') as flow_file:    # binary mode: the flow is a cPickle file
        flow = cPickle.load(flow_file)          # load the trained classifier flow

    sample_vote_unnormalized = flow(out)        # trained output units' responses for the current item
    if normalize:
        sample_vote = normalize_activity(sample_vote_unnormalized)
    else:
        sample_vote = sample_vote_unnormalized
    mean_sample_vote = np.mean(sample_vote, axis=0)
                                        # average each output neuron's response over time


    confidences = get_confidences(mean_sample_vote)

    plot_reservoir_states(flow, sample_vote, i_target, folder, n_vow, rank)


    return confidences
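
A minimal, hypothetical call sketch (the parameter vector and the output folder are placeholders; a trained classifier flow must already exist at data/current_auditory_system.flow, and the module-level flags must be set):

import numpy as np

output = True                       # module-level flags read inside evaluate_environment
playback = False

params = np.zeros(16)               # placeholder: vocal tract parameters
confidences = evaluate_environment(params, i_global=0,
                                   simulation_name='test',
                                   outputfolder='results/',
                                   i_target=0, rank=1,
                                   speaker='adult', n_vow=5)
print 'confidences per vowel class:', confidences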