Code example #1 — file: localize_sound.py (project: oesst/HRTF_Model)
def main(model_name='different_learned_maps',
         exp_name='localization_default',
         azimuth=12,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script takes the filtered data and tries to localize sounds with different, learned maps
        for all participants.

    For each participant four elevation-spectra maps are learned (monaural,
    monaural mean-subtracted, binaural, binaural mean-subtracted) from one
    noise realization, and each map is then evaluated by localizing sounds
    that were generated with a *different* noise realization.

    Results are cached under ``ROOT / 'models' / model_name`` and re-read on
    subsequent runs unless ``clean`` is True.
    """
    logger = logging.getLogger(__name__)
    logger.info('Localizing sounds for all participants, different maps')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    participant_numbers = np.array([
        1, 2, 3, 8, 9, 10, 11, 12, 15, 17, 18, 19, 20, 21, 27, 28, 33, 40, 44,
        48, 50, 51, 58, 59, 60, 61, 65, 119, 124, 126, 127, 131, 133, 134, 135,
        137, 147, 148, 152, 153, 154, 155, 156, 158, 162, 163, 165
    ])

    normalize = False
    time_window = 0.1  # time window in sec

    # turn the elevation *count* into the list of elevation indices to use
    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name (encodes all relevant parameters so that
    # cached results are only reused for an identical configuration)
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            # FIX: lazy %-style logging argument; this also restores the
            # missing space between the message and the file path
            logger.info('Reading model data from file %s', exp_file.as_posix())
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:
        # result arrays, indexed as
        # (map type, participant, sound, elevation[, coordinate])
        x_mono = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                           len(elevations), 2))
        y_mono = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_mono_mean = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                                len(elevations), 2))
        y_mono_mean = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_bin = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                          len(elevations), 2))
        y_bin = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_bin_mean = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                               len(elevations), 2))
        y_bin_mean = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        for i_par, par in enumerate(participant_numbers):

            # create or read the data (same-noise realization, used to learn
            # the maps)
            psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                            par,
                                                            snr,
                                                            normalize,
                                                            azimuth,
                                                            time_window,
                                                            max_freq=max_freq,
                                                            diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            # filter data and integrate it
            psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
                sigma_gauss_norm)

            ### Read different noise data ###
            # create or read the data (different noise realization, used only
            # as localization input, never for map learning)
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands,
                par,
                snr,
                normalize,
                azimuth,
                time_window,
                max_freq=max_freq,
                diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            # filter data and integrate it
            psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                psd_all_i_diff_noise, psd_all_c_diff_noise, ear,
                normalization_type, sigma_smoothing, sigma_gauss_norm)

            # walk over the 4 different maps: mono, mono_mean, bina, bina_mean
            for i_map in range(4):
                # create map from defined processed data (mean over sounds)
                if i_map == 0:
                    learned_map = psd_mono.mean(0)
                elif i_map == 1:
                    learned_map = psd_mono_mean.mean(0)
                elif i_map == 2:
                    learned_map = psd_binaural.mean(0)
                elif i_map == 3:
                    # bina_mean
                    learned_map = psd_binaural_mean.mean(0)
                else:
                    # defensive only: unreachable for range(4)
                    logger.error('Something went wrong in if i_map statement')

                # localize the sounds and save the results
                # (FIX: use a full [:, :] slice on y_mono for consistency with
                # the other y_* assignments below; numerically identical)
                x_mono[i_map, i_par, :, :, :], y_mono[
                    i_map, i_par, :, :] = hp.localize_sound(
                        psd_mono_diff_noise, learned_map)

                # localize the sounds and save the results
                x_mono_mean[i_map, i_par, :, :, :], y_mono_mean[
                    i_map, i_par, :, :] = hp.localize_sound(
                        psd_mono_mean_diff_noise, learned_map)

                # localize the sounds and save the results
                x_bin[i_map, i_par, :, :, :], y_bin[
                    i_map, i_par, :, :] = hp.localize_sound(
                        psd_binaural_diff_noise, learned_map)

                # localize the sounds and save the results
                x_bin_mean[i_map, i_par, :, :, :], y_bin_mean[
                    i_map, i_par, :, :] = hp.localize_sound(
                        psd_binaural_mean_diff_noise, learned_map)

        # create Path and cache the results for the next run
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)
Code example #2 — file: parameter_sweep.py (project: oesst/HRTF_Model)
def main(model_name='parameter_sweep', exp_name='default', azimuth=12, snr=0.2, freq_bands=128, max_freq=20000, elevations=25, mean_subtracted_map=True, ear='ipsi', normalization_type='sum_1', clean=False):
    """ Sweeps the two smoothing parameters and records localization quality.

        For every participant, a binaural map is learned from one noise
        realization and evaluated on data generated with a different noise
        realization, for every (sigma_smoothing, sigma_gauss_norm) pair.
        The (gain, bias, score) localization coefficients are accumulated per
        parameter pair and finally averaged over participants.  Results are
        cached on disk and reloaded unless ``clean`` is True.
    """
    logger = logging.getLogger(__name__)
    logger.info('Parameter Sweep Experiment.')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    # participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
    #                                 12, 15, 17, 18, 19, 20, 21, 27, 28, 33, 40])

    participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
                                    12, 15, 17, 18, 19, 20,
                                    21, 27, 28, 33, 40, 44,
                                    48, 50, 51, 58, 59, 60,
                                    61, 65, 119, 124, 126,
                                    127, 131, 133, 134, 135,
                                    137, 147, 148, 152, 153,
                                    154, 155, 156, 158, 162,
                                    163, 165])
    normalize = False
    time_window = 0.1  # time window in sec

    # turn the elevation *count* into the list of elevation indices to use
    elevations = np.arange(0, elevations, 1)

    # the two parameter grids that are swept (inclusive of 1.0, exclusive of 3.0)
    sigma_smoothing_vals = np.arange(1, 3.0, 0.1)
    sigma_gauss_norm_vals = np.arange(1, 3.0, 0.1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([exp_name, normalization_type, mean_subtracted_map, time_window, int(
        snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize, len(elevations), ear])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [scores, sigma_smoothing_vals, sigma_gauss_norm_vals] = pickle.load(f)
    else:

        # accumulated (gain, bias, score) per (sigma_smooth, sigma_gauss) pair
        scores = np.zeros((sigma_smoothing_vals.shape[0], sigma_gauss_norm_vals.shape[0], 3))

        for i_par, par in enumerate(participant_numbers):

            # create or read the data (same-noise realization, for map learning)
            psd_all_c, psd_all_i = generateData.create_data(
                freq_bands, par, snr, normalize, azimuth, time_window, max_freq=max_freq, diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            ### Get different noise data ###
            # different noise realization, used only as localization input
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands, par, snr, normalize, azimuth, time_window, max_freq=max_freq, diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            for i_smooth, sigma_smooth in enumerate(sigma_smoothing_vals):
                for i_gauss, sigma_gauss in enumerate(sigma_gauss_norm_vals):

                    # filter data and integrate it (map-learning data)
                    psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                        psd_all_i, psd_all_c, ear, normalization_type, sigma_smooth, sigma_gauss)

                    # create map from defined processed data
                    if mean_subtracted_map:
                        learned_map = psd_binaural_mean.mean(0)
                    else:
                        learned_map = psd_binaural.mean(0)

                    # filter data and integrate it (localization input data)
                    psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                        psd_all_i_diff_noise, psd_all_c_diff_noise, ear, normalization_type, sigma_smooth, sigma_gauss)

                    # # localize the sounds and save the results
                    # x_mono[i_par, :, :, :], y_mono[i_par, :] = hp.localize_sound(psd_mono, learned_map)
                    #
                    # # localize the sounds and save the results
                    # x_mono_mean[i_par, :, :, :], y_mono_mean[i_par, :, :] = hp.localize_sound(psd_mono_mean, learned_map)
                    #
                    # # localize the sounds and save the results
                    # x_bin[i_par, :, :, :], y_bin[i_par, :, :] = hp.localize_sound(psd_binaural, learned_map)

                    # localize the sounds and save the results
                    # (accumulate over participants; normalized after the loop)
                    x_test, y_test = hp.localize_sound(psd_binaural_diff_noise, learned_map)
                    x_test, y_test = hp_vis.scale_v(x_test, y_test, len(elevations))
                    scores[i_smooth, i_gauss, :] += hp.get_localization_coefficients_score(x_test, y_test)
        # get the mean scores over participants
        scores = scores / len(participant_numbers)

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([scores, sigma_smoothing_vals, sigma_gauss_norm_vals], f)
Code example #3 — map_learning experiment (project: oesst/HRTF_Model)
def main(model_name='map_learning',
         exp_name='localization_all_maps',
         azimuth=12,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         n_trials=100,
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ Learns the elevation spectra map gradually over presented sounds and saves the localization quality for each trial

        Per trial, a random subset of (sound, elevation) samples is drawn and
        a map is built from only those samples; the map is then used to
        localize sounds generated with a different noise realization.  This
        is repeated for all four map types (mono, mono mean-subtracted,
        binaural, binaural mean-subtracted).  Stored per trial are the
        (gain, bias, score) localization coefficients and the number of
        samples used.  Results are cached on disk and reloaded unless
        ``clean`` is True.
    """
    logger = logging.getLogger(__name__)
    logger.info('Learning different maps for all participants')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    # participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
    #                                 12, 15, 17, 18, 19, 20,
    #                                 21, 27, 28, 33, 40, 44,
    #                                 48, 50, 51, 58, 59, 60,
    #                                 61, 65, 119, 124, 126,
    #                                 127, 131, 133, 134, 135,
    #                                 137, 147, 148, 152, 153,
    #                                 154, 155, 156, 158, 162,
    #                                 163, 165])

    # NOTE: only a subset of participants is active here (the full list is
    # commented out above)
    participant_numbers = np.array([
        127,
        131,
        133,
        134,
        135,
    ])

    normalize = False
    time_window = 0.1  # time window in sec

    # turn the elevation *count* into the list of elevation indices to use
    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize,
        len(elevations), ear, n_trials
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [mono_res, mono_mean_res, bin_res, bin_mean_res,
             trial_used_ss] = pickle.load(f)
    else:

        # store only the localization coefficeints (gain,bias,score)
        # indexed as (map type, participant, trial, coefficient)
        mono_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        mono_mean_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        bin_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        bin_mean_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        # number of sound samples used to build the map in each trial
        trial_used_ss = np.zeros((4, len(participant_numbers), n_trials))

        for i_par, par in enumerate(participant_numbers):
            logger.info(
                'Localizing {0:d} trials for participant {1:d}. \n'.format(
                    n_trials, par))

            # create or read the data. psd_all_c = (sounds,elevations,frequency bands)
            psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                            par,
                                                            snr,
                                                            normalize,
                                                            azimuth,
                                                            time_window,
                                                            max_freq=max_freq,
                                                            diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            # filter data and integrate it
            psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
                sigma_gauss_norm)

            ### Load different noise data ###
            # create or read the data. psd_all_c = (sounds,elevations,frequency bands)
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands,
                par,
                snr,
                normalize,
                azimuth,
                time_window,
                max_freq=max_freq,
                diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            # filter data and integrate it
            psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                psd_all_i_diff_noise, psd_all_c_diff_noise, ear,
                normalization_type, sigma_smoothing, sigma_gauss_norm)

            # walk over test n_trials, in this case the number of sound samples
            for i_trials in range(n_trials):

                # decide how many sound samples should be used for the map. this is between 1 and number_of_sounds * number_of_elevations
                number_of_ss = np.random.randint(
                    1, psd_all_c.shape[0] * psd_all_c.shape[1])
                # choose the sound samples to learn the map
                # (sampled with replacement from the flattened index space)
                ind = np.random.randint(0,
                                        high=(psd_all_c.shape[0] *
                                              psd_all_c.shape[1]),
                                        size=number_of_ss)
                # get the indices for the sound_inds
                # (converts flat indices back to (sound, elevation) pairs)
                sounds_ind = np.unravel_index(
                    ind, (psd_all_c.shape[0], psd_all_c.shape[1]))

                # decide which type of map is learned_map
                for i_maps in range(4):

                    # monaural condition
                    if i_maps == 0:
                        # get only the defined sounds and elevations
                        # (tmp_data is zero everywhere except the sampled entries)
                        tmp_data = np.zeros(psd_mono.shape)
                        tmp_data[sounds_ind[0],
                                 sounds_ind[1], :] = psd_mono[sounds_ind[0],
                                                              sounds_ind[1], :]
                        # create learned_map
                        learned_map = tmp_data.mean(0)
                    elif i_maps == 1:
                        # get only the defined sounds and elevations
                        tmp_data = np.zeros(psd_mono_mean.shape)
                        tmp_data[sounds_ind[0],
                                 sounds_ind[1], :] = psd_mono_mean[
                                     sounds_ind[0], sounds_ind[1], :]
                        # create learned_map
                        learned_map = tmp_data.mean(0)
                    elif i_maps == 2:
                        # get only the defined sounds and elevations
                        tmp_data = np.zeros(psd_binaural.shape)
                        tmp_data[sounds_ind[0],
                                 sounds_ind[1], :] = psd_binaural[
                                     sounds_ind[0], sounds_ind[1], :]
                        # create learned_map
                        learned_map = tmp_data.mean(0)
                    elif i_maps == 3:
                        # get only the defined sounds and elevations
                        tmp_data = np.zeros(psd_binaural_mean.shape)
                        tmp_data[sounds_ind[0],
                                 sounds_ind[1], :] = psd_binaural_mean[
                                     sounds_ind[0], sounds_ind[1], :]
                        # create learned_map
                        learned_map = tmp_data.mean(0)

                    # store the map
                    # learned_maps_participants[i_par, :, :] = learned_map
                    # store the number of sounds used
                    trial_used_ss[i_maps, i_par, i_trials] = number_of_ss

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_mono_diff_noise, learned_map)
                    mono_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)
                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_mono_mean_diff_noise,
                                             learned_map)
                    mono_mean_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_binaural_diff_noise,
                                             learned_map)
                    bin_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_binaural_mean_diff_noise,
                                             learned_map)
                    bin_mean_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                mono_res, mono_mean_res, bin_res, bin_mean_res, trial_used_ss
            ], f)
Code example #4 — single_participant experiment, different azimuths (project: oesst/HRTF_Model)
def main(model_name='single_participant',
         exp_name='single_participant_different_azis',
         clean=False):
    """ Localizes sounds at azimuth 'azimuth' with a learned map at azimuth 0.

    A binaural map is learned for one participant from data at the frontal
    azimuth (index 12), then sounds generated at the test azimuth are
    localized with that map.  Results are cached under
    ``ROOT / 'models' / model_name`` and re-read on subsequent runs unless
    ``clean`` is True (``clean`` added for consistency with the sibling
    experiment scripts; default preserves the old behavior).
    """
    logger = logging.getLogger(__name__)
    logger.info(
        'Localizing sounds for a single participant at different azimuths')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    azimuth = 12
    snr = 0.2
    freq_bands = 128
    max_freq = 20000
    participant_number = 9

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, 25, 1)

    # filtering parameters
    normalization_type = 'sum_1'
    sigma_smoothing = 0
    sigma_gauss_norm = 1

    # use the mean subtracted map as the learned map
    mean_subtracted_map = True

    # choose which ear to use 'contra' or 'ipsi'
    ear = 'ipsi'

    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:
        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data (frontal azimuth 12, used for map learning)
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        12,
                                                        time_window,
                                                        max_freq=max_freq)

        # Take only given elevations
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        ####### Map Learning #######
        # filter data and integrate it for map learning
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # create map from defined processed data
        if mean_subtracted_map:
            learned_map = psd_binaural_mean.mean(0)
        else:
            learned_map = psd_binaural.mean(0)

        ####### Input Processing #######
        # process data for actual input at the test azimuth
        # FIX: pass max_freq here as well — previously this call fell back to
        # create_data's default, so map and input data could be built over
        # different frequency ranges
        psd_all_c, psd_all_i = generateData.create_data(
            freq_bands, participant_number, snr, normalize, azimuth,
            time_window, max_freq=max_freq)

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        ####### Localization #######
        # localize the sounds and save the results
        x_mono, y_mono = hp.localize_sound(psd_mono, learned_map)

        # localize the sounds and save the results
        x_mono_mean, y_mono_mean = hp.localize_sound(psd_mono_mean,
                                                     learned_map)

        # localize the sounds and save the results
        x_bin, y_bin = hp.localize_sound(psd_binaural, learned_map)

        # localize the sounds and save the results
        x_bin_mean, y_bin_mean = hp.localize_sound(psd_binaural_mean,
                                                   learned_map)

        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)
Code example #5 — snr_experiment (project: oesst/HRTF_Model)
def main(model_name='snr_experiment',
         exp_name='default',
         azimuth=12,
         freq_bands=128,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for all participants.

        Localization performance is evaluated for SNR values from 0.0 to 1.0
        in 0.1 steps.  For each participant and SNR, a map is learned from
        one noise realization and evaluated on a different one; the
        (gain, bias, score) coefficients for all four map conditions are
        stored.  Results are cached on disk and reloaded unless ``clean``
        is True.
    """
    logger = logging.getLogger(__name__)
    logger.info('Testing localization performance for different SNRs')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################

    participant_numbers = np.array([
        1, 2, 3, 8, 9, 10, 11, 12, 15, 17, 18, 19, 20, 21, 27, 28, 33, 40, 44,
        48, 50, 51, 58, 59, 60, 61, 65, 119, 124, 126, 127, 131, 133, 134, 135,
        137, 147, 148, 152, 153, 154, 155, 156, 158, 162, 163, 165
    ])

    normalize = False
    time_window = 0.1  # time window in sec

    # turn the elevation *count* into the list of elevation indices to use
    elevations = np.arange(0, elevations, 1)

    # SNR values to test (0.0 to 1.0 inclusive, step 0.1)
    snrs = np.arange(0.0, 1.1, 0.1)

    #snrs = snrs[::-1]
    #participant_numbers = participant_numbers[::-1]

    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window, freq_bands, max_freq,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [scores] = pickle.load(f)
    else:
        # scores per participant, per snr, for 4 different learned maps, (gain,bias,score)
        scores = np.zeros((len(participant_numbers), len(snrs), 4, 3))

        for i_par, par in enumerate(participant_numbers):
            for i_snr, snr in enumerate(snrs):
                # create or read the data (same-noise realization, for map learning)
                psd_all_c, psd_all_i = generateData.create_data(
                    freq_bands,
                    par,
                    snr,
                    normalize,
                    azimuth,
                    time_window,
                    max_freq=max_freq,
                    diff_noise=False)

                # Take only given elevations
                psd_all_c = psd_all_c[:, elevations, :]
                psd_all_i = psd_all_i[:, elevations, :]

                # filter data and integrate it
                psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                    psd_all_i, psd_all_c, ear, normalization_type,
                    sigma_smoothing, sigma_gauss_norm)

                # create map from defined processed data
                if mean_subtracted_map:
                    learned_map = psd_binaural_mean.mean(0)
                else:
                    learned_map = psd_binaural.mean(0)

                ### Different noise data ####
                # create or read the data (different noise realization, used
                # as localization input; overwrites the map-learning arrays)
                psd_all_c, psd_all_i = generateData.create_data(
                    freq_bands,
                    par,
                    snr,
                    normalize,
                    azimuth,
                    time_window,
                    max_freq=max_freq,
                    diff_noise=True)

                # Take only given elevations
                psd_all_c = psd_all_c[:, elevations, :]
                psd_all_i = psd_all_i[:, elevations, :]

                # filter data and integrate it
                psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                    psd_all_i, psd_all_c, ear, normalization_type,
                    sigma_smoothing, sigma_gauss_norm)

                # localize the sounds and save the results (condition 0: mono)
                x_test, y_test = hp.localize_sound(psd_mono, learned_map)
                x_test, y_test = hp_vis.scale_v(x_test, y_test,
                                                len(elevations))
                scores[i_par, i_snr,
                       0, :] = hp.get_localization_coefficients_score(
                           x_test, y_test)

                # localize the sounds and save the results (condition 1: mono mean-subtracted)
                x_test, y_test = hp.localize_sound(psd_mono_mean, learned_map)
                x_test, y_test = hp_vis.scale_v(x_test, y_test,
                                                len(elevations))
                scores[i_par, i_snr,
                       1, :] = hp.get_localization_coefficients_score(
                           x_test, y_test)

                # localize the sounds and save the results (condition 2: binaural)
                x_test, y_test = hp.localize_sound(psd_binaural, learned_map)
                x_test, y_test = hp_vis.scale_v(x_test, y_test,
                                                len(elevations))
                scores[i_par, i_snr,
                       2, :] = hp.get_localization_coefficients_score(
                           x_test, y_test)

                # localize the sounds and save the results (condition 3: binaural mean-subtracted)
                x_test, y_test = hp.localize_sound(psd_binaural_mean,
                                                   learned_map)
                x_test, y_test = hp_vis.scale_v(x_test, y_test,
                                                len(elevations))
                scores[i_par, i_snr,
                       3, :] = hp.get_localization_coefficients_score(
                           x_test, y_test)

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([scores], f)
Code example #6 — single_participant experiment, default (project: oesst/HRTF_Model) [truncated below]
def main(model_name: str = 'single_participant',
         exp_name: str = 'single_participant_default',
         azimuth: int = 12,
         participant_number: int = 9,
         snr: float = 0.2,
         freq_bands: int = 24,
         max_freq: int = 20000,
         elevations: int = 25,
         mean_subtracted_map: bool = True,
         ear: str = 'ipsi',
         normalization_type: str = 'sum_1',
         sigma_smoothing: int = 0,
         sigma_gauss_norm: int = 1,
         clean: bool = False) -> None:
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for a single participant.

        The learned map is estimated from *clean* data (``diff_noise=False``)
        and then evaluated on *noisy* data (``diff_noise=True``).  Results are
        cached in a pickle file under ``ROOT / 'models' / model_name``; pass
        ``clean=True`` to force recomputation.

        Parameters:
            model_name: Sub-directory name under ``ROOT / 'models'`` for results.
            exp_name: Base name used to build the unique experiment file name.
            azimuth: Azimuth index; ``(azimuth - 12) * 10`` is encoded in the
                experiment name (presumably degrees relative to the midline —
                TODO confirm against hp.create_exp_name conventions).
            participant_number: CIPIC-style participant id fed to
                ``generateData.create_data``.
            snr: Signal-to-noise ratio (stored as ``int(snr * 100)`` in the name).
            freq_bands: Number of frequency bands for the PSD data.
            max_freq: Upper frequency limit passed to the data generator.
            elevations: Number of elevation steps; converted below to an index
                array ``np.arange(0, elevations, 1)``.
            mean_subtracted_map: If True, learn the map from the mean-subtracted
                binaural PSDs instead of the raw binaural PSDs.
            ear: Which ear's signal to integrate ('ipsi' here) — passed through
                to ``hp.process_inputs``; exact semantics live in that helper.
            normalization_type: Normalization scheme name for ``hp.process_inputs``.
            sigma_smoothing: Smoothing sigma forwarded to ``hp.process_inputs``.
            sigma_gauss_norm: Gaussian-normalization sigma for ``hp.process_inputs``.
            clean: If True, ignore any cached result file and recompute.

        Returns:
            None.  Side effect: writes (or reads) the pickled result list
            ``[x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
            x_bin_mean, y_bin_mean]``.
    """
    logger = logging.getLogger(__name__)
    logger.info('Localizing sounds for a single participant')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    normalize = False
    time_window = 0.1  # time window in sec

    # NOTE: shadows the int parameter — from here on `elevations` is an
    # index array [0, 1, ..., elevations-1] used to slice the PSD data.
    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    # Every parameter that influences the result is folded into the file
    # name, so each parameter combination gets its own cache file.
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str

    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:
        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data
        # First pass: clean (non-differential) noise — this data is only
        # used to learn the localization map.
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq,
                                                        diff_noise=False)

        # Take only given elevations
        # Axis 1 of the PSD arrays indexes elevation.
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # create map from defined processed data
        if mean_subtracted_map:
            learned_map = psd_binaural_mean.mean(0)
        else:
            learned_map = psd_binaural.mean(0)

        # create or read the data
        # Second pass: same parameters but diff_noise=True — the noisy test
        # set.  The processed PSDs below intentionally overwrite the clean
        # ones; only `learned_map` is kept from the first pass.
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq,
                                                        diff_noise=True)

        # Take only given elevations
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # localize the sounds and save the results
        x_mono, y_mono = hp.localize_sound(psd_mono, learned_map)

        # localize the sounds and save the results
        x_mono_mean, y_mono_mean = hp.localize_sound(psd_mono_mean,
                                                     learned_map)

        # localize the sounds and save the results
        x_bin, y_bin = hp.localize_sound(psd_binaural, learned_map)

        # localize the sounds and save the results
        x_bin_mean, y_bin_mean = hp.localize_sound(psd_binaural_mean,
                                                   learned_map)

        # Cache the four (x, y) localization result pairs; the list order
        # must match the unpacking order in the read branch above.
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)