Example 1
def main(model_name='localize_single_participant',
         exp_name='single_participant_default',
         azimuth=12,
         participant_number=9,
         snr=0.0,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False,
         steady_state=False):
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for a single participant.
    """
    logger = logging.getLogger(__name__)
    logger.info('Localizing sounds for a single participant')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str

    # store responses
    r_ipsi_all = np.zeros((len(SOUND_FILES), len(elevations), freq_bands))
    q_ele_all = np.zeros((len(SOUND_FILES), len(elevations), len(elevations)))
    localization_results_binaural = np.zeros(
        (len(SOUND_FILES), len(elevations), 3))
    localization_results_monaural = np.zeros(
        (len(SOUND_FILES), len(elevations), 3))

    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                localization_results_binaural, localization_results_monaural,
                q_ele_all, r_ipsi_all
            ] = pickle.load(f)
    else:
        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq)

        # Take only given elevations
        input_c = psd_all_c[:, elevations, :]
        input_i = psd_all_i[:, elevations, :]

        # normalize inputs over frequencies
        input_c = input_c / input_c.sum(2)[:, :, np.newaxis]
        input_i = input_i / input_i.sum(2)[:, :, np.newaxis]

        # initialize network; if steady_state is True, skip the Euler integration and compute the steady-state response directly
        net = network.Network(steady_state=steady_state)

        # load the previously learned network weights
        exp_file_weights = Path(exp_file.as_posix() + '_weights')
        with exp_file_weights.open('rb') as f:
            logger.info('Reading model data from file')
            [w, w_sounds_i, w_sounds_c] = pickle.load(f)

        # normalize weights
        net.w = net.normalize_weights(w)
        net.w_sounds_i = w_sounds_i
        net.w_sounds_c = w_sounds_c

        ############## MONAURAL #################
        # walk over sounds
        for sound, _ in enumerate(SOUND_FILES):
            for i_ele, ele in enumerate(elevations):

                in_i = input_i[sound, ele]
                # monaural condition: flat, non-informative contralateral input
                in_c = np.zeros(input_c[sound, ele].shape) + 0.1

                # NO PRIOR
                q_ele, r_ipsi, w, w_sounds_i, w_sounds_c = net.run(
                    in_i, in_c, ele, sound, train=False, prior_info=False)
                # localize and save results
                localization_results_monaural[sound, i_ele, 0] = ele
                localization_results_monaural[sound, i_ele,
                                              1] = q_ele[-1, :].argmax()

                # PRIOR
                q_ele, r_ipsi, w, w_sounds_i, w_sounds_c = net.run(
                    in_i, in_c, ele, sound, train=False, prior_info=True)
                # localize and save results
                localization_results_monaural[sound, i_ele,
                                              2] = q_ele[-1, :].argmax()

        ############## BINAURAL #################
        # walk over sounds
        for sound, _ in enumerate(SOUND_FILES):
            for i_ele, ele in enumerate(elevations):

                in_i = input_i[sound, ele]
                in_c = input_c[sound, ele]

                # NO PRIOR
                q_ele, r_ipsi, w, w_sounds_i, w_sounds_c = net.run(
                    in_i, in_c, ele, sound, train=False, prior_info=False)
                # localize and save results
                localization_results_binaural[sound, i_ele, 0] = ele
                localization_results_binaural[sound, i_ele,
                                              1] = q_ele[-1, :].argmax()

                # PRIOR
                q_ele, r_ipsi, w, w_sounds_i, w_sounds_c = net.run(
                    in_i, in_c, ele, sound, train=False, prior_info=True)
                # localize and save results
                localization_results_binaural[sound, i_ele,
                                              2] = q_ele[-1, :].argmax()

        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                localization_results_binaural, localization_results_monaural,
                q_ele_all, r_ipsi_all
            ], f)
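
A minimal sketch of the sum-to-1 normalization used above, assuming inputs of shape (sounds, elevations, freq_bands); the shapes below are illustrative:

import numpy as np

rng = np.random.default_rng(0)
psd = rng.random((3, 25, 24))  # (sounds, elevations, freq_bands)
# dividing by the sum over the frequency axis makes each spectrum sum to 1
psd_norm = psd / psd.sum(axis=2, keepdims=True)
assert np.allclose(psd_norm.sum(axis=2), 1.0)
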
Example 2
def main(model_name='different_learned_maps',
         exp_name='localization_default',
         azimuth=12,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script takes the filtered data and tries to localize sounds with different, learned map
        for all participants.
    """
    logger = logging.getLogger(__name__)
    logger.info('Localizing sounds for all participants, different maps')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    participant_numbers = np.array([
        1, 2, 3, 8, 9, 10, 11, 12, 15, 17, 18, 19, 20, 21, 27, 28, 33, 40, 44,
        48, 50, 51, 58, 59, 60, 61, 65, 119, 124, 126, 127, 131, 133, 134, 135,
        137, 147, 148, 152, 153, 154, 155, 156, 158, 162, 163, 165
    ])

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file ' + exp_file.as_posix())
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:

        x_mono = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                           len(elevations), 2))
        y_mono = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_mono_mean = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                                len(elevations), 2))
        y_mono_mean = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_bin = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                          len(elevations), 2))
        y_bin = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        x_bin_mean = np.zeros((4, len(participant_numbers), len(SOUND_FILES),
                               len(elevations), 2))
        y_bin_mean = np.zeros(
            (4, len(participant_numbers), len(SOUND_FILES), len(elevations)))
        for i_par, par in enumerate(participant_numbers):

            # create or read the data
            psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                            par,
                                                            snr,
                                                            normalize,
                                                            azimuth,
                                                            time_window,
                                                            max_freq=max_freq,
                                                            diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            # filter data and integrate it
            psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
                sigma_gauss_norm)

            ### Read different noise data ###
            # create or read the data
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands,
                par,
                snr,
                normalize,
                azimuth,
                time_window,
                max_freq=max_freq,
                diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            # filter data and integrate it
            psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                psd_all_i_diff_noise, psd_all_c_diff_noise, ear,
                normalization_type, sigma_smoothing, sigma_gauss_norm)

            # walk over the 4 different maps: mono, mono_mean, bina, bina_mean
            for i_map in range(4):
                # create map from defined processed data

                if i_map == 0:
                    learned_map = psd_mono.mean(0)
                elif i_map == 1:
                    learned_map = psd_mono_mean.mean(0)
                elif i_map == 2:
                    learned_map = psd_binaural.mean(0)
                elif i_map == 3:
                    # bina_mean
                    learned_map = psd_binaural_mean.mean(0)
                else:
                    logger.error('Unexpected map index: %d', i_map)

                # localize the sounds with each map and save the results
                x_mono[i_map, i_par], y_mono[i_map, i_par] = hp.localize_sound(
                    psd_mono_diff_noise, learned_map)
                x_mono_mean[i_map, i_par], y_mono_mean[i_map, i_par] = (
                    hp.localize_sound(psd_mono_mean_diff_noise, learned_map))
                x_bin[i_map, i_par], y_bin[i_map, i_par] = hp.localize_sound(
                    psd_binaural_diff_noise, learned_map)
                x_bin_mean[i_map, i_par], y_bin_mean[i_map, i_par] = (
                    hp.localize_sound(psd_binaural_mean_diff_noise,
                                      learned_map))

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)
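
All of these scripts share the same load-or-compute caching pattern: results are read from the pickle file unless it is missing or clean is set. A hedged, generic sketch of that pattern (the helper name load_or_compute is hypothetical):

import pickle
from pathlib import Path

def load_or_compute(exp_file: Path, compute, clean=False):
    """Return cached results unless clean is True or the cache is missing."""
    if not clean and exp_file.is_file():
        with exp_file.open('rb') as f:
            return pickle.load(f)
    results = compute()
    exp_file.parent.mkdir(parents=True, exist_ok=True)
    with exp_file.open('wb') as f:
        pickle.dump(results, f)
    return results
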
Example 3
def main(model_name='hrtf_comparison',
         exp_name='single_participant',
         azimuth=12,
         participant_number=9,
         snr=0.0,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script calculates the correlation coefficient between the ipsi- and contralateral HRTF and the learned maps for a single participant.
    """
    logger = logging.getLogger(__name__)
    logger.info(
        'Comparing learned HRTF maps with the actual HRTF of a participant')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                hrtfs_i, hrtfs_c, learned_map_mono, learned_map_mono_mean,
                learned_map_bin, learned_map_bin_mean
            ] = pickle.load(f)
    else:

        # create or read the data
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq)

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # create map from defined processed data
        learned_map_mono = psd_mono.mean(0)
        learned_map_mono_mean = psd_mono_mean.mean(0)
        learned_map_bin = psd_binaural.mean(0)
        learned_map_bin_mean = psd_binaural_mean.mean(0)
        # learned_map = hp.create_map(psd_mono, False)
        # Get the actual HRTF
        hrtfs_c, hrtfs_i, _ = generateHRTFs.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq,
                                                        clean=clean)

        # filter and integrate the HRTF data; process both ears from the raw
        # HRTFs before overwriting either of them
        hrtfs_c_proc, _, _, _ = hp.process_inputs(
            hrtfs_i, hrtfs_c, 'contra', normalization_type, sigma_smoothing,
            sigma_gauss_norm)
        hrtfs_i_proc, _, _, _ = hp.process_inputs(
            hrtfs_i, hrtfs_c, 'ipsi', normalization_type, sigma_smoothing,
            sigma_gauss_norm)
        hrtfs_c, hrtfs_i = hrtfs_c_proc, hrtfs_i_proc

        # remove mean for later comparison
        hrtfs_c = np.squeeze(hrtfs_c[0, elevations, :])
        hrtfs_c -= hrtfs_c.mean()
        hrtfs_i = np.squeeze(hrtfs_i[0, elevations, :])
        hrtfs_i -= hrtfs_i.mean()

        # remove unwanted elevations
        learned_map_mono = learned_map_mono[elevations, :]
        learned_map_mono_mean = learned_map_mono_mean[elevations, :]
        learned_map_bin = learned_map_bin[elevations, :]
        learned_map_bin_mean = learned_map_bin_mean[elevations, :]

        learned_map_mono -= learned_map_mono.mean()
        learned_map_mono_mean -= learned_map_mono_mean.mean()
        learned_map_bin -= learned_map_bin.mean()
        learned_map_bin_mean -= learned_map_bin_mean.mean()

        # NOTE: the Pearson correlations between the learned maps and the
        # ipsi-/contralateral HRTFs (pearson2d) are not computed here; the
        # mean-subtracted maps and HRTFs needed for them are saved below.

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                hrtfs_i, hrtfs_c, learned_map_mono, learned_map_mono_mean,
                learned_map_bin, learned_map_bin_mean
            ], f)
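
The note above refers to a pearson2d helper whose implementation is not shown in this listing; a plausible sketch, assuming it is the plain Pearson coefficient over the flattened 2-D maps:

import numpy as np

def pearson2d(a, b):
    """Pearson correlation of two equally shaped 2-D maps (flattened)."""
    a = np.asarray(a, dtype=float).ravel()
    b = np.asarray(b, dtype=float).ravel()
    a = a - a.mean()
    b = b - b.mean()
    return float((a @ b) / np.sqrt((a @ a) * (b @ b)))
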
Example 4
def main(model_name='train_network_single_participant',
         exp_name='single_participant_default',
         azimuth=12,
         participant_number=9,
         snr=0.0,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False,
         steady_state=False):
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for a single participant.
    """
    logger = logging.getLogger(__name__)
    logger.info('Training the network weights for a single participant')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / (exp_name_str + '_weights')

    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [w, w_sounds_i, w_sounds_c] = pickle.load(f)
    else:

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq)

        # Take only given elevations
        input_c = psd_all_c[:, elevations, :]
        input_i = psd_all_i[:, elevations, :]

        # normalize inputs over frequencies
        input_c = input_c / input_c.sum(2)[:, :, np.newaxis]
        input_i = input_i / input_i.sum(2)[:, :, np.newaxis]

        # initialize network; if steady_state is True, skip the Euler integration and compute the steady-state response directly
        net = network.Network(steady_state=steady_state)

        # if we use the steady state response to learn, we need more trials
        if steady_state:
            trials = 1500 * 10
        else:
            trials = 25

        for _ in range(trials):
            # draw a random elevation and sound sample for this trial
            ele = np.random.randint(0, len(elevations))
            sound = np.random.randint(0, len(SOUND_FILES))
            in_i = input_i[sound, ele]
            in_c = input_c[sound, ele]

            q_ele, r_ipsi, w, w_sounds_i, w_sounds_c = net.run(in_i,
                                                               in_c,
                                                               ele,
                                                               sound,
                                                               train=True,
                                                               prior_info=True)

        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([w, w_sounds_i, w_sounds_c], f)
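
A hedged usage sketch of this training entry point; it writes a '<experiment name>_weights' file of the same form that Example 1 reads back, and the logging setup here is an assumption:

import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # clean=True forces retraining even if a weights file already exists
    main(participant_number=9, steady_state=False, clean=True)
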
Example 5
def main(model_name='parameter_sweep',
         exp_name='default',
         azimuth=12,
         snr=0.2,
         freq_bands=128,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         clean=False):
    """ Sweeps the smoothing and Gaussian normalization sigmas and records the
        localization scores, averaged over all participants.
    """
    logger = logging.getLogger(__name__)
    logger.info('Parameter Sweep Experiment.')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
                                    12, 15, 17, 18, 19, 20,
                                    21, 27, 28, 33, 40, 44,
                                    48, 50, 51, 58, 59, 60,
                                    61, 65, 119, 124, 126,
                                    127, 131, 133, 134, 135,
                                    137, 147, 148, 152, 153,
                                    154, 155, 156, 158, 162,
                                    163, 165])
    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)

    sigma_smoothing_vals = np.arange(1, 3.0, 0.1)
    sigma_gauss_norm_vals = np.arange(1, 3.0, 0.1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [scores, sigma_smoothing_vals, sigma_gauss_norm_vals] = pickle.load(f)
    else:

        scores = np.zeros((sigma_smoothing_vals.shape[0], sigma_gauss_norm_vals.shape[0], 3))

        for i_par, par in enumerate(participant_numbers):

            # create or read the data
            psd_all_c, psd_all_i = generateData.create_data(
                freq_bands, par, snr, normalize, azimuth, time_window, max_freq=max_freq, diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            ### Get different noise data ###
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands, par, snr, normalize, azimuth, time_window, max_freq=max_freq, diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            for i_smooth, sigma_smooth in enumerate(sigma_smoothing_vals):
                for i_gauss, sigma_gauss in enumerate(sigma_gauss_norm_vals):

                    # filter data and integrate it
                    psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                        psd_all_i, psd_all_c, ear, normalization_type, sigma_smooth, sigma_gauss)

                    # create map from defined processed data
                    if mean_subtracted_map:
                        learned_map = psd_binaural_mean.mean(0)
                    else:
                        learned_map = psd_binaural.mean(0)

                    # filter data and integrate it
                    psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                        psd_all_i_diff_noise, psd_all_c_diff_noise, ear, normalization_type, sigma_smooth, sigma_gauss)

                    # localize the sounds and save the results
                    x_test, y_test = hp.localize_sound(psd_binaural_diff_noise, learned_map)
                    x_test, y_test = hp_vis.scale_v(x_test, y_test, len(elevations))
                    scores[i_smooth, i_gauss, :] += hp.get_localization_coefficients_score(x_test, y_test)
        # get the mean scores over participants
        scores = scores / len(participant_numbers)

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([scores, sigma_smoothing_vals, sigma_gauss_norm_vals], f)
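
Once the sweep has finished, the saved scores grid can be searched for the best parameter pair. A minimal sketch, assuming the third component (index 2) is the score to maximize; the random scores below are a stand-in for the pickled array:

import numpy as np

sigma_smoothing_vals = np.arange(1, 3.0, 0.1)
sigma_gauss_norm_vals = np.arange(1, 3.0, 0.1)
scores = np.random.rand(len(sigma_smoothing_vals),
                        len(sigma_gauss_norm_vals), 3)  # stand-in

i, j = np.unravel_index(scores[:, :, 2].argmax(), scores.shape[:2])
print(sigma_smoothing_vals[i], sigma_gauss_norm_vals[j])
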
Example 6
def main(model_name='map_learning',
         exp_name='localization_all_maps',
         azimuth=12,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         n_trials=100,
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ Learns the elevation spectra map gradually over presented sounds and saves the localization quality for each trial
    """
    logger = logging.getLogger(__name__)
    logger.info('Learning different maps for all participants')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    participant_numbers = np.array([127, 131, 133, 134, 135])

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, (azimuth - 12) * 10, normalize,
        len(elevations), ear, n_trials
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [mono_res, mono_mean_res, bin_res, bin_mean_res,
             trial_used_ss] = pickle.load(f)
    else:

        # store only the localization coefficients (gain, bias, score)
        mono_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        mono_mean_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        bin_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        bin_mean_res = np.zeros((4, len(participant_numbers), n_trials, 3))
        trial_used_ss = np.zeros((4, len(participant_numbers), n_trials))

        for i_par, par in enumerate(participant_numbers):
            logger.info(
                'Localizing {0:d} trials for participant {1:d}. \n'.format(
                    n_trials, par))

            # create or read the data. psd_all_c = (sounds,elevations,frequency bands)
            psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                            par,
                                                            snr,
                                                            normalize,
                                                            azimuth,
                                                            time_window,
                                                            max_freq=max_freq,
                                                            diff_noise=False)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            # filter data and integrate it
            psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
                sigma_gauss_norm)

            ### Load different noise data ###
            # create or read the data. psd_all_c = (sounds,elevations,frequency bands)
            psd_all_c_diff_noise, psd_all_i_diff_noise = generateData.create_data(
                freq_bands,
                par,
                snr,
                normalize,
                azimuth,
                time_window,
                max_freq=max_freq,
                diff_noise=True)

            # Take only given elevations
            psd_all_c_diff_noise = psd_all_c_diff_noise[:, elevations, :]
            psd_all_i_diff_noise = psd_all_i_diff_noise[:, elevations, :]

            # filter data and integrate it
            psd_mono_diff_noise, psd_mono_mean_diff_noise, psd_binaural_diff_noise, psd_binaural_mean_diff_noise = hp.process_inputs(
                psd_all_i_diff_noise, psd_all_c_diff_noise, ear,
                normalization_type, sigma_smoothing, sigma_gauss_norm)

            # walk over the n_trials test trials
            for i_trials in range(n_trials):

                # decide how many sound samples should be used for the map. this is between 1 and number_of_sounds * number_of_elevations
                number_of_ss = np.random.randint(
                    1, psd_all_c.shape[0] * psd_all_c.shape[1])
                # choose the sound samples to learn the map
                ind = np.random.randint(0,
                                        high=(psd_all_c.shape[0] *
                                              psd_all_c.shape[1]),
                                        size=number_of_ss)
                # get the indices for the sound_inds
                sounds_ind = np.unravel_index(
                    ind, (psd_all_c.shape[0], psd_all_c.shape[1]))

                # walk over the four map types: mono, mono_mean, bin, bin_mean
                for i_maps, psd_map in enumerate(
                        [psd_mono, psd_mono_mean, psd_binaural,
                         psd_binaural_mean]):
                    # keep only the drawn (sound, elevation) samples
                    tmp_data = np.zeros(psd_map.shape)
                    tmp_data[sounds_ind[0], sounds_ind[1], :] = psd_map[
                        sounds_ind[0], sounds_ind[1], :]
                    # create the learned map from the sampled data
                    learned_map = tmp_data.mean(0)

                    # store the number of sounds used
                    trial_used_ss[i_maps, i_par, i_trials] = number_of_ss

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_mono_diff_noise, learned_map)
                    mono_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)
                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_mono_mean_diff_noise,
                                             learned_map)
                    mono_mean_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_binaural_diff_noise,
                                             learned_map)
                    bin_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

                    # localize the sounds and save the results
                    x, y = hp.localize_sound(psd_binaural_mean_diff_noise,
                                             learned_map)
                    bin_mean_res[
                        i_maps, i_par,
                        i_trials, :] = hp.get_localization_coefficients_score(
                            x, y)

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                mono_res, mono_mean_res, bin_res, bin_mean_res, trial_used_ss
            ], f)
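
A minimal sketch of the flat-index sampling used in the map-learning loop above: flat indices over the (sounds, elevations) grid are drawn and converted back to index pairs with np.unravel_index (the sizes below are illustrative):

import numpy as np

n_sounds, n_elevations = 20, 25
ind = np.random.randint(0, n_sounds * n_elevations, size=10)
sounds_ind = np.unravel_index(ind, (n_sounds, n_elevations))
# sounds_ind[0] holds the sound indices, sounds_ind[1] the elevation indices

Example 7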
def main(model_name='single_participant',
         exp_name='single_participant_different_azis'):
    """ Localizes sounds at azimuth 'azimuth' with a learned map at azimuth 0.
    """
    logger = logging.getLogger(__name__)
    logger.info(
        'Localizing sounds for a single participant at different azimuths')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    azimuth = 12
    snr = 0.2
    freq_bands = 128
    max_freq = 20000
    participant_number = 9

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, 25, 1)

    # filtering parameters
    normalization_type = 'sum_1'
    sigma_smoothing = 0
    sigma_gauss_norm = 1

    # use the mean subtracted map as the learned map
    mean_subtracted_map = True

    # choose which ear to use 'contra' or 'ipsi'
    ear = 'ipsi'

    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:
        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        12,  # learn the map at azimuth index 12 (0 degrees)
                                                        time_window,
                                                        max_freq=max_freq)

        # Take only given elevations
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        ####### Map Learning #######
        # filter data and integrate it for map learning
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # create map from defined processed data
        if mean_subtracted_map:
            learned_map = psd_binaural_mean.mean(0)
        else:
            learned_map = psd_binaural.mean(0)

        ####### Input Processing #######
        # process data for the actual input at the test azimuth; pass max_freq
        # so the test data matches the frequency range of the learned map
        psd_all_c, psd_all_i = generateData.create_data(
            freq_bands, participant_number, snr, normalize, azimuth,
            time_window, max_freq=max_freq)

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        ####### Localization #######
        # localize the sounds and save the results
        x_mono, y_mono = hp.localize_sound(psd_mono, learned_map)

        # localize the sounds and save the results
        x_mono_mean, y_mono_mean = hp.localize_sound(psd_mono_mean,
                                                     learned_map)

        # localize the sounds and save the results
        x_bin, y_bin = hp.localize_sound(psd_binaural, learned_map)

        # localize the sounds and save the results
        x_bin_mean, y_bin_mean = hp.localize_sound(psd_binaural_mean,
                                                   learned_map)

        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)
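
Throughout these listings azimuth is a grid index, and the expression (azimuth - 12) * 10 that appears in the experiment names converts it to degrees, so index 12 is straight ahead at 0 degrees. A short sketch of that convention (the helper name is hypothetical):

def azimuth_index_to_degrees(azimuth_index: int) -> int:
    # index 12 is straight ahead (0 degrees), with 10-degree steps
    return (azimuth_index - 12) * 10

assert azimuth_index_to_degrees(12) == 0
assert azimuth_index_to_degrees(13) == 10
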
Example 8
def main(model_name='snr_experiment',
         exp_name='default',
         azimuth=12,
         freq_bands=128,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for all participants.
    """
    logger = logging.getLogger(__name__)
    logger.info('Testing localization performance for different SNRs')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################

    participant_numbers = np.array([
        1, 2, 3, 8, 9, 10, 11, 12, 15, 17, 18, 19, 20, 21, 27, 28, 33, 40, 44,
        48, 50, 51, 58, 59, 60, 61, 65, 119, 124, 126, 127, 131, 133, 134, 135,
        137, 147, 148, 152, 153, 154, 155, 156, 158, 162, 163, 165
    ])

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)

    snrs = np.arange(0.0, 1.1, 0.1)

    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window, freq_bands, max_freq,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str
    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [scores] = pickle.load(f)
    else:
        # scores per participant, per snr, for 4 different learned maps, (gain,bias,score)
        scores = np.zeros((len(participant_numbers), len(snrs), 4, 3))

        for i_par, par in enumerate(participant_numbers):
            for i_snr, snr in enumerate(snrs):
                # create or read the data
                psd_all_c, psd_all_i = generateData.create_data(
                    freq_bands,
                    par,
                    snr,
                    normalize,
                    azimuth,
                    time_window,
                    max_freq=max_freq,
                    diff_noise=False)

                # Take only given elevations
                psd_all_c = psd_all_c[:, elevations, :]
                psd_all_i = psd_all_i[:, elevations, :]

                # filter data and integrate it
                psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                    psd_all_i, psd_all_c, ear, normalization_type,
                    sigma_smoothing, sigma_gauss_norm)

                # create map from defined processed data
                if mean_subtracted_map:
                    learned_map = psd_binaural_mean.mean(0)
                else:
                    learned_map = psd_binaural.mean(0)

                ### Different noise data ####
                # create or read the data
                psd_all_c, psd_all_i = generateData.create_data(
                    freq_bands,
                    par,
                    snr,
                    normalize,
                    azimuth,
                    time_window,
                    max_freq=max_freq,
                    diff_noise=True)

                # Take only given elevations
                psd_all_c = psd_all_c[:, elevations, :]
                psd_all_i = psd_all_i[:, elevations, :]

                # filter data and integrate it
                psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
                    psd_all_i, psd_all_c, ear, normalization_type,
                    sigma_smoothing, sigma_gauss_norm)

                # localize the sounds with each of the four maps and save the
                # (gain, bias, score) results
                for i_map, psd in enumerate(
                        [psd_mono, psd_mono_mean, psd_binaural,
                         psd_binaural_mean]):
                    x_test, y_test = hp.localize_sound(psd, learned_map)
                    x_test, y_test = hp_vis.scale_v(x_test, y_test,
                                                    len(elevations))
                    scores[i_par, i_snr, i_map, :] = (
                        hp.get_localization_coefficients_score(x_test, y_test))

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([scores], f)
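
After the run, scores has shape (participants, snrs, 4 map types, 3 coefficients). A minimal sketch of averaging one coefficient over participants per SNR; the random array is a stand-in, and the (gain, bias, score) coefficient order is taken from the comment above:

import numpy as np

snrs = np.arange(0.0, 1.1, 0.1)
scores = np.random.rand(47, len(snrs), 4, 3)  # stand-in for the pickled array
# mean score (index 2) of the mean-subtracted binaural map (map 3) per SNR
mean_score_per_snr = scores[:, :, 3, 2].mean(axis=0)
print(mean_score_per_snr.shape)  # (11,)
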
Example 9
def main(model_name='elevation_spectra_maps',
         exp_name='unfiltered',
         azimuth=12,
         participant_numbers=None,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         clean=False):
    """ Collects the raw ipsi- and contralateral elevation spectra maps for
        all participants and sounds and stores them to disk.
    """
    logger = logging.getLogger(__name__)
    logger.info('Creating maps for all participants and sounds')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################

    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    # if participant_numbers is not given we use all of them
    if not participant_numbers:
        participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
                                        12, 15, 17, 18, 19, 20,
                                        21, 27, 28, 33, 40, 44,
                                        48, 50, 51, 58, 59, 60,
                                        61, 65, 119, 124, 126,
                                        127, 131, 133, 134, 135,
                                        137, 147, 148, 152, 153,
                                        154, 155, 156, 158, 162,
                                        163, 165])

        exp_name_str = hp.create_exp_name([exp_name, time_window, int(snr * 100), freq_bands, max_freq,
                                           len(participant_numbers), (azimuth - 12) * 10, normalize, len(elevations)])
        exp_path = ROOT / 'models' / model_name
        exp_file = exp_path / exp_name_str
    else:
        # participant_numbers were given as a comma-separated string;
        # cast them to an int array
        participant_numbers = np.array(
            [int(i) for i in participant_numbers.split(',')])
        logger.info('Using participants: {}'.format(participant_numbers))

        exp_name_str = hp.create_exp_name([exp_name, time_window, int(snr * 100), freq_bands, max_freq,
                                           participant_numbers, (azimuth - 12) * 10, normalize, len(elevations)])
        exp_path = ROOT / 'models' / model_name
        exp_file = exp_path / exp_name_str

    ########################################################################
    ########################################################################

    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [ipsi_maps, contra_maps] = pickle.load(f)
    else:

        ipsi_maps = np.zeros((len(participant_numbers), len(SOUND_FILES), len(elevations), freq_bands))
        contra_maps = np.zeros((len(participant_numbers), len(SOUND_FILES), len(elevations), freq_bands))

        for i_par, par in enumerate(participant_numbers):

            # create or read the data
            psd_all_c, psd_all_i = generateData.create_data(
                freq_bands, par, snr, normalize, azimuth, time_window,
                max_freq=max_freq)

            # Take only given elevations
            psd_all_c = psd_all_c[:, elevations, :]
            psd_all_i = psd_all_i[:, elevations, :]

            ipsi_maps[i_par, :, :, :] = psd_all_i
            contra_maps[i_par, :, :, :] = psd_all_c

        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([ipsi_maps, contra_maps], f)
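
A minimal sketch of the participant_numbers handling above: when the argument is given, it is expected to be a comma-separated string and is cast to an int array:

import numpy as np

participant_numbers = np.array([int(i) for i in '1,2,3'.split(',')])
print(participant_numbers)  # [1 2 3]
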
Example 10
def main(model_name='single_participant',
         exp_name='single_participant_default',
         azimuth=12,
         participant_number=9,
         snr=0.2,
         freq_bands=24,
         max_freq=20000,
         elevations=25,
         mean_subtracted_map=True,
         ear='ipsi',
         normalization_type='sum_1',
         sigma_smoothing=0,
         sigma_gauss_norm=1,
         clean=False):
    """ This script takes the filtered data and tries to localize sounds with a learned map
        for a single participant.
    """
    logger = logging.getLogger(__name__)
    logger.info('Localizing sounds for a single participant')

    ########################################################################
    ######################## Set parameters ################################
    ########################################################################
    normalize = False
    time_window = 0.1  # time window in sec

    elevations = np.arange(0, elevations, 1)
    ########################################################################
    ########################################################################

    # create unique experiment name
    exp_name_str = hp.create_exp_name([
        exp_name, normalization_type, sigma_smoothing, sigma_gauss_norm,
        mean_subtracted_map, time_window,
        int(snr * 100), freq_bands, max_freq, participant_number,
        (azimuth - 12) * 10, normalize,
        len(elevations), ear
    ])

    exp_path = ROOT / 'models' / model_name
    exp_file = exp_path / exp_name_str

    # check if model results exist already and load
    if not clean and exp_path.exists() and exp_file.is_file():
        # try to load the model files
        with exp_file.open('rb') as f:
            logger.info('Reading model data from file')
            [
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ] = pickle.load(f)
    else:
        # create Path
        exp_path.mkdir(parents=True, exist_ok=True)
        # create or read the data
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq,
                                                        diff_noise=False)

        # Take only given elevations
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # create map from defined processed data
        if mean_subtracted_map:
            learned_map = psd_binaural_mean.mean(0)
        else:
            learned_map = psd_binaural.mean(0)

        # create or read the test data (independent noise realization)
        psd_all_c, psd_all_i = generateData.create_data(freq_bands,
                                                        participant_number,
                                                        snr,
                                                        normalize,
                                                        azimuth,
                                                        time_window,
                                                        max_freq=max_freq,
                                                        diff_noise=True)

        # Take only given elevations
        psd_all_c = psd_all_c[:, elevations, :]
        psd_all_i = psd_all_i[:, elevations, :]

        # filter data and integrate it
        psd_mono, psd_mono_mean, psd_binaural, psd_binaural_mean = hp.process_inputs(
            psd_all_i, psd_all_c, ear, normalization_type, sigma_smoothing,
            sigma_gauss_norm)

        # localize the sounds and save the results
        x_mono, y_mono = hp.localize_sound(psd_mono, learned_map)

        # localize the sounds and save the results
        x_mono_mean, y_mono_mean = hp.localize_sound(psd_mono_mean,
                                                     learned_map)

        # localize the sounds and save the results
        x_bin, y_bin = hp.localize_sound(psd_binaural, learned_map)

        # localize the sounds and save the results
        x_bin_mean, y_bin_mean = hp.localize_sound(psd_binaural_mean,
                                                   learned_map)

        with exp_file.open('wb') as f:
            logger.info('Creating model file')
            pickle.dump([
                x_mono, y_mono, x_mono_mean, y_mono_mean, x_bin, y_bin,
                x_bin_mean, y_bin_mean
            ], f)
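
The hp.get_localization_coefficients_score calls used throughout return a (gain, bias, score) triple. Its implementation is not shown in these listings; a plausible sketch, assuming a linear fit of predicted versus true elevation plus an r-squared goodness-of-fit score:

import numpy as np

true_ele = np.arange(25, dtype=float)
pred_ele = 0.8 * true_ele + 2.0 + np.random.randn(25)  # stand-in predictions

gain, bias = np.polyfit(true_ele, pred_ele, 1)  # slope and intercept
score = np.corrcoef(true_ele, pred_ele)[0, 1] ** 2  # r^2 as the score
print(gain, bias, score)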