Example No. 1
def create_design_matrix(k_subject):
    '''Creates the design matrix X for one subject'''

    start = time.time()
    X = [[[[None for k_session in range(n_sessions)]
           for k_direction in range(n_directions)] for k_fit_N in range(n_N)]
         for k_fit_scheme in range(n_schemes)]

    X_tmp = [[[[None for k_session in range(n_sessions)]
               for k_direction in range(n_directions)]
              for k_fit_N in range(n_N)] for k_fit_scheme in range(n_schemes)]

    print('X is initialised!')

    # Experimental design information
    eps = 1e-5  # For floating points issues

    between_stimuli_duration = 1.3
    initial_time = between_stimuli_duration + eps
    final_time_tmp = between_stimuli_duration * (n_stimuli + 1) + eps
    # Every 15±3 trials: one interruption of 8-12 s
    stimulus_onsets = np.linspace(initial_time, final_time_tmp, n_stimuli)
    # We add some time to simulate breaks
    stimulus = 0

    while True:
        # Number of regularly spaced stimuli
        n_local_regular_stimuli = rand.randint(12, 18)
        stimulus_shifted = stimulus + n_local_regular_stimuli  # Current stimulus before the break
        if stimulus_shifted > n_stimuli:  # The next break is supposed to occur after all stimuli are shown
            break
        stimulus_onsets[stimulus_shifted:] += rand.randint(
            8, 12) - between_stimuli_duration  # We consider a break of 8-12s
        stimulus = stimulus_shifted
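
    # Net effect: after each block of 12-18 regularly spaced stimuli, the
    # 1.3 s gap before the next stimulus is replaced by a randomly drawn
    # 8-12 s break (all subsequent onsets are shifted accordingly).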

    dt = 0.125  # Temporal resolution of the fMRI scanner

    stimulus_durations = dt * np.ones_like(
        stimulus_onsets)  # Dirac-like stimuli

    # fMRI information
    final_time = stimulus_onsets[-1]
    final_frame_offset = 10  # Frame recording duration after the last stimulus has been shown
    initial_frame_time = 0
    final_frame_time = final_time + final_frame_offset

    between_scans_duration = 2  # in seconds
    final_scan_offset = 10  # Scan recording duration after the last stimulus has been shown
    initial_scan_time = initial_frame_time + between_scans_duration
    final_scan_time = final_time + final_scan_offset
    scan_times = np.arange(initial_scan_time, final_scan_time,
                           between_scans_duration)
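    # The BOLD signal is sampled once per TR (between_scans_duration = 2 s)
    # on this grid, from the first TR up to final_scan_time.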

    # Loop over the directions
    for k_direction in range(n_directions):

        # Loop over the sessions: we iterate over sessions first so that every design matrix has the same length regardless of N_fit
        for k_session in range(n_sessions):

            # Get the data of interest
            if directions[k_direction, k_session] == 0:
                mu = p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = p1_dist_array[k_subject, k_session, :, :n_stimuli]
            else:
                mu = 1 - (p1_mu_array[k_subject, k_session, :n_stimuli])
                dist = np.flipud(p1_dist_array[k_subject,
                                               k_session, :, :n_stimuli])

            sigma = p1_sd_array[k_subject, k_session, :n_stimuli]
            conf = -np.log(sigma)

            # Formatting
            simulated_distrib = [None for k in range(n_stimuli)]
            for k in range(n_stimuli):
                # Normalize the distribution so that it integrates to ~1 over its discretization grid
                norm_dist = dist[:, k] * (len(dist[1:, k]) - 1) / np.sum(
                    dist[1:, k])
                simulated_distrib[k] = distrib(mu[k], sigma[k], norm_dist)

            # Creation of fmri object
            simu_fmri = fmri(initial_frame_time, final_frame_time, dt,
                             scan_times)

            # Creation of experiment object
            exp = experiment(initial_time, final_time, n_sessions,
                             stimulus_onsets, stimulus_durations,
                             simulated_distrib)

            # LOOP OVER THE SCHEME
            for k_fit_scheme in range(n_schemes):

                # Current schemes
                fit_scheme = scheme_array[k_fit_scheme]

                # LOOP OVER THE FIT N's
                for k_fit_N in range(n_N):

                    # Current N
                    fit_N = N_array[k_fit_N]

                    # Creation of the true tuning curve objects

                    # Select the right tuning-curve parameter "t" for the given tuning-curve type and N
                    if fit_scheme.find('gaussian') != -1:
                        fit_t_mu = t_mu_gaussian_array[k_fit_N]
                        fit_t_conf = t_conf_gaussian_array[k_fit_N]
                        fit_tc_type = 'gaussian'

                    elif fit_scheme.find('sigmoid') != -1:
                        fit_t_mu = t_mu_sigmoid_array[k_fit_N]
                        fit_t_conf = t_conf_sigmoid_array[k_fit_N]
                        fit_tc_type = 'sigmoid'

                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                    if fit_scheme.find('ppc') != -1:
                        fit_tc = [fit_tc_mu, fit_tc_conf]
                    elif fit_scheme.find('dpc') != -1:
                        fit_tc = [fit_tc_mu]
                    elif fit_scheme.find('rate') != -1:
                        fit_tc = []
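                    # PPC reads out two tuning-curve populations (mean and
                    # confidence), DPC only the mean population, and the
                    # rate code uses no tuning curves.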

                    # Regressor and BOLD computation
                    X_tmp[k_fit_scheme][k_fit_N][k_direction][
                        k_session] = simu_fmri.get_regressor(
                            exp, fit_scheme, fit_tc)
                    # X_tmp now holds np arrays with the right structure

    # Build the design matrix X for this subject by copying X_tmp
    for k_fit_scheme, k_fit_N, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_N), range(n_directions),
            range(n_sessions)):
        X[k_fit_scheme][k_fit_N][k_direction][k_session] = copy.deepcopy(
            X_tmp[k_fit_scheme][k_fit_N][k_direction][k_session])

    end = time.time()
    print('Design matrix creation: subject #' + str(k_subject) +
          ' done! Time elapsed: ' + str(end - start) + 's')
    return X
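
A minimal usage sketch (hypothetical driver, not part of the original example; it assumes n_subjects and the module-level globals used above are defined). Each subject is independent, so the builder can simply be mapped over subjects:

# Hypothetical driver code
X_all = [create_design_matrix(k_subject) for k_subject in range(n_subjects)]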
Example No. 2
def compute_response_vector_weights(k_subject):
    '''Computes y from X and weights'''
    X = whiten_design_matrix(k_subject)

    # Initialization of the response vectors

    y = [[[[[None for k_session in range(n_sessions)]
            for k_direction in range(n_directions)]
           for k_fraction in range(n_fractions)] for k_true_N in range(n_N)]
         for k_scheme in range(n_schemes)]

    # Initialization of the weights
    weights = [[[None for k_fraction in range(n_fractions)]
                for k_true_N in range(n_N)] for k_scheme in range(n_schemes)]

    # LOOP OVER THE SCHEME
    for k_scheme in range(n_schemes):
        # print('Start loop for scheme')
        true_scheme = scheme_array[k_scheme]

        # Select the right tuning-curve parameter "t" for the given tuning-curve type

        if true_scheme.find('gaussian') != -1:
            true_t_mu_array = copy.deepcopy(t_mu_gaussian_array)
            true_t_conf_array = copy.deepcopy(t_conf_gaussian_array)
            true_tc_type = 'gaussian'

        elif true_scheme.find('sigmoid') != -1:
            true_t_mu_array = copy.deepcopy(t_mu_sigmoid_array)
            true_t_conf_array = copy.deepcopy(t_conf_sigmoid_array)
            true_tc_type = 'sigmoid'

        # We consider combinations of population fractions for PPC and rate codes
        if true_scheme.find('ppc') != -1 or true_scheme.find('rate') != -1:
            # The number of population fraction tested (related to W)
            population_fraction_array = copy.deepcopy(
                np.array([[0.5, 0.5], [0.25, 0.75], [0, 1], [0.75, 0.25],
                          [1, 0]]))
        elif true_scheme.find('dpc') != -1:  # DPC case
            population_fraction_array = copy.deepcopy(np.array([[1]]))
        n_population_fractions = len(population_fraction_array)

        # LOOP OVER N_true
        for k_true_N in range(n_N):
            true_N = N_array[k_true_N]
            # Creation of the true tuning curve objects
            true_t_mu = true_t_mu_array[k_true_N]
            true_t_conf = true_t_conf_array[k_true_N]
            true_tc_mu = tuning_curve(true_tc_type, true_N, true_t_mu,
                                      tc_lower_bound_mu, tc_upper_bound_mu)
            true_tc_conf = tuning_curve(true_tc_type, true_N, true_t_conf,
                                        tc_lower_bound_conf,
                                        tc_upper_bound_conf)

            if true_scheme.find('ppc') != -1:
                true_tc = [true_tc_mu, true_tc_conf]
            elif true_scheme.find('dpc') != -1:
                true_tc = [true_tc_mu]
            elif true_scheme.find('rate') != -1:
                true_tc = []

            # LOOP OVER THE SUBJECTS
            # for k_subject in range(n_subjects):
            # LOOP OVER THE W's
            # The number of subpopulation fractions acc. to the scheme
            n_subpopulation_fractions = int(n_fractions /
                                            n_population_fractions)
            fraction_counter = 0
            for k_subpopulation_fraction in range(n_subpopulation_fractions):
                # print(k_subpopulation_fraction)
                for k_population_fraction, population_fraction in enumerate(
                        population_fraction_array):
                    # print(k_population_fraction)
                    # The number of populations acc. to the scheme (2 for PPC and rate, 1 for DPC)
                    n_population = len(population_fraction)
                    if true_scheme.find('ppc') != -1 or true_scheme.find(
                            'dpc') != -1:
                        # The sparsity exponent cycles with the fraction counter
                        # (fraction_counter modulo n_sparsity_exp)
                        subpopulation_sparsity_exp = sparsity_exp_array[
                            fraction_counter % n_sparsity_exp]
                        # Fraction of each neural subpopulation
                        subpopulation_fraction = neural_proba.get_subpopulation_fraction(
                            n_population, true_N, subpopulation_sparsity_exp)
                    else:  # Rate case
                        subpopulation_fraction = np.array([[1.0], [1.0]])

                    # Generate the data from the voxel
                    true_voxel = voxel(true_scheme, population_fraction,
                                       subpopulation_fraction, true_tc)
                    n_true_features = n_population * true_N
                    weights_tmp = copy.deepcopy(
                        np.reshape(true_voxel.weights, (n_true_features, )))

                    # Allocation of the weight tensor
                    weights[k_scheme][k_true_N][fraction_counter] \
                        = copy.deepcopy(weights_tmp)

                    # LOOP OVER THE SESSIONS : simulating the response
                    for k_direction in range(n_directions):
                        for k_session in range(n_sessions):
                            # We use X to compute y in order to save some computation time
                            # Temporary variables to ease reading
                            X_tmp = copy.deepcopy(
                                X[k_scheme][k_true_N][k_direction][k_session])
                            y_tmp = copy.deepcopy(np.matmul(
                                X_tmp, weights_tmp))

                            # Allocation of the tensor
                            y[k_scheme][k_true_N][fraction_counter][
                                k_direction][k_session] = copy.deepcopy(y_tmp)

                    fraction_counter += 1

    # Normalization for each true_N

    y_sd_all = np.zeros(
        (n_schemes, n_N, n_fractions, n_directions, n_sessions))

    for k_scheme, k_true_N, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_N), range(n_fractions),
            range(n_directions), range(n_sessions)):
        y_sd_all[k_scheme, k_true_N, k_fraction, k_direction,
                 k_session] = np.std(
                     y[k_scheme][k_true_N][k_fraction][k_direction][k_session])

    y_sd = np.zeros((n_schemes, n_N))

    for k_scheme, k_true_N in itertools.product(range(n_schemes), range(n_N)):
        y_sd[k_scheme, k_true_N] = np.mean(y_sd_all[k_scheme,
                                                    k_true_N, :, :, :])
        for k_fraction, k_direction, k_session in itertools.product(
                range(n_fractions), range(n_directions), range(n_sessions)):
            y[k_scheme][k_true_N][k_fraction][k_direction][
                k_session] = copy.deepcopy(
                    y[k_scheme][k_true_N][k_fraction][k_direction][k_session] /
                    y_sd[k_scheme, k_true_N])

    y_without_noise = copy.deepcopy(y)

    # Compute the amplitude of the noise
    noise_sd = np.zeros((n_schemes, n_N))
    for k_scheme, k_true_N in itertools.product(range(n_schemes), range(n_N)):
        noise_sd[k_scheme, k_true_N] = np.sqrt(
            1 / snr - 1)  # std of the added gaussian noise
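    # Since each response was rescaled to unit standard deviation, adding
    # noise of variance 1/snr - 1 makes the total variance 1/snr, so that
    # var(signal) / var(signal + noise) = snr.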

    # Add the noise
    for k_scheme, k_true_N, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_N), range(n_fractions),
            range(n_directions), range(n_sessions)):
        y[k_scheme][k_true_N][k_fraction][k_direction][k_session] = (
            y[k_scheme][k_true_N][k_fraction][k_direction][k_session]
            + np.random.normal(
                0, noise_sd[k_scheme, k_true_N],
                len(y[k_scheme][k_true_N][k_fraction][k_direction][k_session])))

    y_with_noise = copy.deepcopy(y)

    # High-pass filtering

    for k_scheme, k_true_N, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_N), range(n_fractions),
            range(n_directions), range(n_sessions)):
        y_tmp = copy.deepcopy(
            y[k_scheme][k_true_N][k_fraction][k_direction][k_session])
        N = len(y_tmp)  # Resolution of the signal
        K = 11  # Highest order of the filter
        n_grid = np.linspace(0, N - 1, N, endpoint=True)  # 1D grid over values
        k_grid = np.linspace(2, K, K - 1, endpoint=True)  # 1D grid over orders
        X_filter = np.zeros((N, K - 1))  # DCT basis; the constant (k = 1) regressor is excluded
        for kk, k in enumerate(k_grid):
            X_filter[:, kk] = np.sqrt(2 / N) * np.cos(
                np.pi * (2 * n_grid + 1) / (2 * N) * (k - 1))
        y_tmp = copy.deepcopy(y_tmp - np.matmul(
            np.matmul(X_filter, np.transpose(X_filter)), y_tmp))  # Regression
        y[k_scheme][k_true_N][k_fraction][k_direction][
            k_session] = copy.deepcopy(y_tmp)

    print('y has been filtered!')
    return X, y_without_noise, y, weights
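
The high-pass step above builds the K-1 lowest-frequency DCT-II basis vectors and regresses them out (y <- y - F F^T y). A self-contained sketch of the same filter, with illustrative names and toy data that are not part of the original code:

import numpy as np

def dct_highpass(y, K=11):
    # Remove the K-1 lowest-frequency DCT-II components from y,
    # using the same basis construction as X_filter above
    N = len(y)
    n_grid = np.arange(N)
    F = np.zeros((N, K - 1))
    for kk, k in enumerate(range(2, K + 1)):
        F[:, kk] = np.sqrt(2 / N) * np.cos(
            np.pi * (2 * n_grid + 1) / (2 * N) * (k - 1))
    # The columns of F are orthonormal, so F @ F.T projects onto the drift subspace
    return y - F @ (F.T @ y)

# Toy check: a slow drift is removed while a fast oscillation survives
t = np.linspace(0, 1, 200)
drift = 0.5 * t
fast = np.sin(2 * np.pi * 40 * t)
filtered = dct_highpass(drift + fast)
print(np.max(np.abs(filtered - fast)))  # small residual: the drift is gone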
def compute_response_vector_weights(k_subject):
    '''Computes y from X and weights'''
    X = whiten_design_matrix(k_subject)

    # Creation of y from X to save computational resources
    # Initialization of the response vectors
    y = [[[[None for k_session in range(n_sessions)]
           for k_direction in range(n_directions)]
          for k_fraction in range(n_fractions)]
         for k_true_scheme in range(n_schemes)]

    # Initialization of the weights
    weights = [[None for k_fraction in range(n_fractions)]
               for k_true_scheme in range(n_schemes)]

    ### LOOP OVER THE SCHEME
    for k_true_scheme in range(n_schemes):
        true_scheme = scheme_array[k_true_scheme]
        # Select the right tuning-curve parameter "t" for the given tuning-curve type

        if true_scheme.find('gaussian') != -1:
            true_N = int(optimal_fit_N_array[k_true_scheme])
            true_t_mu = optimal_t_mu_array[k_true_scheme]
            true_t_conf = optimal_t_conf_array[k_true_scheme]
            true_tc_type = 'gaussian'
            # Creation of the true tuning curve objects
            true_tc_mu = tuning_curve(true_tc_type, true_N, true_t_mu,
                                      tc_lower_bound_mu, tc_upper_bound_mu)
            true_tc_conf = tuning_curve(true_tc_type, true_N, true_t_conf,
                                        tc_lower_bound_conf,
                                        tc_upper_bound_conf)

        elif true_scheme.find('sigmoid') != -1:
            true_N = int(optimal_fit_N_array[k_true_scheme])
            true_t_mu = optimal_t_mu_array[k_true_scheme]
            true_t_conf = optimal_t_conf_array[k_true_scheme]
            true_tc_type = 'sigmoid'
            # Creation of the true tuning curve objects
            true_tc_mu = tuning_curve(true_tc_type, true_N, true_t_mu,
                                      tc_lower_bound_mu, tc_upper_bound_mu)
            true_tc_conf = tuning_curve(true_tc_type, true_N, true_t_conf,
                                        tc_lower_bound_conf,
                                        tc_upper_bound_conf)

        # We consider combinations of population fractions for PPC and rate codes
        if true_scheme.find('ppc') != -1 or true_scheme.find('rate') != -1:
            # The number of population fraction tested (related to W)
            population_fraction_array = copy.deepcopy(
                between_population_sparsity_array)
        elif true_scheme.find('dpc') != -1:  # DPC case
            population_fraction_array = copy.deepcopy(np.array([[1]]))
        n_population_fractions = len(population_fraction_array)

        if true_scheme.find('ppc') != -1:
            true_tc = [true_tc_mu, true_tc_conf]
        elif true_scheme.find('dpc') != -1:
            true_tc = [true_tc_mu]
        elif true_scheme.find('rate') != -1:
            true_tc = []

        ### LOOP OVER THE SUBJECTS
        for k_subject in range(n_subjects):

            ### LOOP OVER THE W's
            # The number of subpopulation fractions acc. to the scheme
            n_subpopulation_fractions = int(n_fractions /
                                            n_population_fractions)
            fraction_counter = 0
            for k_subpopulation_fraction in range(n_subpopulation_fractions):
                for k_population_fraction, population_fraction in enumerate(
                        population_fraction_array):
                    # The number of populations acc. to the scheme (2 for PPC and rate, 1 for DPC)
                    n_population = len(population_fraction)
                    if true_scheme.find('ppc') != -1 or true_scheme.find(
                            'dpc') != -1:
                        # The sparsity exponent cycles with the fraction counter
                        # (fraction_counter modulo n_sparsity_exp)

                        subpopulation_sparsity_exp = sparsity_exp_array[
                            fraction_counter % n_sparsity_exp]
                        # Fraction of each neural subpopulation
                        subpopulation_fraction = neural_proba.get_subpopulation_fraction(
                            n_population, true_N, subpopulation_sparsity_exp)
                    elif true_scheme.find('rate') != -1:  # Rate case
                        subpopulation_fraction = np.array([[1.0], [1.0]])

                    # Generate the data from the voxel
                    true_voxel = voxel(true_scheme, population_fraction,
                                       subpopulation_fraction, true_tc)
                    n_true_features = n_population * len(
                        subpopulation_fraction[0])
                    weights_tmp = np.reshape(true_voxel.weights,
                                             (n_true_features, ))

                    # Allocation of the weight tensor
                    weights[k_true_scheme][fraction_counter] \
                        = copy.deepcopy(weights_tmp)

                    ### LOOP OVER THE DIRECTIONS
                    for k_direction in range(n_directions):
                        ### LOOP OVER THE SESSIONS : simulating the response
                        for k_session in range(n_sessions):
                            # We use X to compute y in order to save some computation time
                            # Temporary variables to lighten the reading
                            y[k_true_scheme][fraction_counter][k_direction][
                                k_session] = copy.deepcopy(
                                    np.matmul(
                                        X[k_true_scheme][k_direction]
                                        [k_session], weights_tmp))

                    fraction_counter += 1

    # y_without_noise = copy.deepcopy(y)

    # Normalization for each scheme

    y_sd_all = np.zeros((n_schemes, n_fractions, n_directions, n_sessions))

    for k_true_scheme, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_fractions), range(n_directions),
            range(n_sessions)):
        y_sd_all[k_true_scheme, k_fraction, k_direction, k_session] = np.std(
            y[k_true_scheme][k_fraction][k_direction][k_session])

    y_sd = np.zeros(n_schemes)

    for k_true_scheme in range(n_schemes):
        y_sd[k_true_scheme] = np.mean(y_sd_all[k_true_scheme, :, :, :])
        for k_fraction, k_direction, k_session in itertools.product(
                range(n_fractions), range(n_directions), range(n_sessions)):
            y[k_true_scheme][k_fraction][k_direction][
                k_session] = copy.deepcopy(
                    y[k_true_scheme][k_fraction][k_direction][k_session] /
                    y_sd[k_true_scheme])

        for k_fraction in range(n_fractions):
            weights[k_true_scheme][k_fraction] = copy.deepcopy(
                weights[k_true_scheme][k_fraction] / y_sd[k_true_scheme])

    y_without_noise = copy.deepcopy(y)

    # Compute the amplitude of the noise
    noise_sd = np.zeros(n_schemes)
    for k_true_scheme in range(n_schemes):
        noise_sd[k_true_scheme] = np.sqrt(1 / snr -
                                          1)  # std of the added gaussian noise
    print(noise_sd)

    # Add the noise
    for k_true_scheme, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_fractions), range(n_directions),
            range(n_sessions)):
        y[k_true_scheme][k_fraction][k_direction][
            k_session] = y[k_true_scheme][k_fraction][k_direction][
                k_session] + np.random.normal(
                    0, noise_sd[k_true_scheme],
                    len(y[k_true_scheme][k_fraction][k_direction][k_session]))

    # y_with_noise = copy.deepcopy(y)

    # Create the filtering design matrices and filter the low frequencies out of the response

    for k_true_scheme, k_fraction, k_direction, k_session in itertools.product(
            range(n_schemes), range(n_fractions), range(n_directions),
            range(n_sessions)):
        y_tmp = copy.deepcopy(
            y[k_true_scheme][k_fraction][k_direction][k_session])
        N = len(y_tmp)  # Resolution of the signal
        K = 11  # Highest order of the filter
        n_grid = np.linspace(0, N - 1, N, endpoint=True)  # 1D grid over values
        k_grid = np.linspace(2, K, K - 1, endpoint=True)  # 1D grid over orders
        X_filter = np.zeros((N, K - 1))
        for kk, k in enumerate(k_grid):
            X_filter[:, kk] = np.sqrt(2 / N) * np.cos(
                np.pi * (2 * n_grid + 1) / (2 * N) * (k - 1))
        y_tmp = copy.deepcopy(y_tmp - np.matmul(
            np.matmul(X_filter, np.transpose(X_filter)), y_tmp))  # Regression
        y[k_true_scheme][k_fraction][k_direction][k_session] = copy.deepcopy(
            y_tmp)

    # y_after_filtering = copy.deepcopy(y)
    print('Response vectors and weights computed!')
    return X, y_without_noise, y, weights
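
Sanity check on the noise amplitude used above: once the responses are rescaled to unit standard deviation, noise_sd = sqrt(1/snr - 1) gives var(signal) / var(signal + noise) = snr. A quick numeric check (the snr value is illustrative, not from the original code):

import numpy as np

rng = np.random.default_rng(0)
snr = 0.8  # illustrative value
signal = rng.standard_normal(100000)  # unit-variance response
noise = rng.normal(0.0, np.sqrt(1 / snr - 1), signal.shape)
print(np.var(signal) / np.var(signal + noise))  # ~0.8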
def create_design_matrix(k_subject):
    '''Creates the design matrix for each subject from the ideal observer model output'''

    # Initialization of the design matrices and their zscore versions
    X = [[[None for k_session in range(n_sessions)]
          for k_direction in range(n_directions)]
         for k_fit_scheme in range(n_schemes)]

    ### WE BEGIN BY CREATING THE DESIGN MATRIX X
    start = time.time()

    # Experimental design information
    eps = 1e-5  # For floating points issues
    initial_time = between_stimuli_duration + eps
    final_time_tmp = between_stimuli_duration * (n_stimuli + 1) + eps
    # Every 15±3 trials: one interruption of 8-12 s
    stimulus_onsets = np.linspace(initial_time, final_time_tmp, n_stimuli)
    # We add some time to simulate breaks
    stimulus = 0

    while True:
        # Number of regularly spaced stimuli
        n_local_regular_stimuli = rand.randint(min_n_local_regular_stimuli,
                                               max_n_local_regular_stimuli)
        stimulus_shifted = stimulus + n_local_regular_stimuli  # Current stimulus before the break
        if stimulus_shifted > n_stimuli:  # The next break is supposed to occur after all stimuli are shown
            break
        stimulus_onsets[stimulus_shifted:] += rand.randint(
            min_break_time, max_break_time
        ) - between_stimuli_duration  # We consider a break of 8-12s
        stimulus = stimulus_shifted

    stimulus_durations = dt * np.ones_like(
        stimulus_onsets)  # Dirac-like stimuli

    # fMRI information
    final_time = stimulus_onsets[-1]
    final_frame_time = final_time + final_frame_offset

    initial_scan_time = initial_frame_time + between_scans_duration
    final_scan_time = final_time + final_scan_offset
    scan_times = np.arange(initial_scan_time, final_scan_time,
                           between_scans_duration)

    ### Loop over the directions:
    for k_direction in range(n_directions):
        ### Loop over the sessions: we iterate over sessions first so that every design matrix has the same length regardless of N_fit
        for k_session in range(n_sessions):
            # Get the data of interest
            if directions[k_direction, k_session] == 0:
                mu = p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = p1_dist_array[k_subject, k_session, :, :n_stimuli]
            else:
                mu = 1 - p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = np.flipud(p1_dist_array[k_subject,
                                               k_session, :, :n_stimuli])
            sigma = p1_sd_array[k_subject, k_session, :n_stimuli]
            conf = -np.log(sigma)

            # Formatting
            simulated_distrib = [None for k in range(n_stimuli)]
            for k in range(n_stimuli):
                # Normalize the distribution so that it integrates to ~1 over its discretization grid
                norm_dist = dist[:, k] * (len(dist[1:, k]) - 1) / np.sum(
                    dist[1:, k])
                simulated_distrib[k] = distrib(mu[k], sigma[k], norm_dist)

            # Creation of fmri object
            simu_fmri = fmri(initial_frame_time, final_frame_time, dt,
                             scan_times)

            # Creation of experiment object
            exp = experiment(initial_time, final_time, n_sessions,
                             stimulus_onsets, stimulus_durations,
                             simulated_distrib)

            ### LOOP OVER THE SCHEME
            for k_fit_scheme in range(n_schemes):

                # Current schemes
                fit_scheme = scheme_array[k_fit_scheme]

                # Select the right tuning-curve parameter "t" for the given tuning-curve type and N
                if fit_scheme.find('gaussian') != -1:
                    fit_N = optimal_fit_N_array[k_fit_scheme]
                    fit_t_mu = optimal_t_mu_array[k_fit_scheme]
                    fit_t_conf = optimal_t_conf_array[k_fit_scheme]

                    fit_tc_type = 'gaussian'
                    # Creation of the fitted tuning curve objects
                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                elif fit_scheme.find('sigmoid') != -1:
                    fit_N = optimal_fit_N_array[k_fit_scheme]
                    fit_t_mu = optimal_t_mu_array[k_fit_scheme]
                    fit_t_conf = optimal_t_conf_array[k_fit_scheme]

                    fit_tc_type = 'sigmoid'
                    # Creation of the fitted tuning curve objects
                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                if fit_scheme.find('ppc') != -1:
                    fit_tc = [fit_tc_mu, fit_tc_conf]
                elif fit_scheme.find('dpc') != -1:
                    fit_tc = [fit_tc_mu]
                elif fit_scheme.find('rate') != -1:
                    fit_tc = []

                # Regressor and BOLD computation
                X[k_fit_scheme][k_direction][
                    k_session] = simu_fmri.get_regressor(
                        exp, fit_scheme, fit_tc)

                # Rescale the confidence regressor (column 1) of the rate
                # code so that both regressors have comparable amplitude
                if fit_scheme.find('rate') != -1:
                    X[k_fit_scheme][k_direction][k_session][:, 1] = (
                        mu_sd / conf_sd *
                        X[k_fit_scheme][k_direction][k_session][:, 1])

    end = time.time()
    print('Design matrix creation: subject #' + str(k_subject) +
          ' done! Time elapsed: ' + str(end - start) + 's')
    return X
Example No. 6
                    # Creation of the true tuning curve objects

                    # Select the right tuning-curve parameter "t" for the given tuning-curve type and N
                    if fit_scheme.find('gaussian') != -1:
                        fit_t_mu = t_mu_gaussian_array[k_fit_N]
                        fit_t_conf = t_conf_gaussian_array[k_fit_N]
                        fit_tc_type = 'gaussian'

                    elif fit_scheme.find('sigmoid') != -1:
                        fit_t_mu = t_mu_sigmoid_array[k_fit_N]
                        fit_t_conf = t_conf_sigmoid_array[k_fit_N]
                        fit_tc_type = 'sigmoid'

                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                    if fit_scheme.find('ppc') != -1:
                        fit_tc = [fit_tc_mu, fit_tc_conf]
                    elif fit_scheme.find('dpc') != -1:
                        fit_tc = [fit_tc_mu]
                    elif fit_scheme.find('rate') != -1:
                        fit_tc = []

                    # Regressor and BOLD computation
                    X[k_fit_scheme][k_fit_N][k_subject][
                        k_session] = simu_fmri.get_regressor(
                            exp, fit_scheme, fit_tc)

# Computes the signal
rate_activity = rate_voxel.generate_activity(simulated_distrib, mu_sd, conf_sd)

### 2) PPC simulation

# Properties of the voxel to be simulated
coding_scheme = 'ppc'
# Population fraction (one mean population, one std population)
population_fraction = np.array([0.5, 0.5])
# TC related to the mean
tc_type_mu = scheme_array[k_scheme]  # Tuning curve type
N_mu = N_array[k_N]  # Number of tuning curves
t_mu = 0.2 * t_mu_array[k_N]  # The best value from the previous "sum" analysis
# Creates the tuning_curve object
tc_mu = tuning_curve(tc_type_mu, N_mu, t_mu, tc_lower_bound_mu,
                     tc_upper_bound_mu)

# TC related to the uncertainty
tc_type_conf = scheme_array[k_scheme]  # Tuning curve type
N_conf = N_array[k_N]  # Number of tuning curves
t_conf = t_conf_array[k_N]  # The best value from the previous "sum" analysis
# Creates the tuning_curve object
tc_conf = tuning_curve(tc_type_conf, N_conf, t_conf, tc_lower_bound_conf,
                       tc_upper_bound_conf)

# Subpopulation fraction random creation (we assume N_mu=N_conf)
subpopulation_fraction = neural_proba.get_subpopulation_fraction(
    len(population_fraction), N_mu)

# Creation of the "ppc voxel"
ppc_voxel = voxel(coding_scheme, population_fraction, subpopulation_fraction,
                  [tc_mu, tc_conf])