# Example no. 1 (original site score: 0)
def create_design_matrix(k_subject):
    '''Create the design matrix X for one subject.

    For every (fit scheme, fit N, direction, session) combination, builds the
    fMRI regressor matrix obtained from the simulated ideal-observer
    distributions of that subject.

    Parameters
    ----------
    k_subject : int
        Index of the subject in the module-level ``p1_*`` data arrays.

    Returns
    -------
    list
        Nested list ``X[k_fit_scheme][k_fit_N][k_direction][k_session]``
        holding the regressor array returned by ``fmri.get_regressor``.

    Raises
    ------
    ValueError
        If a scheme name encodes no known tuning-curve type or coding scheme.
    '''
    start = time.time()

    # Design matrix, indexed as X[scheme][N][direction][session].
    # NOTE: the previous version filled a temporary X_tmp and then deep-copied
    # every entry into X via itertools.product — the copy was redundant since
    # X_tmp was local and each entry is a fresh get_regressor result, so X is
    # now filled directly.
    X = [[[[None for k_session in range(n_sessions)]
           for k_direction in range(n_directions)] for k_fit_N in range(n_N)]
         for k_fit_scheme in range(n_schemes)]

    print('X is initialised!')

    # Experimental design information
    eps = 1e-5  # For floating point issues

    between_stimuli_duration = 1.3
    initial_time = between_stimuli_duration + eps
    final_time_tmp = between_stimuli_duration * (n_stimuli + 1) + eps
    # Every 15+-3 trials : one interruption of 8-12s
    stimulus_onsets = np.linspace(initial_time, final_time_tmp, n_stimuli)
    # We add some time to simulate breaks
    stimulus = 0

    while True:
        # Number of regularly spaced stimuli before the next break
        n_local_regular_stimuli = rand.randint(12, 18)
        stimulus_shifted = stimulus + n_local_regular_stimuli
        if stimulus_shifted > n_stimuli:
            # The next break would occur after all stimuli are shown
            break
        # Shift every subsequent onset by a break of 8-12s
        stimulus_onsets[stimulus_shifted:] += rand.randint(
            8, 12) - between_stimuli_duration
        stimulus = stimulus_shifted

    dt = 0.125  # Temporal resolution of the fMRI scanner

    stimulus_durations = dt * np.ones_like(
        stimulus_onsets)  # Dirac-like stimuli

    # fMRI information
    final_time = stimulus_onsets[-1]
    final_frame_offset = 10  # Frame recording duration after the last stimulus
    initial_frame_time = 0
    final_frame_time = final_time + final_frame_offset

    between_scans_duration = 2  # in seconds
    final_scan_offset = 10  # Scan recording duration after the last stimulus
    initial_scan_time = initial_frame_time + between_scans_duration
    final_scan_time = final_time + final_scan_offset
    scan_times = np.arange(initial_scan_time, final_scan_time,
                           between_scans_duration)

    # Loop over the directions
    for k_direction in range(n_directions):

        # Loop over the sessions : we start with it in order to have the same
        # length whatever N_fit is
        for k_session in range(n_sessions):

            # Get the data of interest; for the reversed direction the mean is
            # mirrored and the distribution flipped along its first axis
            if directions[k_direction, k_session] == 0:
                mu = p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = p1_dist_array[k_subject, k_session, :, :n_stimuli]
            else:
                mu = 1 - (p1_mu_array[k_subject, k_session, :n_stimuli])
                dist = np.flipud(p1_dist_array[k_subject,
                                               k_session, :, :n_stimuli])

            sigma = p1_sd_array[k_subject, k_session, :n_stimuli]
            conf = -np.log(sigma)

            # Formatting: wrap each normalized distribution in a distrib object
            simulated_distrib = [None for k in range(n_stimuli)]
            for k in range(n_stimuli):
                # Normalization of the distribution
                norm_dist = dist[:, k] * (len(dist[1:, k]) - 1) / np.sum(
                    dist[1:, k])
                simulated_distrib[k] = distrib(mu[k], sigma[k], norm_dist)

            # Creation of fmri object
            simu_fmri = fmri(initial_frame_time, final_frame_time, dt,
                             scan_times)

            # Creation of experiment object
            exp = experiment(initial_time, final_time, n_sessions,
                             stimulus_onsets, stimulus_durations,
                             simulated_distrib)

            # LOOP OVER THE SCHEMES
            for k_fit_scheme in range(n_schemes):

                # Current scheme
                fit_scheme = scheme_array[k_fit_scheme]

                # LOOP OVER THE FIT N's
                for k_fit_N in range(n_N):

                    # Current N
                    fit_N = N_array[k_fit_N]

                    # Pick the "t" parameters according to the tuning-curve
                    # type encoded in the scheme name
                    if fit_scheme.find('gaussian') != -1:
                        fit_t_mu = t_mu_gaussian_array[k_fit_N]
                        fit_t_conf = t_conf_gaussian_array[k_fit_N]
                        fit_tc_type = 'gaussian'
                    elif fit_scheme.find('sigmoid') != -1:
                        fit_t_mu = t_mu_sigmoid_array[k_fit_N]
                        fit_t_conf = t_conf_sigmoid_array[k_fit_N]
                        fit_tc_type = 'sigmoid'
                    else:
                        # Fail fast with a clear message instead of the
                        # NameError the unbound variables would raise below
                        raise ValueError('Unknown tuning curve type in scheme: '
                                         + str(fit_scheme))

                    # Creation of the true tuning curve objects
                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                    if fit_scheme.find('ppc') != -1:
                        fit_tc = [fit_tc_mu, fit_tc_conf]
                    elif fit_scheme.find('dpc') != -1:
                        fit_tc = [fit_tc_mu]
                    elif fit_scheme.find('rate') != -1:
                        fit_tc = []
                    else:
                        raise ValueError('Unknown coding scheme: '
                                         + str(fit_scheme))

                    # Regressor and BOLD computation
                    X[k_fit_scheme][k_fit_N][k_direction][
                        k_session] = simu_fmri.get_regressor(
                            exp, fit_scheme, fit_tc)

    end = time.time()
    print('Design matrix creation : Subject n' + str(k_subject) +
          ' is done ! Time elapsed : ' + str(end - start) + 's')
    return X
# Example no. 2 (original site score: 0)
        for k_session in range(n_sessions):
            # k_session = 0

            # Get the data of interest
            mu = p1_mu_array[k_subject, k_session, :n_stimuli]
            sigma = p1_sd_array[k_subject, k_session, :n_stimuli]
            conf = -np.log(sigma)
            dist = p1_dist_array[k_subject, k_session, :, :n_stimuli]

            # Formatting
            simulated_distrib = [None for k in range(n_stimuli)]
            for k in range(n_stimuli):
                # Normalization of the distribution
                norm_dist = dist[:, k] * (len(dist[1:, k]) - 1) / np.sum(
                    dist[1:, k])
                simulated_distrib[k] = distrib(mu[k], sigma[k], norm_dist)

            # Experimental design information
            eps = 1e-5  # For floating points issues

            between_stimuli_duration = 1.3
            initial_time = between_stimuli_duration + eps
            final_time_tmp = between_stimuli_duration * (n_stimuli + 1) + eps
            # Every 15+-3 trials : one interruption of 8-12s
            stimulus_onsets = np.linspace(initial_time, final_time_tmp,
                                          n_stimuli)
            # We add some time to simulate breaks
            stimulus = 0

            while True:
                # Number of regularly spaced stimuli
def create_design_matrix(k_subject):
    '''Creates the design matrix for each subject from the ideal observer model output.

    For every (fit scheme, direction, session) combination, builds the fMRI
    regressor matrix from the simulated distributions, using the pre-computed
    optimal tuning-curve parameters for each scheme.

    Parameters
    ----------
    k_subject : int
        Index of the subject in the module-level ``p1_*`` data arrays.

    Returns
    -------
    list
        Nested list ``X[k_fit_scheme][k_direction][k_session]`` holding the
        regressor array returned by ``fmri.get_regressor``.
    '''
    # Initialization of the design matrix, indexed as
    # X[scheme][direction][session]
    X = [[[None for k_session in range(n_sessions)]
          for k_direction in range(n_directions)]
         for k_fit_scheme in range(n_schemes)]

    ### WE BEGIN BY CREATING THE DESIGN MATRIX X
    start = time.time()

    # Experimental design information
    eps = 1e-5  # For floating point issues
    initial_time = between_stimuli_duration + eps
    final_time_tmp = between_stimuli_duration * (n_stimuli + 1) + eps
    # Every 15+-3 trials : one interruption of 8-12s
    stimulus_onsets = np.linspace(initial_time, final_time_tmp, n_stimuli)
    # We add some time to simulate breaks
    stimulus = 0

    while True:
        # Number of regularly spaced stimuli before the next break
        n_local_regular_stimuli = rand.randint(min_n_local_regular_stimuli,
                                               max_n_local_regular_stimuli)
        stimulus_shifted = stimulus + n_local_regular_stimuli
        if stimulus_shifted > n_stimuli:
            # The next break would occur after all stimuli are shown
            break
        # Shift every subsequent onset by a break of 8-12s
        stimulus_onsets[stimulus_shifted:] += rand.randint(
            min_break_time, max_break_time) - between_stimuli_duration
        stimulus = stimulus_shifted

    stimulus_durations = dt * np.ones_like(
        stimulus_onsets)  # Dirac-like stimuli

    # fMRI information
    final_time = stimulus_onsets[-1]
    final_frame_time = final_time + final_frame_offset

    initial_scan_time = initial_frame_time + between_scans_duration
    final_scan_time = final_time + final_scan_offset
    scan_times = np.arange(initial_scan_time, final_scan_time,
                           between_scans_duration)

    ### Loop over the directions:
    for k_direction in range(n_directions):
        ### Loop over the sessions : we start with it in order to have the
        ### same length whatever N_fit is
        for k_session in range(n_sessions):
            # Get the data of interest; for the reversed direction the mean is
            # mirrored and the distribution flipped along its first axis
            if directions[k_direction, k_session] == 0:
                mu = p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = p1_dist_array[k_subject, k_session, :, :n_stimuli]
            else:
                mu = 1 - p1_mu_array[k_subject, k_session, :n_stimuli]
                dist = np.flipud(p1_dist_array[k_subject,
                                               k_session, :, :n_stimuli])
            sigma = p1_sd_array[k_subject, k_session, :n_stimuli]
            conf = -np.log(sigma)

            # Formatting: wrap each normalized distribution in a distrib object
            simulated_distrib = [None for k in range(n_stimuli)]
            for k in range(n_stimuli):
                # Normalization of the distribution
                norm_dist = dist[:, k] * (len(dist[1:, k]) - 1) / np.sum(
                    dist[1:, k])
                simulated_distrib[k] = distrib(mu[k], sigma[k], norm_dist)

            # Creation of fmri object
            simu_fmri = fmri(initial_frame_time, final_frame_time, dt,
                             scan_times)

            # Creation of experiment object
            exp = experiment(initial_time, final_time, n_sessions,
                             stimulus_onsets, stimulus_durations,
                             simulated_distrib)

            ### LOOP OVER THE SCHEMES
            for k_fit_scheme in range(n_schemes):

                # Current scheme
                fit_scheme = scheme_array[k_fit_scheme]

                # Tuning-curve type encoded in the scheme name (None for
                # schemes that need no tuning curve, e.g. a plain rate code)
                if fit_scheme.find('gaussian') != -1:
                    fit_tc_type = 'gaussian'
                elif fit_scheme.find('sigmoid') != -1:
                    fit_tc_type = 'sigmoid'
                else:
                    fit_tc_type = None

                if fit_tc_type is not None:
                    # The optimal N and "t" lookups were duplicated verbatim
                    # in the former gaussian/sigmoid branches; hoisted here
                    fit_N = optimal_fit_N_array[k_fit_scheme]
                    fit_t_mu = optimal_t_mu_array[k_fit_scheme]
                    fit_t_conf = optimal_t_conf_array[k_fit_scheme]

                    # Creation of the true tuning curve objects
                    fit_tc_mu = tuning_curve(fit_tc_type, fit_N, fit_t_mu,
                                             tc_lower_bound_mu,
                                             tc_upper_bound_mu)
                    fit_tc_conf = tuning_curve(fit_tc_type, fit_N, fit_t_conf,
                                               tc_lower_bound_conf,
                                               tc_upper_bound_conf)

                # NOTE(review): ppc/dpc schemes are assumed to always carry a
                # gaussian/sigmoid tag, otherwise fit_tc_mu is stale/unbound
                # (same as the original code) — confirm against scheme_array
                if fit_scheme.find('ppc') != -1:
                    fit_tc = [fit_tc_mu, fit_tc_conf]
                elif fit_scheme.find('dpc') != -1:
                    fit_tc = [fit_tc_mu]
                elif fit_scheme.find('rate') != -1:
                    fit_tc = []

                # Regressor and BOLD computation
                X[k_fit_scheme][k_direction][
                    k_session] = simu_fmri.get_regressor(
                        exp, fit_scheme, fit_tc)

                # Rescale the second regressor for the rate code so mu and
                # confidence contribute on comparable scales
                if fit_scheme.find('rate') != -1:
                    X[k_fit_scheme][k_direction][
                        k_session][:, 1] = mu_sd / conf_sd * X[k_fit_scheme][
                            k_direction][k_session][:, 1]

    end = time.time()
    print('Design matrix creation : Subject n' + str(k_subject) +
          ' is done ! Time elapsed : ' + str(end - start) + 's')
    return X
min_mu = 0.25
max_mu = 0.8

min_conf = 1.8
max_conf = 2.6

# Evenly spaced grids for the mean and the confidence axis; we stay below the
# true maximum so the beta distributions remain bell shaped
mu = np.linspace(min_mu, max_mu, n_mu)
conf = np.linspace(min_conf, max_conf, n_conf)

# Simulated distribution for every (mean, confidence) pair; note that
# confidence is -log(sigma), hence the exp(-conf) standard deviation
simulated_distrib = [
    [distrib(mean_val, np.exp(-conf_val)) for conf_val in conf]
    for mean_val in mu
]

# # Plots the distribution
# fig = plt.figure()
# k_mu = -1
# k_conf = -1
# x = np.linspace(0, 1, 100)
# y = simulated_distrib[k_mu][k_conf].beta(x)
# plt.plot(x, y)    # Full distribution
# plt.show()

# Resolution of the continuous plots (same resolution as for DPC)
plot_resolution = n_mu

# x-axis used when plotting quantities against the varying mean
x_mu = np.linspace(np.min(mu), np.max(mu), plot_resolution)