def generate_prediction(self, x, y, sigma, n, beta, baseline):
    """Predict the BOLD timecourse for a compressive (CSS) pRF model.

    The prediction is peak-normalized to 1 before the baseline offset and
    beta scaling are applied (in that order).

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size (Gaussian standard deviation) in degrees.
    n : float
        Static compressive nonlinearity exponent.
    beta : float
        Amplitude scaling applied last.
    baseline : float
        Additive offset applied before the beta scaling.

    Returns
    -------
    ndarray
        Model timecourse, same length as the stimulus timecourse.
    """
    # Build the 2D Gaussian receptive field on the fine sampling grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Normalize by the Gaussian integral, expressed per grid-pixel area.
    pixel_width = np.diff(self.stimulus.deg_x[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Project the stimulus movie through the RF to get a neural timecourse.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)

    # Apply the static compressive nonlinearity.
    neural_ts **= n

    # Convolve with the hemodynamic response function; trim to input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    prediction = fftconvolve(neural_ts, hemo)[:len(neural_ts)]

    # Units: normalize to a peak of 1, then offset and scale.
    prediction /= np.max(prediction)
    prediction += baseline
    prediction *= beta

    return prediction
def generate_ballpark_prediction(self, x, y, sigma, n):
    """Coarse-grid CSS prediction used during the grid-fit stage.

    Runs on the downsampled stimulus (``deg_x0``/``deg_y0``/``stim_arr0``)
    and estimates amplitude and offset by regressing the normalized model
    against ``self.data`` rather than fitting them as free parameters.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size in degrees.
    n : float
        Compressive nonlinearity exponent.

    Returns
    -------
    ndarray
        Regression-scaled model timecourse.
    """
    # Gaussian RF on the coarse grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x0, self.stimulus.deg_y0)

    # Normalize by the Gaussian integral per coarse-grid pixel area.
    pixel_width = np.diff(self.stimulus.deg_x0[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Stimulus timecourse through the RF, with compression.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr0, receptive_field)
    neural_ts **= n

    # Convolve with the HRF and trim to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    prediction = fftconvolve(neural_ts, hemo)[:len(neural_ts)]

    # Convert to percent-signal-change-like units around the mean.
    mean_level = np.mean(prediction)
    prediction = (prediction - mean_level) / mean_level

    # Estimate scale/offset by regressing the model on the data.
    fit = linregress(prediction, self.data)

    # NOTE(review): the intercept is added BEFORE the |slope| scaling, so
    # this computes |slope| * (model + intercept), not the usual
    # slope * model + intercept. Preserved as-is from the original.
    prediction += fit[1]
    prediction *= np.abs(fit[0])

    return prediction
def generate_prediction(self, x, y, sigma, n, beta, baseline):
    """Predict the BOLD timecourse for a CSS pRF in percent-signal units.

    Unlike the peak-normalized variant, this converts the convolved model
    to percent signal change around its mean before applying the baseline
    offset and beta scaling (in that order).

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size in degrees.
    n : float
        Compressive nonlinearity exponent.
    beta : float
        Amplitude scaling applied last.
    baseline : float
        Additive offset applied before the beta scaling.

    Returns
    -------
    ndarray
        Model timecourse, same length as the stimulus timecourse.
    """
    # Gaussian RF on the fine sampling grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Normalize by the Gaussian integral per grid-pixel area.
    pixel_width = np.diff(self.stimulus.deg_x[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Stimulus timecourse through the RF, with compression.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)
    neural_ts **= n

    # HRF convolution, trimmed to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    prediction = fftconvolve(neural_ts, hemo)[:len(neural_ts)]

    # Units: percent signal change around the mean, then offset and scale.
    mean_level = np.mean(prediction)
    prediction = (prediction - mean_level) / mean_level
    prediction += baseline
    prediction *= beta

    return prediction
def generate_prediction(self, x, y, sigma, beta, baseline, hrf, nr_TRs):
    """Predict a BOLD timecourse for a linear pRF with a caller-supplied HRF.

    The stimulus and HRF are assumed to live at a common sampling frequency;
    after convolution the model is resampled down to ``nr_TRs`` samples
    (i.e. one sample per TR), peak-normalized, offset, and scaled.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size in degrees.
    beta : float
        Amplitude scaling applied last.
    baseline : float
        Additive offset applied before the beta scaling.
    hrf : ndarray
        Hemodynamic response function sampled at the stimulus frequency.
    nr_TRs : int
        Number of TRs to resample the prediction to.

    Returns
    -------
    ndarray
        Model timecourse of length ``nr_TRs``.
    """
    # Gaussian RF on the fine sampling grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Normalize by the Gaussian integral per grid-pixel area.
    pixel_width = np.diff(self.stimulus.deg_x[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Stimulus timecourse through the RF (no compressive nonlinearity here).
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)

    # Convolve with the supplied HRF and trim to the input length.
    prediction = fftconvolve(neural_ts, hrf)[:len(neural_ts)]

    # Downsample from stimulus sampling frequency to one sample per TR.
    prediction = signal.resample(prediction, num=nr_TRs, axis=0)

    # Units: normalize to a peak of 1, then offset and scale.
    prediction /= np.max(prediction)
    prediction += baseline
    prediction *= beta

    return prediction
def generate_prediction(self, x, y, sigma, sigma_ratio, volume_ratio):
    """Predict the timecourse for a difference-of-Gaussians (DoG) pRF.

    The RF is a center Gaussian minus a broader surround Gaussian: the
    surround has size ``sigma * sigma_ratio``, is amplitude-corrected by
    ``1 / sigma_ratio**2``, and is weighted by ``sqrt(volume_ratio)``.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        Center Gaussian size in degrees.
    sigma_ratio : float
        Surround-to-center size ratio.
    volume_ratio : float
        Surround-to-center volume ratio (applied as its square root).

    Returns
    -------
    ndarray
        Unscaled model timecourse (no baseline/beta applied here).
    """
    # Center Gaussian.
    center = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Surround Gaussian: wider, with amplitude corrected for its size.
    surround = generate_og_receptive_field(
        x, y, sigma * sigma_ratio,
        self.stimulus.deg_x, self.stimulus.deg_y) * 1 / sigma_ratio**2

    # Center-minus-surround receptive field.
    receptive_field = center - np.sqrt(volume_ratio) * surround

    # Stimulus timecourse through the DoG field.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)

    # Convolve with the HRF and trim to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    return fftconvolve(neural_ts, hemo)[:len(neural_ts)]
def generate_prediction(self, x, y, sigma, n, beta, baseline, unscaled=False):
    """Predict a CSS pRF timecourse with Savitzky-Golay drift removal.

    After peak-normalization, a slow drift component is estimated with a
    Savitzky-Golay filter (``self.window`` samples, order
    ``self.sg_filter_order``), demeaned, and subtracted — so the baseline
    parameter keeps its interpretation as the model's mean offset.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size in degrees.
    n : float
        Compressive nonlinearity exponent.
    beta : float
        Amplitude scaling applied last.
    baseline : float
        Additive offset applied before the beta scaling.
    unscaled : bool, optional
        Present in the signature; not used in this body.

    Returns
    -------
    ndarray
        Drift-corrected model timecourse.
    """
    # Gaussian RF on the fine sampling grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Normalize by the Gaussian integral per grid-pixel area.
    pixel_width = np.diff(self.stimulus.deg_x[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Stimulus timecourse through the RF, with compression.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)
    neural_ts **= n

    # HRF convolution, trimmed to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    prediction = fftconvolve(neural_ts, hemo)[:len(neural_ts)]

    # Units: normalize to a peak of 1.
    prediction /= np.max(prediction)

    # Estimate the slow drift with a Savitzky-Golay filter.
    drift = savgol_filter(prediction, window_length=self.window,
                          polyorder=self.sg_filter_order, deriv=0,
                          mode='nearest')

    # Demean the drift before subtracting it, so the baseline parameter
    # still represents the model's mean offset.
    prediction -= drift - np.mean(drift)

    # Offset, then scale.
    prediction += baseline
    prediction *= beta

    return prediction
def generate_prediction(self, x, y, sigma, sigma_ratio, volume_ratio):
    """Predict the timecourse for a difference-of-Gaussians (DoG) pRF.

    Builds a center Gaussian and a size-corrected surround Gaussian
    (``sigma * sigma_ratio``, scaled by ``1 / sigma_ratio**2``), subtracts
    the ``sqrt(volume_ratio)``-weighted surround, projects the stimulus
    through the result, and convolves with the model's HRF.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        Center Gaussian size in degrees.
    sigma_ratio : float
        Surround-to-center size ratio.
    volume_ratio : float
        Surround-to-center volume ratio (applied as its square root).

    Returns
    -------
    ndarray
        Unscaled model timecourse (no baseline/beta applied here).
    """
    # Center component.
    center = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)

    # Surround component: wider Gaussian with size-corrected amplitude.
    surround = generate_og_receptive_field(
        x, y, sigma * sigma_ratio,
        self.stimulus.deg_x, self.stimulus.deg_y) * 1 / sigma_ratio**2

    # Center-minus-surround field.
    receptive_field = center - np.sqrt(volume_ratio) * surround

    # Project the stimulus through the DoG field.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr, receptive_field)

    # HRF convolution, trimmed to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    return fftconvolve(neural_ts, hemo)[:len(neural_ts)]
def generate_ballpark_prediction(self, x, y, sigma, n, beta, baseline):
    """Coarse-grid CSS prediction with Savitzky-Golay drift removal.

    Runs on the downsampled stimulus (``deg_x0``/``deg_y0``/``stim_arr0``),
    subtracts a Savitzky-Golay drift estimate from the convolved model,
    then applies beta scaling followed by the baseline offset.

    Parameters
    ----------
    x, y : float
        pRF center position in degrees of visual angle.
    sigma : float
        pRF size in degrees.
    n : float
        Compressive nonlinearity exponent.
    beta : float
        Amplitude scaling.
    baseline : float
        Additive offset applied after the beta scaling.

    Returns
    -------
    ndarray
        Drift-corrected, scaled model timecourse.
    """
    # Gaussian RF on the coarse grid.
    receptive_field = generate_og_receptive_field(
        x, y, sigma, self.stimulus.deg_x0, self.stimulus.deg_y0)

    # Normalize by the Gaussian integral per coarse-grid pixel area.
    pixel_width = np.diff(self.stimulus.deg_x0[0, 0:2])
    receptive_field /= (2 * np.pi * sigma**2) * (1 / pixel_width**2)

    # Stimulus timecourse through the RF, with compression.
    neural_ts = generate_rf_timeseries_nomask(self.stimulus.stim_arr0, receptive_field)
    neural_ts **= n

    # HRF convolution, trimmed to the input length.
    hemo = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
    prediction = fftconvolve(neural_ts, hemo)[:len(neural_ts)]

    # Remove the Savitzky-Golay drift estimate from the model.
    prediction = prediction - savgol_filter(
        prediction, window_length=self.sg_filter_window_length,
        polyorder=self.sg_filter_order, deriv=0, mode='nearest')

    # NOTE(review): here beta is applied BEFORE the baseline offset,
    # unlike the sibling generate_prediction methods which offset first —
    # preserved as-is from the original; confirm the intended convention.
    prediction *= beta
    prediction += baseline

    return prediction