def _transform(self, var, model='spm', derivative=False, dispersion=False,
               fir_delays=None):
    """Convolve a run variable with an HRF model and return a dense result.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        The variable to convolve. Sparse variables are densified at the
        collection's sampling rate before convolution.
    model : str
        HRF model name: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative : bool
        If True, append the temporal derivative ('spm'/'glover' only).
    dispersion : bool
        If True, append the dispersion derivative ('spm'/'glover' only).
    fir_delays : iterable, optional
        Delays passed through to hrf.compute_regressor for the 'fir' model.

    Returns
    -------
    DenseRunVariable
        The convolved variable.

    Raises
    ------
    ValueError
        If `model` is not one of 'spm', 'glover', or 'fir'.
    """
    model = model.lower()

    # Sparse events must be densified before their frame times can serve
    # as the convolution grid.
    if isinstance(var, SparseRunVariable):
        var = var.to_dense(self.collection.sampling_rate)

    events = var.to_df(entities=False)
    frame_times = events['onset'].values
    # hrf.compute_regressor expects a 3 x n_events matrix of
    # (onset, duration, amplitude) values.
    event_matrix = events[['onset', 'duration', 'amplitude']].values.T

    if model in ('spm', 'glover'):
        if derivative:
            model += ' + derivative'
        if dispersion:
            model += ' + dispersion'
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")

    regressor, _ = hrf.compute_regressor(event_matrix, model, frame_times,
                                         fir_delays=fir_delays, min_onset=0)

    return DenseRunVariable(var.name, regressor, var.run_info, var.source,
                            var.sampling_rate)
def _transform(self, var, model='spm', derivative=False, dispersion=False,
               fir_delays=None):
    """Convolve a run variable with an HRF model and return a dense result.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        The variable to convolve. Sparse variables are resampled onto an
        even grid at the collection's sampling rate.
    model : str
        HRF model name: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative : bool
        If True, append the temporal derivative ('spm'/'glover' only).
    dispersion : bool
        If True, append the dispersion derivative ('spm'/'glover' only).
    fir_delays : iterable, optional
        Delays passed through to hrf.compute_regressor for the 'fir' model.

    Returns
    -------
    DenseRunVariable
        The convolved variable, sampled at `sampling_rate`.

    Raises
    ------
    ValueError
        If `model` is not one of 'spm', 'glover', or 'fir'.
    """
    model = model.lower()
    df = var.to_df(entities=False)

    if isinstance(var, SparseRunVariable):
        # Sparse input: build an even frame grid spanning the run duration.
        sampling_rate = self.collection.sampling_rate
        dur = var.get_duration()
        resample_frames = np.linspace(
            0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
    else:
        # Dense input: convolve at the variable's existing frame times.
        resample_frames = df['onset'].values
        sampling_rate = var.sampling_rate

    # 3 x n_events matrix of (onset, duration, amplitude) values, as
    # expected by hrf.compute_regressor.
    vals = df[['onset', 'duration', 'amplitude']].values.T

    if model in ['spm', 'glover']:
        if derivative:
            model += ' + derivative'
        if dispersion:
            model += ' + dispersion'
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")

    # Minimum interval between event onsets/durations is used to compute an
    # oversampling factor that prevents information loss during convolution.
    # np.unique already returns sorted values, so no explicit sort is needed.
    unique_onsets = np.unique(df.onset)
    if len(unique_onsets) > 1:
        # Onset diffs are strictly positive (values are unique), so only
        # durations can contribute a zero. BUGFIX: exclude zero durations
        # (impulse events) -- they previously made min_interval == 0 and
        # caused a divide-by-zero / infinite oversampling factor below.
        min_interval = np.ediff1d(unique_onsets).min()
        durations = df.duration.values
        pos_durations = durations[durations > 0]
        if pos_durations.size:
            min_interval = min(min_interval, pos_durations.min())
        oversampling = np.ceil(2 * (1 / (min_interval * sampling_rate)))
    else:
        oversampling = 2

    convolved = hrf.compute_regressor(
        vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
        oversampling=oversampling)

    return DenseRunVariable(
        name=var.name, values=convolved[0], run_info=var.run_info,
        source=var.source, sampling_rate=sampling_rate)
def _transform(self, var, model='spm', derivative=False, dispersion=False,
               fir_delays=None):
    """Convolve a run variable with an HRF model and return a dense result.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        The variable to convolve. Sparse variables are resampled onto an
        even grid at the collection's sampling rate.
    model : str
        HRF model name: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative : bool
        If True, append the temporal derivative ('spm'/'glover' only).
    dispersion : bool
        If True, append the dispersion derivative ('spm'/'glover' only).
    fir_delays : iterable, optional
        Delays passed through to hrf.compute_regressor for the 'fir' model.

    Returns
    -------
    DenseRunVariable
        The convolved variable, sampled at `sampling_rate`.

    Raises
    ------
    ValueError
        If `model` is not one of 'spm', 'glover', or 'fir'.
    """
    model = model.lower()
    df = var.to_df(entities=False)

    if isinstance(var, SparseRunVariable):
        # Sparse input: build an even frame grid spanning the run duration.
        sampling_rate = self.collection.sampling_rate
        dur = var.get_duration()
        resample_frames = np.linspace(
            0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
        safety = 2  # Double frequency to resolve events
    else:
        # Dense input: convolve at the variable's existing frame times.
        resample_frames = df['onset'].values
        sampling_rate = var.sampling_rate
        safety = 1  # Maximum signal resolution is already 0.5 * SR

    # 3 x n_events matrix of (onset, duration, amplitude) values, as
    # expected by hrf.compute_regressor.
    vals = df[['onset', 'duration', 'amplitude']].values.T

    if model in ['spm', 'glover']:
        if derivative:
            model += ' + derivative'
        if dispersion:
            model += ' + dispersion'
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")

    # Sampling at >100Hz will never be useful, but can be wildly expensive
    max_freq, min_interval = 100, 0.01
    # Sampling at <1Hz can degrade signals
    # NOTE(review): max_interval is assigned but never read in this method.
    min_freq, max_interval = 1, 1

    # Given the sampling rate, determine an oversampling factor to ensure
    # that events can be modeled with reasonable precision.
    unique_onsets = np.unique(df.onset)
    unique_durations = np.unique(df.duration)

    # Align existing data ticks with event onsets and offsets, up to ms
    # resolution. Note that GCD ignores zeros, so 0 onsets and impulse
    # responses (0 durations) do not harm this.
    required_resolution = _fractional_gcd(
        np.concatenate((unique_onsets, unique_durations)),
        res=min_interval)

    # Bound the effective sampling rate between min_freq and max_freq.
    effective_sr = max(min_freq, min(safety / required_resolution, max_freq))

    convolved = hrf.compute_regressor(
        vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
        oversampling=np.ceil(effective_sr / sampling_rate))

    return DenseRunVariable(
        name=var.name, values=convolved[0], run_info=var.run_info,
        source=var.source, sampling_rate=sampling_rate)