def _transform(self, var, model='spm', derivative=False, dispersion=False,
               fir_delays=None):
    """Convolve the variable with an HRF model and return a dense result.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        Variable to convolve. Sparse variables are densified first using the
        collection's sampling rate.
    model : str
        HRF model: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative, dispersion : bool
        For 'spm'/'glover', append the temporal derivative and/or dispersion
        terms to the model specification.
    fir_delays : iterable or None
        Delays passed through to the FIR model.

    Returns
    -------
    DenseRunVariable
        The convolved regressor, carrying over the input's metadata.

    Raises
    ------
    ValueError
        If `model` is not one of the supported names.
    """
    model = model.lower()

    # Densify sparse event variables at the collection's rate before
    # building the regressor.
    if isinstance(var, SparseRunVariable):
        var = var.to_dense(self.collection.sampling_rate)

    events = var.to_df(entities=False)
    frame_times = events['onset'].values
    regressor_spec = events[['onset', 'duration', 'amplitude']].values.T

    if model in ('spm', 'glover'):
        for flag, suffix in ((derivative, ' + derivative'),
                             (dispersion, ' + dispersion')):
            if flag:
                model += suffix
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")

    convolved = hrf.compute_regressor(regressor_spec, model, frame_times,
                                      fir_delays=fir_delays, min_onset=0)

    return DenseRunVariable(var.name, convolved[0], var.run_info, var.source,
                            var.sampling_rate)
def generate_DEV(name='test', sr=20, duration=480):
    """Generate a DenseRunVariable filled with random values, for testing.

    Parameters
    ----------
    name : str
        Name to assign to the generated variable.
    sr : int
        Sampling rate in Hz.
    duration : int
        Run duration in seconds; the variable holds `duration * sr` samples.

    Returns
    -------
    DenseRunVariable
        A variable with random normal values and freshly generated
        task/run/session/subject entities.
    """
    n = duration * sr
    values = np.random.normal(size=n)
    ent_names = ['task', 'run', 'session', 'subject']
    entities = {e: uuid.uuid4().hex for e in ent_names}
    image = uuid.uuid4().hex + '.nii.gz'
    run_info = RunInfo(entities, duration, 2, image)
    # Bug fix: previously the name was hard-coded to 'test', silently
    # ignoring the `name` argument. Honor the parameter (default unchanged).
    return DenseRunVariable(name, values, run_info, 'dummy', sr)
def _transform(self, var, model='spm', derivative=False, dispersion=False,
               fir_delays=None):
    """Convolve the variable with an HRF model and return a dense result.

    Sparse variables are resampled onto a regular grid at the collection's
    sampling rate; dense variables are convolved on their existing frames.
    An oversampling factor is computed so that closely spaced or short
    events are not lost at the working sampling rate.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        Variable to convolve.
    model : str
        HRF model: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative, dispersion : bool
        For 'spm'/'glover', append the temporal derivative and/or dispersion
        terms to the model specification.
    fir_delays : iterable or None
        Delays passed through to the FIR model.

    Returns
    -------
    DenseRunVariable
        The convolved regressor at the working sampling rate.

    Raises
    ------
    ValueError
        If `model` is not one of the supported names.
    """
    model = model.lower()
    df = var.to_df(entities=False)

    if isinstance(var, SparseRunVariable):
        sampling_rate = self.collection.sampling_rate
        dur = var.get_duration()
        # Regular frame grid spanning the run at the collection's rate.
        resample_frames = np.linspace(
            0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
    else:
        resample_frames = df['onset'].values
        sampling_rate = var.sampling_rate

    vals = df[['onset', 'duration', 'amplitude']].values.T

    if model in ['spm', 'glover']:
        if derivative:
            model += ' + derivative'
        if dispersion:
            model += ' + dispersion'
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")

    # Minimum interval between event onsets/durations is used to compute an
    # oversampling factor that prevents information loss when events are
    # shorter or closer together than the working sampling period.
    # Note: np.unique already returns sorted values, so no explicit sort is
    # needed.
    unique_onsets = np.unique(df.onset)
    if len(unique_onsets) > 1:
        candidates = [np.ediff1d(unique_onsets).min()]
        # Bug fix: impulse events (duration == 0) previously drove
        # min_interval to 0, producing a division by zero / infinite
        # oversampling below. Only positive durations constrain resolution.
        positive_durations = df.duration.values[df.duration.values > 0]
        if len(positive_durations):
            candidates.append(positive_durations.min())
        min_interval = min(candidates)
        # Factor of 2 ensures at least two samples per shortest interval
        # (Nyquist-style safety margin).
        oversampling = np.ceil(2 * (1 / (min_interval * sampling_rate)))
    else:
        oversampling = 2

    convolved = hrf.compute_regressor(
        vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
        oversampling=oversampling)

    return DenseRunVariable(
        name=var.name, values=convolved[0], run_info=var.run_info,
        source=var.source, sampling_rate=sampling_rate)
def _transform(self, var, model='spm', derivative=False, dispersion=False, fir_delays=None):
    """Convolve the variable with an HRF model and return a dense result.

    Sparse variables are resampled onto a regular frame grid at the
    collection's sampling rate before convolution; dense variables are
    convolved on their existing onset frames. An effective sampling rate is
    derived from the GCD of event onsets/durations so that event timing can
    be represented without aliasing, bounded to a sane frequency range.

    Parameters
    ----------
    var : SparseRunVariable or DenseRunVariable
        Variable to convolve.
    model : str
        HRF model: 'spm', 'glover', or 'fir' (case-insensitive).
    derivative, dispersion : bool
        For 'spm'/'glover', append the temporal derivative and/or
        dispersion terms to the model specification.
    fir_delays : iterable or None
        Delays passed through to the FIR model.

    Returns
    -------
    DenseRunVariable
        The convolved regressor at the working sampling rate.

    Raises
    ------
    ValueError
        If `model` is not one of the supported names.
    """
    model = model.lower()
    df = var.to_df(entities=False)
    if isinstance(var, SparseRunVariable):
        sampling_rate = self.collection.sampling_rate
        dur = var.get_duration()
        resample_frames = np.linspace(
            0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
        safety = 2  # Double frequency to resolve events
    else:
        resample_frames = df['onset'].values
        sampling_rate = var.sampling_rate
        safety = 1  # Maximum signal resolution is already 0.5 * SR
    vals = df[['onset', 'duration', 'amplitude']].values.T
    if model in ['spm', 'glover']:
        if derivative:
            model += ' + derivative'
        if dispersion:
            model += ' + dispersion'
    elif model != 'fir':
        raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")
    # Sampling at >100Hz will never be useful, but can be wildly expensive
    max_freq, min_interval = 100, 0.01
    # Sampling at <1Hz can degrade signals
    # NOTE(review): max_interval is assigned here but never read below —
    # presumably kept for symmetry with min_interval; confirm before removing.
    min_freq, max_interval = 1, 1
    # Given the sampling rate, determine an oversampling factor to ensure that
    # events can be modeled with reasonable precision
    unique_onsets = np.unique(df.onset)
    unique_durations = np.unique(df.duration)
    # Align existing data ticks with event onsets and offsets, up to ms
    # resolution. Note that GCD ignores zeros, so 0 onsets and impulse
    # responses (0 durations) do not harm this.
    required_resolution = _fractional_gcd(
        np.concatenate((unique_onsets, unique_durations)),
        res=min_interval)
    # Bound the effective sampling rate between min_freq and max_freq
    effective_sr = max(min_freq, min(safety / required_resolution, max_freq))
    convolved = hrf.compute_regressor(
        vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
        # Oversample just enough to reach the effective rate from the
        # working sampling rate.
        oversampling=np.ceil(effective_sr / sampling_rate)
    )
    return DenseRunVariable(
        name=var.name, values=convolved[0], run_info=var.run_info,
        source=var.source, sampling_rate=sampling_rate)
def test_Lag():
    """Exercise the Lag transform: shifts, boundary modes, and differencing."""
    rot = DenseRunVariable(
        name="rot_x",
        values=np.arange(5., 20.),
        run_info=RunInfo({}, 15, 1, "none", 15),
        source='regressors',
        sampling_rate=1,
    )
    coll = BIDSRunVariableCollection([rot], sampling_rate=1)

    def result(name):
        # Extract the raw ndarray for the named output variable.
        return coll[name].values.values

    # Forward shift
    transform.Lag(coll, "rot_x", output="d_rot_x")
    out = result("d_rot_x")
    assert np.isclose(out[0, 0], 5.)
    assert np.allclose(out[1:, 0], np.arange(5., 19.))

    # Backward shift
    transform.Lag(coll, "rot_x", output="d_rot_x", shift=-1)
    out = result("d_rot_x")
    assert np.isclose(out[-1, 0], 19.)
    assert np.allclose(out[:-1, 0], np.arange(6., 20.))

    # Half shift; don't know why you'd want to do it, but you can
    transform.Lag(coll, "rot_x", output="half_shift", shift=0.5, order=1)
    out = result("half_shift")
    assert np.isclose(out[0, 0], 5.)
    assert np.allclose(out[1:, 0], np.arange(5.5, 19.5))

    # Constant mode
    transform.Lag(coll, "rot_x", output="d_rot_x", mode="constant")
    out = result("d_rot_x")
    assert np.isclose(out[0, 0], 0.)
    assert np.allclose(out[1:, 0], np.arange(5., 19.))

    # Reflect mode
    transform.Lag(coll, "rot_x", output="d_rot_x", mode="reflect")
    out = result("d_rot_x")
    assert np.isclose(out[0, 0], 5.)
    assert np.allclose(out[1:, 0], np.arange(5., 19.))

    # Forward shift -> Backward difference
    transform.Lag(coll, "rot_x", output="d_rot_x", difference=True)
    out = result("d_rot_x")
    assert np.isclose(out[0, 0], 0.)
    assert np.allclose(out[1:, 0], 1.)

    # Backward shift -> Forward difference
    transform.Lag(coll, "rot_x", output="d_rot_x", shift=-1, difference=True)
    out = result("d_rot_x")
    assert np.isclose(out[-1, 0], 0.)
    assert np.allclose(out[:-1, 0], 1.)