Пример #1
0
                                                 "don't match")

    return Xeva, eva_names


if __name__ == '__main__':
    # Demo: load one subject's Insula ROI data from the FH dataset,
    # rescale it to [0, 1], keep labeled TRs, average over voxels, and
    # fit an FIR model against reaction time.
    from wheelerdata.load.fh import FH
    from fmrilearn.preprocess.labels import csv_to_targets
    from fmrilearn.load import load_meta
    from fmrilearn.load import load_nii
    from fmrilearn.preprocess.labels import filter_targets

    data = FH()

    # Use the first metadata file that contains a reaction-time column.
    metas = data.get_metapaths_containing('rt')
    targets = csv_to_targets(metas[0])

    paths = data.get_roi_data_paths('Insula')
    X = load_nii(paths[0], clean=True, sparse=False, smooth=False)

    # Rescale every feature to [0, 1].  NOTE: `np.float` was removed in
    # NumPy 1.24; the builtin `float` is the documented replacement.
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X.astype(float))

    # Keep only the labeled TRs, then collapse voxels into a single
    # mean timecourse (column vector).
    X = X[targets['TR'], :]
    X = X.mean(1)[:, np.newaxis]

    y = targets['rt']
    tc = targets['trialcount']
    Xfir, flfir = fir(X, y, tc, 20, 1.5)
    #Xeva, fleva = eva(X, y, tc, 11, 1.5)

    import matplotlib.pyplot as plt
Пример #2
0
def make_bold(cond,
              index,
              wheelerdata,
              cond_to_rt,
              filtfile=None,
              TR=2,
              trname="TR",
              n_features=10,
              n_univariate=None,
              n_accumulator=None,
              n_decision=None,
              n_noise=None,
              drift_noise=False,
              step_noise=False,
              z_noise=False,
              drift_noise_param=None,
              step_noise_param=None,
              z_noise_param=None,
              noise_f=white,
              hrf_f=None,
              hrf_params=None,
              prng=None):
    """Make BOLD timecourse features based on Wheelerdata.

    Parameters
    ----------
    cond : str
        A condition name found in the wheelerdata object's metadata
    index : str
        A name of a trial index found in the wheelerdata object's metadata
    cond_to_rt : dict
        A map of cond (key) to reaction time (item, (int, float))
    wheelerdata : object, instance of Wheelerdata
        A Wheelerdata object
    filtfile : str, None
        A name of a json file designed for reprocessing Wheelerdata
        metadata
    TR : float, int
        The repetition time of the experiment
    trname : str
        The name of the index of TRs in the metadata
    n_features : int
        The number of features in total (the other n_* arguments
        must sum to this value)
    n_univariate : int
        The number of univariate (boxcar) features
    n_accumulator : int
        The number of accumulator features
    n_decision : int
        The number of decision features
    n_noise : int
        The number of noise features
    drift_noise : boolean, optional
        Add noise to the drift rate of the accumulator features
    step_noise : boolean, optional
        Add noise to each step of the accumulator features
    z_noise : boolean, optional
        Add noise to the start value of the accumulator features
    drift_noise_param : None or dict, optional
        Parameters for drift_noise which is drawn from a
        Gaussian distribution. None defaults to:
        `{"loc": 0, "scale" : 0.5}`
    step_noise_param : None or dict, optional
        Parameters for step_noise which is drawn from a
        Gaussian distribution. None defaults to:
        `{"loc" : 0, "scale" : 0.2, "size" : 1}`
    z_noise_param : None or dict, optional
        Parameters for z_noise which is drawn from the uniform
        distribution. None defaults to:
        `{"low" : 0.01, "high" : 0.5, "size" : 1}`
    noise_f : function, optional
        Produces noise, must have a signature like
        `noise, prng = f(N, prng)`
    hrf_f : function, optional
        Returns a haemodynamic response, signature hrf_f(**hrf_params)
    hrf_params : dict
        Keyword parameters for hrf_f
    prng : None or RandomState object
        Allows for independent random draws, used for all
        random sampling

    Returns
    -------
    Xs : list of 2d arrays
        One (n_sample, n_features) simulated BOLD array per metadata file
    ys : list of 1d arrays
        The condition labels matching each entry of Xs
    yindices : list of 1d arrays
        The trial indices matching each entry of Xs
    """

    # ----
    # Feature composition: unspecified counts default to 0, and the
    # univariate count absorbs whatever remains of n_features.
    if n_noise is None:
        n_noise = 0
    if n_accumulator is None:
        n_accumulator = 0
    if n_decision is None:
        n_decision = 0
    if n_univariate is None:
        n_univariate = (n_features - n_noise - n_accumulator - n_decision)

    if (n_features - n_univariate - n_accumulator - n_noise - n_decision) != 0:
        raise ValueError("The number of features don't add up.")

    # Load wheelerdata
    metas = wheelerdata.get_RT_metadata_paths()

    # Get to work simulating
    Xs, ys, yindices = [], [], []
    for meta in metas:
        # Get data, preprocess too,
        data = csv_to_targets(meta)
        data = tr_pad_targets(data, trname, data[trname].shape[0], pad=np.nan)

        if filtfile is not None:
            data = reprocess_targets(filtfile, data, np.nan,
                                     ("TR", "trialcount"))

        # Every condition present in the data must have a known RT,
        # otherwise the accumulator/decision traces can't be built.
        for c in unique_nan(data[cond]):
            try:
                cond_to_rt[c]
            except KeyError:
                raise KeyError("{0} not present in cond_to_rt".format(c))

        # use cond to create y
        y = create_y(data[cond])
        yindex = data[index]

        # make accumulator and decision traces
        if n_accumulator > 0:
            data["accumulator"] = _make_accumulator_array(y,
                                                          yindex,
                                                          cond_to_rt,
                                                          drift_noise,
                                                          step_noise,
                                                          z_noise,
                                                          drift_noise_param,
                                                          step_noise_param,
                                                          z_noise_param,
                                                          prng=prng)
        if n_decision > 0:
            data["decision"] = _make_decision_array(y, yindex, cond_to_rt)

        # Populate Xmeta
        boldsim = Reproduce(y,
                            data,
                            noise_f=noise_f,
                            hrf_f=hrf_f,
                            hrf_params=hrf_params,
                            TR=TR,
                            prng=prng)
        boldsim.create_dm_from_y(convolve=False)

        n_sample_feature = boldsim.dm.shape[0]
        Xmeta = np.zeros((n_sample_feature, n_features))

        # Features are laid out left to right in fixed order:
        # univariate | accumulator | decision | noise.

        # 1. univariate features
        start = 0
        stop = n_univariate
        for j in range(start, stop):
            boldsim.create_bold(np.sum(boldsim.dm[:, 1:], axis=1),
                                convolve=True)
            Xmeta[:, j] = boldsim.bold

        # 2. accumulator features
        start = stop
        stop = start + n_accumulator
        for j in range(start, stop):
            boldsim.create_bold(data["accumulator"], convolve=True)
            Xmeta[:, j] = boldsim.bold

        # 3. decision features
        start = stop
        stop = start + n_decision
        for j in range(start, stop):
            boldsim.create_bold(data["decision"], convolve=True)
            Xmeta[:, j] = boldsim.bold

        # 4. noise features:
        start = stop
        stop = start + n_noise
        for j in range(start, stop):
            # Drop baseline from noise
            randbold = rand(boldsim.dm.shape[0])
            randbold[boldsim.y == 0] = 0.0
            boldsim.create_bold(randbold, convolve=True)
            Xmeta[:, j] = boldsim.bold

        Xs.append(Xmeta)
        ys.append(y)
        yindices.append(yindex)

    return Xs, ys, yindices
Пример #3
0
    def run(self, basename, roi, cond, smooth=False, filtfile=None, event=False):
        """Decompose every subject's data for one ROI/condition and write
        the timecourses into a single CSV table.

        Parameters
        ----------
        basename : str
            Prefix of the output table name (`basename_roi_cond`)
        roi : str
            ROI name, used to locate each subject's data paths
        cond : str
            Condition (metadata column) whose labels drive the decomposition
        smooth : boolean, optional
            Passed through to `load_nii`
        filtfile : str, None, optional
            A json file for reprocessing the targets before decomposition
        event : boolean, optional
            If True use event-level RT metadata paths instead of
            condition-matched metadata paths
        """
        # Save here....
        table = join_by_underscore(False, basename, roi, cond)

        # and reinit write flags.
        roicount = 0
        mode = 'w'
        header = True

        # Getting to work, find subjects data
        paths = self.data.get_roi_data_paths(roi)
        if not event:
            metas = self.data.get_metapaths_containing(cond)
        else:
            metas = self.data.get_RT_metadata_event_paths()

        # And decompose it
        for path, meta in zip(paths, metas):
            roiname = get_roiname(path)
            print("\t{0}".format(roiname))  ## ...progress

            # If were past the first Ss data, append.
            if roicount > 0:
                mode = 'a'
                header = False

            # Get data
            targets = csv_to_targets(meta)

            X = load_nii(path, clean=True, sparse=False, smooth=smooth)
            targets = tr_pad_targets(targets, "TR", X.shape[0], pad=np.nan)

            # Preprocess labels
            if filtfile is not None:
                targets = reprocess_targets(filtfile, targets, np.nan)
                assert targets["TR"].shape[0] == X.shape[0], ("target" 
                    "reprocessing is broken")

            # Rescale each feature to [0, 1].  NOTE: `np.float` was removed
            # in NumPy 1.24; the builtin `float` is the replacement.
            norm = MinMaxScaler((0, 1))
            X = norm.fit_transform(X.astype(float))

            Xcs, csnames, ti_cs = self.spacetime.fit_transform(
                    X, targets[cond], targets["trialcount"], 
                    self.window, self.tr)

            # and write.
            dataname = join_by_underscore(False, roiname)
            known = []
            for Xc, csname, ti in zip(Xcs, csnames, ti_cs):
                # Write a header only the first time a condition name is
                # seen for this subject; repeats are appended headerless.
                seen = csname in known
                save_tcdf(
                        name=join_by_underscore(True, table, csname),
                        X=Xc,
                        cond=csname,
                        dataname=dataname,
                        index=ti.astype(int),
                        header=False if seen else header,
                        mode='a' if seen else mode,
                        float_format="%.{0}f".format(self.nsig))
                if not seen:
                    known.append(csname)
            roicount += 1
Пример #4
0
        "don't match")

    return Xeva, eva_names


if __name__ == '__main__':
    # Demo: load one subject's Insula ROI data from the FH dataset,
    # rescale it to [0, 1], keep labeled TRs, average over voxels, and
    # fit an FIR model against reaction time.
    from wheelerdata.load.fh import FH
    from fmrilearn.preprocess.labels import csv_to_targets
    from fmrilearn.load import load_meta
    from fmrilearn.load import load_nii
    from fmrilearn.preprocess.labels import filter_targets

    data = FH()

    # Use the first metadata file that contains a reaction-time column.
    metas = data.get_metapaths_containing('rt')
    targets = csv_to_targets(metas[0])

    paths = data.get_roi_data_paths('Insula')
    X = load_nii(paths[0], clean=True, sparse=False, smooth=False)

    # Rescale every feature to [0, 1].  NOTE: `np.float` was removed in
    # NumPy 1.24; the builtin `float` is the documented replacement.
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X.astype(float))

    # Keep only the labeled TRs, then collapse voxels into a single
    # mean timecourse (column vector).
    X = X[targets['TR'], :]
    X = X.mean(1)[:, np.newaxis]

    y = targets['rt']
    tc = targets['trialcount']
    Xfir, flfir = fir(X, y, tc, 20, 1.5)
    #Xeva, fleva = eva(X, y, tc, 11, 1.5)

    import matplotlib.pyplot as plt