Пример #1
0
def load_nii(nifiti, clean=True, sparse=False, smooth=False, **kwargs):
    """Convert the nifti-1 file into a 2D array (n_sample x n_features).

    Parameters
    ----------
    nifiti - str
        The name of the data to load
    clean - boolean (True)
        Remove invariant features?  If used, n_features will
        not match n_voxels in the original nifti-1 file.  This operation
        is not reversible.  If you clean there is probably little
        point in converting to a sparse representation.
    sparse - boolean (False)
        Use the (CSC) sparse format (True)?
    smooth - boolean (False)
        High/low pass filter the data?
    [, ...] - Optional parameters for smooth
        (defaults: tr=1.5, ub=0.06, lb=0.001)

    Return
    ------
    X - 2D array (n_sample x n_features)
        The BOLD data
    """

    # Data is 4d (x,y,z,t); we want 2d, where each column is
    # a voxel and each row is the temporal (t) data,
    # i.e. the final shape should be (x*y*z, t).
    nii = nb.nifti1.load(nifiti)

    numt = nii.shape[3]
    numxyz = nii.shape[0] * nii.shape[1] * nii.shape[2]
    dims = (numxyz, numt)

    # Get into 2d (n_feature, n_sample)
    X = nii.get_data().astype('int16').reshape(dims).transpose()
    if clean:
        X = remove_invariant_features(X, sparse=False)

    if smooth:
        # Pull smoothing params from kwargs, falling back to defaults.
        tr = kwargs.get("tr", 1.5)
        ub = kwargs.get("ub", 0.06)
        # BUG FIX: the original assigned kwargs["lb"] to `ub`, so a
        # user-supplied low bound silently clobbered the upper bound
        # and `lb` kept its default.
        lb = kwargs.get("lb", 0.001)

        X = smoothfn(X, tr=tr, ub=ub, lb=lb)

    assert checkX(X)

    if sparse:
        X = csc_matrix(X)

    return X
Пример #2
0
def load_nii(nifiti, clean=True, sparse=False, smooth=False, **kwargs):
    """Convert the nifti-1 file into a 2D array (n_sample x n_features).

    Parameters
    ----------
    nifiti - str
        The name of the data to load
    clean - boolean (True)
        Remove invariant features?  If used, n_features will
        not match n_voxels in the original nifti-1 file.  This operation
        is not reversible.  If you clean there is probably little
        point in converting to a sparse representation.
    sparse - boolean (False)
        Use the (CSC) sparse format (True)?
    smooth - boolean (False)
        High/low pass filter the data?
    [, ...] - Optional parameters for smooth
        (defaults: tr=1.5, ub=0.06, lb=0.001)

    Return
    ------
    X - 2D array (n_sample x n_features)
        The BOLD data
    """

    # Data is 4d (x,y,z,t); we want 2d, where each column is
    # a voxel and each row is the temporal (t) data,
    # i.e. the final shape should be (x*y*z, t).
    nii = nb.nifti1.load(nifiti)

    numt = nii.shape[3]
    numxyz = nii.shape[0] * nii.shape[1] * nii.shape[2]
    dims = (numxyz, numt)

    # Get into 2d (n_feature, n_sample)
    X = nii.get_data().astype('int16').reshape(dims).transpose()
    if clean:
        X = remove_invariant_features(X, sparse=False)

    if smooth:
        # Pull smoothing params from kwargs, falling back to defaults.
        tr = kwargs.get("tr", 1.5)
        ub = kwargs.get("ub", 0.06)
        # BUG FIX: the original assigned kwargs["lb"] to `ub`, so a
        # user-supplied low bound silently clobbered the upper bound
        # and `lb` kept its default.
        lb = kwargs.get("lb", 0.001)

        X = smoothfn(X, tr=tr, ub=ub, lb=lb)

    assert checkX(X)

    if sparse:
        X = csc_matrix(X)

    return X
Пример #3
0
    def run(self, basename, cond, index, wheelerdata, cond_to_rt, 
        smooth=False,
        filtfile=None, TR=2, trname="TR", 
        n_features=10, n_univariate=None, n_accumulator=None, n_decision=None, 
        n_noise=None, drift_noise=False, step_noise=False, z_noise=False,
        drift_noise_param=None, step_noise_param=None, z_noise_param=None,
        noise_f=white, hrf_f=None, hrf_params=None, prng=None):       
        """Reproduce the cond from the wheelerdata experiment
        
        Parameters
        ----------
        basename : str
            The name for the Reproduced datafile, will be suffixed
            by each cond and scode and .csv 
            (i.e. `'{0}_{1}_{2}.csv'.format(basename, cond, scode)`).
        cond : str
            A condition name found in the wheelerdata objects metadata
        index : str
            A name of a trial index found in the wheelerdata object metadata
        wheelerdata : object, instance of Wheelerdata
            A Wheelerdata object
        cond_to_rt: dict
            A map of cond (key) to reaction time (item, (int, float))    
        smooth : boolean, optional
            Do bandpass filtering (default False)
        filtfile : str, None
            A name of json file designed for reprocessing Wheelerdata metadata
        TR : float, int
            The repetition time of the experiment
        trname : str
            The name of the index of TRs in the metadata
        n_features : int
            The number of features in total (other n_* arguments
            must sum to this value)
        n_univariate : int
            The number of univariate (boxcar) features
        n_accumulator : int
            The number of accumulator features
        n_decision : int
            The number of decision features
        n_noise : int
            The number of noise features
        drift_noise : boolean, optional
            Add noise to the drift rate of the accumulator features
        step_noise : boolean, optional
            Add Noise to each step accumulator features
        z_noise : boolean, optional
            Add noise to the start value of accumulator features
        drift_noise_param : None or dict, optional
            Parameters for drift_noise which is drawn from a
            Gaussian distribution. None defaults to: 
            `{"loc": 0, "scale" : 0.5}`
        step_noise_param : None or dict, optional
            Parameters for step_noise which is drawn from a 
            Gaussian distribution. None defaults to:
            `{"loc" : 0, "scale" : 0.2, "size" : 1}`
        z_noise_param : None or dict, optional
            Parameters for z_noise which is drawn from the uniform
            distribution. None defaults to:
            `{"low" : 0.01, "high" : 0.5, "size" : 1}`
        noise_f : function, optional
            Produces noise, must have signatures like `noise, prng = f(N, prng)`
        hrf_f : function, optional
            Returns a haemodynamic response, signature hrf_f(**hrf_params)
        hrf_params : dict
            Keyword parameters for hrf_f
        prng : None or RandomState object
            Allows for independent random draws, used for all 
            random sampling
        """

        # Start in write mode with a header row; flipped to append/no-header
        # at the bottom of the loop so all subjects land in the same files.
        mode = 'w'
        header = True

        # All *s lists correspond to wheelerdata.scodes
        scodes = self.data.scodes
        # Simulate BOLD data for every subject in one shot.
        Xs, ys, yindices = make_bold_re(
                cond, index, self.data,
                cond_to_rt,
                filtfile=filtfile, 
                trname=trname,
                noise_f=noise_f, 
                hrf_f=hrf_f, 
                hrf_params=hrf_params, 
                n_features=n_features, 
                n_univariate=n_univariate, 
                n_accumulator=n_accumulator, 
                n_decision=n_decision, 
                n_noise=n_noise, 
                drift_noise=drift_noise, 
                step_noise=step_noise, 
                z_noise=z_noise,
                drift_noise_param=drift_noise_param, 
                step_noise_param=step_noise_param, 
                z_noise_param=z_noise_param,
                prng=prng)
        
        for scode, X, y, yindex in zip(scodes, Xs, ys, yindices):
            if smooth:
                # NOTE(review): bandpass params are hard-coded (tr=1.5)
                # rather than derived from the TR argument -- confirm intended.
                X = smoothfn(X, tr=1.5, ub=0.10, lb=0.001)
            
            # Normalize each feature into [0, 1].
            norm = MinMaxScaler((0,1))
            X = norm.fit_transform(X.astype(np.float))
            
            Xcs, csnames, ti_cs = self.spacetime.fit_transform(
                    X, y, yindex, self.window, self.tr)

            # Name them,
            # NOTE(review): the csnames returned by fit_transform above are
            # immediately overwritten here -- verify this is intentional.
            csnames = unique_nan(y)
            csnames = sort_nanfirst(csnames)

            # and write.
            for Xc, csname, ti in zip(Xcs, csnames, ti_cs):
                save_tcdf(
                        name=join_by_underscore(True, basename, csname), 
                        X=Xc, 
                        cond=csname,
                        dataname=join_by_underscore(False, 
                                os.path.split(basename)[-1], scode),
                        index=ti.astype(np.int),
                        header=header, 
                        mode=mode,
                        float_format="%.{0}f".format(self.nsig))
            
            # After s 1 go to append mode
            mode = 'a'
            header = False
Пример #4
0
    def run(self, basename, smooth=False, filtfile=None, 
        n=None, tr=None, n_rt=None, n_trials_per_cond=None,
        durations=None ,noise=None, n_features=None, n_univariate=None, 
        n_accumulator=None, n_decision=None, n_noise=None, 
        n_repeated=None, drift_noise=False, step_noise=False):
        """Simulate BOLD data for `n` subjects and write each condition
        to CSV files derived from `basename`.

        Parameters
        ----------
        basename : str
            Prefix for the output CSV files (one file per condition name).
        smooth : boolean, optional
            Bandpass filter each subject's data (default False)
        filtfile : str, None
            Passed to filterX to reprocess the targets; skipped when None
        n : int
            Number of subjects to simulate
        tr : float, int
            Repetition time passed to make_bold
        n_rt, n_trials_per_cond, durations, noise, n_features,
        n_univariate, n_accumulator, n_decision, n_noise, n_repeated :
            Forwarded to make_bold -- see that function for semantics.
        drift_noise, step_noise : boolean, optional
            Forwarded to make_bold.
        """
        
        # Write init: first subject writes fresh files with a header;
        # later subjects append (toggled at the top of the loop).
        mode = 'w'
        header = True

        for scode in range(n):
            # If were past the first Ss data, append.
            if scode > 0:
                mode = 'a'
                header = False

            # Create the data
            X, y, y_trialcount = make_bold(
                    n_rt, 
                    n_trials_per_cond, 
                    tr, 
                    durations=durations, 
                    noise=noise, 
                    n_features=n_features, 
                    n_univariate=n_univariate, 
                    n_accumulator=n_accumulator, 
                    n_decision=n_decision,
                    n_noise=n_noise,
                    n_repeated=n_repeated,
                    drift_noise=drift_noise,
                    step_noise=step_noise)

            targets = construct_targets(trial_index=y_trialcount, y=y)

            # Drop baseline trials created by make_bold
            # (rows where y == 0 are treated as baseline).
            baselinemask = np.arange(y.shape[0])[y != 0]
            X = X[baselinemask, ]
            targets = filter_targets(baselinemask, targets)

            # Filter and
            if filtfile is not None:
                X, targets = filterX(filtfile, X, targets)
            if smooth:
                # NOTE(review): bandpass params are hard-coded (tr=1.5)
                # rather than using the tr argument -- confirm intended.
                X = smoothfn(X, tr=1.5, ub=0.10, lb=0.001)
            
            # Normalize each feature into [0, 1].
            norm = MinMaxScaler((0,1))
            X = norm.fit_transform(X.astype(np.float))
            
            # finally decompose.
            Xcs, csnames, ti_cs = self.spacetime.fit_transform(
                    X, targets["y"], targets["trial_index"], 
                    self.window)
            
            # Name them,
            # NOTE(review): the csnames returned by fit_transform above are
            # immediately overwritten here -- verify this is intentional.
            csnames = unique_nan(y)
            csnames = sort_nanfirst(csnames)

            # and write.
            for Xc, csname, ti in zip(Xcs, csnames, ti_cs):
                save_tcdf(
                        name=join_by_underscore(True, basename, csname), 
                        X=Xc, 
                        cond=csname,
                        dataname=join_by_underscore(False, 
                                os.path.split(basename)[-1], scode),
                        index=ti.astype(np.int),
                        header=header, 
                        mode=mode,
                        float_format="%.{0}f".format(self.nsig))