Example #1
import os

import numpy as np
import pandas as pd

# construct_targets, construct_filter, filter_targets, and nod_mat
# are helpers from the surrounding project (not shown here).


def _create_nod(metapath, fidlpath, scode):
    # Every trial is modeled with the same fixed duration.
    triallen = 5

    # Load the trial metadata and pull out the columns of interest.
    meta = pd.read_csv(metapath)
    faceshouses = np.array(meta["exp"].tolist())
    trs = np.array(meta["TR"].tolist())
    trial_index = np.array(meta["trialcount"].tolist())

    targets = construct_targets(
            trs=trs,
            faceshouses=faceshouses,
            trial_index=trial_index)

    # Keep only the face and house trials.
    keepers = ["face", "house"]
    keep_fhs = construct_filter(targets["faceshouses"], keepers, True)
    targets = filter_targets(keep_fhs, targets)
    
    names = targets["faceshouses"]
    onsets = targets["trs"]
    durations = np.array([triallen, ] * len(targets["trial_index"]))

    # Write the names/onsets/durations design to a .mat file.
    nod_mat(names, onsets, durations,
            os.path.join(fidlpath, "nod_" + scode + "_stim_facehouse.mat"))
Example #2
from json import load

# construct_filter, filter_targets, merge_labels, and checkX are
# helpers from the surrounding project (not shown here).


def filterX(filtname, X, targets):
    """ Use a config file to filter both X and targets.

    Parameters
    ----------
    filtname - str, a file path
        The name of valid json file (see Info)
    X - 2D array-like (n_samples x n_features)
        The data to filter
    targets - dict-like
        A dictionary of labels/targets for X. Keys 
        are names and values are sklearn compatible
        lebels

    Return
    ------
    The filtered X, targets

    Info
    ----
    The named json file has the must can only have 3 
    top level nodes ["keep", "merge", "join"], one of
    which must be present.

    Below each top-level key is a label name which must be 
    present in targets.

    From there it depends on which top-level branch you are in
        TODO
    """

    # Load the json config, closing the file handle when done.
    with open(filtname, "r") as fh:
        filterconf = load(fh)

    # Validate top level nodes
    validnodes = ["keep", "merge", "join"]
    for k in filterconf.keys():
        if k not in validnodes:
            raise ValueError("Unknown filter command {0}".format(k))

    # Validate that X and targets match
    for k, v in targets.items():
        if v.shape[0] != X.shape[0]:
            raise ValueError("Before: X/target shape mismatch for '{0}'".format(k))

    # Apply "keep": retain only rows whose label is in keepers.
    if "keep" in filterconf:
        for k, keepers in filterconf["keep"].items():
            labels = targets[k]
            mask = construct_filter(labels, keepers, True)
            targets = filter_targets(mask, targets)
            X = X[mask, :]

    # Apply "merge": remap label values according to mmap.
    if "merge" in filterconf:
        for k, mmap in filterconf["merge"].items():
            labels = targets[k]
            targets[k] = merge_labels(labels, mmap)

    # "join" is accepted as a node but not yet implemented.
    if "join" in filterconf:
        raise NotImplementedError("join not yet implemented.  Sorry.")

    # Revalidate that X and targets still match
    for k, v in targets.items():
        if v.shape[0] != X.shape[0]:
            raise ValueError("After: X/targets shape mismatch for '{0}'".format(k))
    assert checkX(X)

    return X, targets
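
A sketch of how filterX might be driven, following the docstring's description of the config format; the file name, the label key "faceshouses", and the label values here are hypothetical:

# filter.json (hypothetical contents):
# {
#     "keep": {"faceshouses": ["face", "house1", "house2"]},
#     "merge": {"faceshouses": {"house1": "house", "house2": "house"}}
# }
X, targets = filterX("filter.json", X, targets)
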
Example #3
    # Relies on module-level imports (os, numpy as np,
    # sklearn.preprocessing.MinMaxScaler) and project helpers
    # (make_bold, construct_targets, filter_targets, filterX,
    # smoothfn, unique_nan, sort_nanfirst, save_tcdf,
    # join_by_underscore) not shown here.
    def run(self, basename, smooth=False, filtfile=None,
            n=None, tr=None, n_rt=None, n_trials_per_cond=None,
            durations=None, noise=None, n_features=None,
            n_univariate=None, n_accumulator=None, n_decision=None,
            n_noise=None, n_repeated=None, drift_noise=False,
            step_noise=False):
        
        # File-writing state: the first subject creates the file
        # and writes the header.
        mode = 'w'
        header = True

        for scode in range(n):
            # If we're past the first subject's data, append
            # instead of overwriting.
            if scode > 0:
                mode = 'a'
                header = False

            # Create the data
            X, y, y_trialcount = make_bold(
                    n_rt, 
                    n_trials_per_cond, 
                    tr, 
                    durations=durations, 
                    noise=noise, 
                    n_features=n_features, 
                    n_univariate=n_univariate, 
                    n_accumulator=n_accumulator, 
                    n_decision=n_decision,
                    n_noise=n_noise,
                    n_repeated=n_repeated,
                    drift_noise=drift_noise,
                    step_noise=step_noise)

            targets = construct_targets(trial_index=y_trialcount, y=y)

            # Drop baseline trials created by make_bold
            baselinemask = np.arange(y.shape[0])[y != 0]
            X = X[baselinemask, :]
            targets = filter_targets(baselinemask, targets)

            # Filter and smooth, if requested
            if filtfile is not None:
                X, targets = filterX(filtfile, X, targets)
            if smooth:
                X = smoothfn(X, tr=1.5, ub=0.10, lb=0.001)
            
            # Normalize each feature to [0, 1]
            # (np.float was removed from NumPy; use the builtin float)
            norm = MinMaxScaler((0, 1))
            X = norm.fit_transform(X.astype(float))
            
            # finally decompose.
            Xcs, csnames, ti_cs = self.spacetime.fit_transform(
                    X, targets["y"], targets["trial_index"], 
                    self.window)
            
            # Name them (overwriting the names returned by
            # fit_transform), sorted NaN-first,
            csnames = unique_nan(y)
            csnames = sort_nanfirst(csnames)

            # and write each condition's timecourse to disk.
            for Xc, csname, ti in zip(Xcs, csnames, ti_cs):
                save_tcdf(
                        name=join_by_underscore(True, basename, csname),
                        X=Xc,
                        cond=csname,
                        dataname=join_by_underscore(False,
                                os.path.split(basename)[-1], scode),
                        index=ti.astype(int),
                        header=header,
                        mode=mode,
                        float_format="%.{0}f".format(self.nsig))
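
A hypothetical invocation sketch; it assumes an enclosing experiment object that supplies self.spacetime, self.window, and self.nsig, and every name and argument value below is illustrative rather than taken from the source:

# "exp" and all argument values are illustrative only.
exp.run("results/facehouse", smooth=True, filtfile="filter.json",
        n=10, tr=1.5, n_rt=60, n_trials_per_cond=30,
        durations=[1, ], noise="white", n_features=100,
        n_univariate=20, n_accumulator=20, n_decision=20,
        n_noise=40, n_repeated=0)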