Example #1
def _create_nod(metapath, fidlpath, scode):
    triallen = 5

    # Read the trial metadata and pull out the columns we need
    meta = pd.read_csv(metapath)
    faceshouses = np.array(meta["exp"].tolist())
    trs = np.array(meta["TR"].tolist())
    trial_index = np.array(meta["trialcount"].tolist())

    targets = construct_targets(
            trs=trs,
            faceshouses=faceshouses,
            trial_index=trial_index)

    # Keep only the face and house trials
    keepers = ["face", "house"]
    keep_fhs = construct_filter(targets["faceshouses"], keepers, True)
    targets = filter_targets(keep_fhs, targets)

    # Build the NOD (names, onsets, durations) arrays and write the .mat
    names = targets["faceshouses"]
    onsets = targets["trs"]
    durations = np.array([triallen, ] * len(targets["trial_index"]))

    nod_mat(names, onsets, durations, os.path.join(fidlpath,
            "nod_" + scode + "_stim_facehouse.mat"))
Example #2
def create(name, fidl, filterf, nod=True):
    """Use a fild file to create a metadata label table, using the json
    filter file to convert, rename, and reorganize labels as needed.

    Parameters
    ----------
    name : str
        The name of the csv file (inculde extension)
    fidl : str
        The name of the .fidl file
    filterf : str
        The name of the filer json file (see Note for format details)
    nod : Boolean
        Write SPM compatile NOD (Names, Onsets, Durations) .mat files

    Notes
    -----
    Overall, filterf has two goals: 1. configure create() for the
    current exp, and 2. remap/join labels/conditions from the fidl file
    before using them in the csv.  Fidl names are often confusing
    and noisy.

    The filterf should be a valid json file whose top level
    is a list.  The first element in that list must be the exp parameters
    in a dict.  The remaining elements are dicts whose keys
    become column names in the csv.  The values are the
    fuzzy_label : label pairs.

    Nothing is done to validate the filterf.  Use with care and
    double check the results.

    For example (delete comments (#) before use/validation):
        
        [
        # The parameters
            {
                "triallen" : 5, 
                "condcol" : 2,
                "trialcol" : 4,
                "final_ncol" : 8
            },
            {
        # The first filtration: the column will be 'trial',
        # and each fidl label matching a key on the left gets
        # coded as 'trial'
                "trial": {
                    "1FaceCor1" : "trial",
                    "2FaceCor2" :  "trial",
                    "3FaceCor3" :  "trial",
                    "4FaceCor4" : "trial",
                    "5FaceCor5" : "trial",
                    "6HouseCor1" : "trial",
                    "7HouseCor2" : "trial",
                    "8HouseCor3" :  "trial",
                    "9HouseCor4" :  "trial",
                    "10HouseCor5" :  "trial",
                    "11NoiseResp1" : "trial",
                    "12NoiseResp2" : "trial",
                    "13NoiseResp3" : "trial",
                    "14NoiseResp4" : "trial",
                    "15NoiseResp5" : "trial",
                    "16NoiseResp5" : "trial",
                    "16MultiResp" :  "trial",
                    "17NoResp" : "trial"
                }
            },
            {
        # Col is rt, and both Cor1 and Resp1
        # get remapped to rt1 (and repeat for 2, 3 ...)
                "rt": {
                    "Cor1" : "rt1",
                    "Cor2" : "rt2",
                    "Cor3" : "rt3",
                    "Cor4" : "rt4",
                    "Resp1" : "rt1",
                    "Resp2" : "rt2",
                    "Resp3" : "rt3",
                    "Resp4" : "rt4"
                }    
            },
            {
        # And so on....
                "exp": {
                    "Face" : "face",
                    "House" : "house",
                    "Noise" : "noise"
                }
            }
        ]
    """

    fidl_to_csv(fidl, name, 0)
    ## Write a csv to disk

    filterdata = load(open(filterf, 'r'))
    expdata = filterdata.pop(0)
    condcol = expdata["condcol"]
    trialcol = expdata["trialcol"]
    triallen = expdata["triallen"]

    for fd in filterdata:
        fuzzy_label(name, condcol, _get_filt(fd), _get_name(fd), header=True)

    # Add the name of the fidl file as a factor
    dname = os.path.splitext(os.path.basename(fidl))[0]
    if len(dname) < 3:
        dname += "__"

    sublab = {"trial": dname}
    fuzzy_label(name, trialcol, sublab, "scode", header=True)

    # Fill in TR time
    tdur = {"trial": triallen}
    tr_time(name, trialcol, tdur, drop=True, header=True)
    ncol = expdata["final_ncol"]
    fill_tr_gaps(os.path.join(os.path.dirname(name),
                              "trtime_" + os.path.basename(name)),
                 ncol,
                 fill='nan')

    # Be SPM compatible?
    if nod:
        csvdata = pd.read_csv(name)
        names = csvdata["rt"]
        trials = csvdata["trial"]
        onsets = csvdata["TR"]

        durations = np.array([triallen, ] * len(trials))
        nod_mat(
            names, onsets, durations,
            os.path.join(
                os.path.dirname(name),
                "nod_" + os.path.splitext(os.path.basename(name))[0] + ".mat"))
# ----
# 6. Add labels for subject/fidl
sublab = {"trial": basename}
fuzzy_label(csv_f, trialcol, sublab, "scode", header=True)

# ----
# 7. Expand labels so they cover every TR
final_ncol = 8
tdur = {"trial": triallen}
tr_time(csv_f, trialcol, tdur, drop=True, header=True)
fill_tr_gaps("trtime_" + csv_f, final_ncol)
## As the trial labels were added first following csv conversion,
## the trial label lives in col 3

# ----
# 8. Create the NOD mat file
#
# Get the csv file"s data
# then get onsets, names and
# create durations
csvdata = pd.read_csv(csv_f)

names = csvdata["rt"]
trials = csvdata["trial"]

onsets = csvdata["TR"]
durations = np.array([triallen, ] * len(trials))
nod_mat(names, onsets, durations, "nod_" + basename + ".mat")
fuzzy_label(csv_f, condcol, resplab, "resp", header=True)

# ----
# 5. Add labels for subject/fidl
sublab = {"trial" : basename}
fuzzy_label(csv_f, trialcol, sublab, "scode", header=True)

# ----
# 6. Expand labels so they cover every TR
final_ncol = 8
tdur  = {"trial" : triallen} 
tr_time(csv_f, trialcol, tdur, drop=True, header=True)
fill_tr_gaps("trtime_" + csv_f, final_ncol)
## As the trial labels were added first following csv conversion,
## the trial label lives in col 3

# ----
# 8. Create the NOD mat file
#
# Get the csv file"s data
# then get onsets, names and 
# create durations 
csvdata = pd.read_csv(csv_f)
trials = csvdata["trial"]

names = csvdata["resp"]
onsets = csvdata["TR"]
durations = np.array([triallen, ] * len(trials))

nod_mat(names, onsets, durations, "nod_" + basename + ".mat")
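
All of the examples end by calling nod_mat to write an SPM "multiple conditions" file. Its implementation is not shown here, so the following is only a minimal sketch of what such a writer could look like, using scipy.io.savemat and assuming names, onsets, and durations are flat, equal-length sequences with one entry per event:

# Sketch of a NOD writer (an assumption, not the project's actual nod_mat).
# SPM's multiple-conditions .mat file holds cell arrays called names,
# onsets, and durations, with one cell per condition.
import numpy as np
from scipy.io import savemat


def nod_mat_sketch(names, onsets, durations, matpath):
    names = np.asarray(names, dtype=object)
    onsets = np.asarray(onsets, dtype=float)
    durations = np.asarray(durations, dtype=float)

    # Group event onsets/durations by their condition name
    conds = sorted(set(names.tolist()))
    cell_names = np.empty(len(conds), dtype=object)
    cell_onsets = np.empty(len(conds), dtype=object)
    cell_durations = np.empty(len(conds), dtype=object)
    for i, cond in enumerate(conds):
        mask = names == cond
        cell_names[i] = cond
        cell_onsets[i] = onsets[mask]
        cell_durations[i] = durations[mask]

    savemat(matpath, {"names": cell_names,
                      "onsets": cell_onsets,
                      "durations": cell_durations})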