Code Example #1
File: var_model.py Project: hjpae/hnlpy
def getdata(path, subID):
    currentSub = subID[0:4]
    print('Current Subject: ', currentSub)
    #    pcdict = read_mat(path + subID + '_task3_photocells.mat')
    datadict = read_mat(path + subID + '_task3_final.mat')
    behavdict = read_mat(path + subID[0:4] + '_behavior_final.mat')

    data = np.array(datadict['data'])

    artifact = np.array(datadict['artifact'])
    sr = np.array(datadict['sr'])
    beh_ind = np.array(behavdict['trials'])

    # sum artifact flags across channels (per trial) and across trials (per channel)
    artifact0 = artifact.sum(axis=0)
    artifact1 = artifact.sum(axis=1)

    # identify goodtrials and good channels.
    goodtrials = np.squeeze(np.array(np.where(artifact0 < 20)))
    goodchans = np.squeeze(np.array(np.where(artifact1 < 40)))

    # BehEEG_int = list(set(beh_ind) & set(goodtrials))
    finalgoodtrials = np.array(diffusion.compLists(beh_ind, goodtrials))
    # finalgoodtrials = np.array(BehEEG_int)
    return data, sr, finalgoodtrials, goodchans
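All of these examples share one pattern: read_mat parses a MATLAB file (v6, v7, or v7.3/HDF5) and returns a plain Python dict keyed by variable name. A minimal sketch of that contract, with a hypothetical file name:

import numpy as np
from pymatreader import read_mat

mat = read_mat('recording.mat')  # hypothetical file; read_mat returns a dict
data = np.array(mat['data'])     # MATLAB matrices come back as numpy arrays

# variable_names restricts parsing to the listed variables
# (the same option appears in examples #7, #20 and #23 below)
subset = read_mat('recording.mat', variable_names=['data'])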
Code Example #2
def test_cell_struct_v6v7():
    v6_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_cell_struct_v6)))
    v7_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_cell_struct_v7)))

    assertDeepAlmostEqual(v6_data, v7_data)
Code Example #3
def SSVEP_task3All(subID):
    currentSub = subID[0:4]
    print('Current Subject: ', currentSub)
    pcdict = read_mat(path + subID + 'task3_photocells.mat')
    datadict = read_mat(path + subID + 'task3_final.mat')
    behavdict = read_mat(path + subID[0:5] + 'behavior_final.mat')

    data = np.array(datadict['data'])
    artifact = np.array(datadict['artifact'])
    sr = np.array(datadict['sr'])
    beh_ind = np.array(behavdict['trials'])
    condition = np.array(behavdict['condition'])
    rt = behavdict['rt']
    correct = behavdict['correct']

    # sum artifact flags across channels (per trial) and across trials (per channel)
    artifact0 = artifact.sum(axis=0)
    artifact1 = artifact.sum(axis=1)

    # identify goodtrials and good channels.
    goodtrials = np.squeeze(np.array(np.where(artifact0 < 20)))
    goodchans = np.squeeze(np.array(np.where(artifact1 < 40)))

    # choose the trials with RT over 300 ms and check whether they are all good trials;
    # get the resulting index and apply it to rt, condition and accuracy
    xy, x_ind, y_ind = np.intersect1d(beh_ind, goodtrials, return_indices=True)
    ind, finalgoodtrials = np.array(compListsInd(beh_ind, goodtrials))
    # here the ind stands for ind in beh_ind that are qualified as goodtrials
    # finalgoodtrials stand for the actual trial index from the original unsorted dataset
    rt_final = rt[ind]
    acc_final = correct[ind]
    behav_final = {'rt': rt_final, 'acc': acc_final}
    condition_final = condition[ind]
    correct_final = correct[ind]

    # time window of interest
    window = [1250, 2250]

    # FFT the eeg data
    stimulus_ssvep = getSSVEP(data, sr, window, 30, np.arange(0, 360, 1),
                              rs_chans)
    noise_ssvep = getSSVEP(data, sr, window, 40, np.arange(0, 360, 1),
                           rs_chans)
    #
    # # same procedure analyzed by condition
    # ind_ez = np.where(condition_final == 1)
    # ind_md = np.where(condition_final == 2)
    # ind_hr = np.where(condition_final == 3 )
    #
    # stim_ez = getSSVEP(data,sr,window,30, finalgoodtrials[ind_ez], goodchans)
    # stim_md = getSSVEP(data, sr, window, 30, finalgoodtrials[ind_md], goodchans)
    # stim_hr = getSSVEP(data, sr, window, 30, finalgoodtrials[ind_hr], goodchans)
    # noise_ez = getSSVEP(data,sr,window, 40, finalgoodtrials[ind_ez], goodchans)
    # noise_md = getSSVEP(data, sr, window, 40, finalgoodtrials[ind_md], goodchans)
    # noise_hr = getSSVEP(data, sr, window, 40, finalgoodtrials[ind_hr], goodchans)

    # stim_erpf = np.dstack((stim_ez['erp_fft'], stim_md['erp_fft'], stim_hr['erp_fft']))
    # noise_erpf = np.dstack((noise_ez['erp_fft'], noise_md['erp_fft'], noise_hr['erp_fft']))

    # pcdict holds the photocell data read at the top of this function
    return stimulus_ssvep, noise_ssvep, pcdict, behavdict, behav_final
Code Example #4
File: gen_sets.py Project: hjpae/hnlpy
def get_singletrialN200():
    path = '/home/ramesh/pdmattention/task3/'
    Data = np.empty((0, 500))
    TargetMatrix = read_mat(path + 's239_ses1_' + 'N200.mat')['singletrial']
    for index, sub in enumerate(subIDs):
        if sub !='s239_ses1_':
            print(index)
            Source = read_mat(path + sub + 'N200.mat')['singletrial']
            transfor = CORAL()
            transfor.fit(Source, TargetMatrix)
            Xs_trans = transfor.transfer(Source)  # adjusted source matrix
            stimulus_ssvep, _, _, _, _, _, _ = SSVEP_task3(sub)
            finalgoodtrials = stimulus_ssvep['goodtrials']
            singletrial = Xs_trans[1250:1750, finalgoodtrials].T
        else:
            print(index)
            Xs_trans = TargetMatrix
            stimulus_ssvep, _, _, _, _, _, _ = SSVEP_task3(sub)
            finalgoodtrials = stimulus_ssvep['goodtrials']
            singletrial = Xs_trans[1250:1750, finalgoodtrials].T
        #
        # N200 = read_mat(path + sub + 'N200.mat')
        # singletrial = N200['singletrial']
        # stimulus_ssvep, _, _, _, _, _, _ = SSVEP_task3(sub)
        # finalgoodtrials = stimulus_ssvep['goodtrials']
        # singletrial = singletrial[1250:2000, finalgoodtrials].T
        Data = np.vstack((Data, singletrial))
    return Data  # stacked single-trial matrix across subjects
Code Example #5
def test_v7v73():
    v7_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_v7_fname)))
    v73_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_v73_fname)))

    assertDeepAlmostEqual(v7_data, v73_data)
Code Example #6
def test_eeglab_v7v73():
    v7_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_eeglab_old)))

    v73_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_eeglab_h5)))

    assertDeepAlmostEqual(v7_data, v73_data)
Code Example #7
def test_ft_v7v73():
    v7_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_ft_v7_fname),
                 variable_names=('data_epoched',)))
    v73_data = sanitize_dict(
        read_mat(os.path.join(test_data_folder, testdata_ft_v73_fname),
                 variable_names=('data_epoched',)))

    assertDeepAlmostEqual(v7_data, v73_data)
Code Example #8
def ReadFiles(BehPath, EEGPath, taskNum, currentSub):
    behDict = read_mat(BehPath)
    rts = (behDict['rt']).tolist()
    rts = [x / 1000 for x in rts]

    conds = (behDict['condition']).tolist()
    corrects = behDict['correct'].tolist()
    BehInd = EWMAV(rts, conds, corrects, taskNum, currentSub)

    EEGDict = read_mat(EEGPath)
    EEGInd = EEGData(EEGDict)

    return BehInd, EEGInd
Code Example #9
    def find_image_pos(self,
                       xs: int,
                       ys: int,
                       t: int,
                       local=False) -> (List[image], List[coord]):
        """
        For coordinates (xs, ys) in the full-size stitched image,
        returns a list of original images (before stitching) that contain the coordinate,
        and, for each found image, the coordinates of the point of interest in
        the image.
        """
        date = self.dates[t]
        directory_name = get_dirname(date, self.plate)
        path_snap = self.directory + directory_name
        path_tile = path_snap + "/Img/TileConfiguration.txt.registered"
        skel = read_mat(path_snap + "/Analysis/skeleton_pruned_realigned.mat")
        Rot = skel["R"]
        trans = skel["t"]
        rottrans = np.dot(np.linalg.inv(Rot), np.array([xs, ys]) - trans)
        ys, xs = round(rottrans[0]), round(rottrans[1])
        tileconfig = pd.read_table(
            path_tile,
            sep=";",
            skiprows=4,
            header=None,
            converters={2: ast.literal_eval},
            skipinitialspace=True,
        )
        xs_yss = list(tileconfig[2])
        xes = [xs_ys[0] for xs_ys in xs_yss]
        yes = [xs_ys[1] for xs_ys in xs_yss]
        cmin = np.min(xes)
        cmax = np.max(xes)
        rmin = np.min(yes)
        rmax = np.max(yes)
        ximg = xs
        yimg = ys

        def find(xsub, ysub, x, y):
            indexes = []
            for i in range(len(xsub)):
                if (x >= xsub[i] - cmin and x < xsub[i] - cmin + 4096
                        and y >= ysub[i] - rmin and y < ysub[i] - rmin + 3000):
                    indexes.append(i)
            return indexes

        indsImg = find(xes, yes, ximg, yimg)
        possImg = [
            ximg - np.array(xes)[indsImg] + cmin + 1,
            yimg - np.array(yes)[indsImg] + rmin + 1,
        ]
        paths = []
        for index in indsImg:
            name = tileconfig[0][index]
            imname = "/Img/" + name.split("/")[-1]
            directory_name = get_dirname(date, self.plate)
            path = self.directory + directory_name + imname
            paths.append(path)
        ims = [imageio.imread(path) for path in paths]
        return (ims, possImg)
Code Example #10
File: util.py Project: ctroein/octavvs
def load_reference(wn, what=None, matfilename=None):
    """
    Loads and normalizes a spectrum from a Matlab file, interpolating at the given points.
        The reference is assumed to cover the entire range of wavenumbers.
    Parameters:
        wn: array of wavenumbers at which to get the spectrum
        what: A string defining what type of reference to get, corresponding to a file in the
        'reference' directory
        matfilename: the name of an arbitrary Matlab file to load data from; the data must be
        in a matrix called AB, with wavenumbers in the first column.
        Returns: spectrum at the points given by wn
    """
    if (what is None) == (matfilename is None):
        raise ValueError("Either 'what' or 'matfilename' must be specified")
    if what is not None:
        matfilename = resource_filename('octavvs.reference_spectra',
                                        what + ".mat")


#        matfilename = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__),
#                                       'reference', what + '.mat'))
    ref = read_mat(matfilename)['AB']
    # Handle the case of high-to-low since the interpolator requires low-to-high
    d = 1 if ref[0, 0] < ref[-1, 0] else -1
    ref = PchipInterpolator(ref[::d, 0], ref[::d, 1])(wn)
    return ref  #/ ref.max()
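A hedged usage sketch: 'water' as a bundled reference name and 'myref.mat' are both hypothetical, but the two mutually exclusive call forms are exactly those checked by the function above.

import numpy as np

wn = np.linspace(800, 1800, 500)                    # wavenumber grid of interest
ref = load_reference(wn, what='water')              # hypothetical bundled reference name
ref2 = load_reference(wn, matfilename='myref.mat')  # hypothetical .mat file with an AB matrix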
Code Example #11
    def load(self, verbose=False):

        mdfs = []

        for m in self.movlist:

            if verbose:
                report('I', 'Loading data of movie ' + str(m))

            filename = join(self.ex.antdatadir, 'xy_' + str(m) + '_' + str(m) + '_untagged.mat')
            tracklet_data = read_mat(filename)

            # convert to dataframe

            cols = ['tracklet', 'frame', 'x', 'y', 'orient', 'area', 'nants']
            if 'majax' in tracklet_data:
                cols.append('majax')

            cols = pd.Index(cols)

            d = {k: tracklet_data[k] for k in cols if k in tracklet_data.keys()}
            d['x'] = tracklet_data['xy'][:, 0]
            d['y'] = tracklet_data['xy'][:, 1]

            df = pd.DataFrame(d)

            df['tracklet'] = df['tracklet'].astype('int')
            df['frame'] = df['frame'].astype('int')

            mdfs.append(df)

        self.trdata = pd.concat(mdfs, axis=0)
        self.tracklet_table = self.ex.get_tracklet_table(self.movlist, type='untagged')
Code Example #12
    def __init__(self, pathDict):
        # Parse L_modified file
        self.pathLmod = os.path.join(pathDict['Preferences'], 'L_modified.mat')
        prepcommon._testfile(self.pathLmod)
        self.allen = pymatreader.read_mat(self.pathLmod)['L']

        # Parse sessions file
        pathSessions = os.path.join(pathDict['Preferences'], 'sessions_tex.csv')
        self.dfSession = pd.read_csv(pathSessions, sep='\t')

        # with open(pathSessions, 'r') as json_file:
        #     self.sessions = json.load(json_file)

        # Find parse TGT files
        self.dataPaths = pd.DataFrame(
            columns=['mouse', 'day', 'session', 'sessionPath',
                     'trialIndPath', 'trialStructPathMat', 'trialStructPathLabview',
                     'pathActivePassive', 'pathMovementVectors'])
        self.pathT1 = defaultdict(dict)

        if 'TDT' in pathDict:
            self.find_parse_tdt(pathDict['TDT'])

        # Find parse Overlay
        self.pathRef = {}
        self.pathT2 = {}
        if 'Overlay' in pathDict:
            self.find_parse_overlay(pathDict['Overlay'])
Code Example #13
def test_beer_lambert_v_matlab():
    """Compare MNE results to MATLAB toolbox."""
    from pymatreader import read_mat
    raw = read_raw_nirx(fname_nirx_15_0)
    raw = optical_density(raw)
    raw = beer_lambert_law(raw, ppf=0.121)
    raw._data *= 1e6  # Scale to uM for comparison to MATLAB

    matlab_fname = op.join(testing_path, 'NIRx', 'nirscout', 'validation',
                           'nirx_15_0_recording_bl.mat')
    matlab_data = read_mat(matlab_fname)

    matlab_names = ["_"] * len(raw.ch_names)
    for idx in range(len(raw.ch_names)):
        matlab_names[idx] = ("S" + str(int(matlab_data['sources'][idx])) +
                             "_D" + str(int(matlab_data['detectors'][idx])) +
                             " " + matlab_data['type'][idx])
    matlab_to_mne = np.argsort(matlab_names)

    for idx in range(raw.get_data().shape[0]):

        matlab_idx = matlab_to_mne[idx]

        mean_error = np.mean(matlab_data['data'][:, matlab_idx] -
                             raw._data[idx])
        assert mean_error < 0.1
        matlab_name = ("S" + str(int(matlab_data['sources'][matlab_idx])) +
                       "_D" + str(int(matlab_data['detectors'][matlab_idx])) +
                       " " + matlab_data['type'][matlab_idx])
        assert raw.info['ch_names'][idx] == matlab_name
Code Example #14
def extract_data(OverlapInd, currentSub, path, taskNum):
    # extract the data based on the indices saved in 'find_overlapIndices'
    # reconstruct the path
    dataPath = path + currentSub[0:4] + '_behavior_final.mat'
    dataDict = read_mat(dataPath)

    rt_list = []
    correct_list = []
    condition_list = []
    eeg_list = []
    sub_list = []

    rts = (dataDict['rt']).tolist()
    corrects = (dataDict['correct']).tolist()
    conditions = (dataDict['condition']).tolist()
    eeg = (dataDict['trial']).tolist()

    for index in OverlapInd:
        rt_list.append(float(rts[index]))
        correct_list.append(float(corrects[index]))
        condition_list.append(int(conditions[index]))
        eeg_list.append(eeg[index])
        sub_list.append(currentSub[0:4])

    data = [sub_list, condition_list, rt_list, correct_list, eeg_list]
    if debugging:
        print(type(eeg_list))

    return data
Code Example #15
File: beh_ostwald.py Project: jennyqsun/hnlpyjenny
def AllBehavFile():
    subIDs, _ = get_sub(base_dir)
    subIDs = list(subIDs)
    subIDs.remove('sub-001')

    newRT = []
    newCond = []
    newACC = []
    newY = []
    newSub = []

    for subject in subIDs:
        datadict = read_mat(targetpath + '/combined/' + subject +
                            '-beh-concat.mat')
        rts = datadict['rt']
        condition = datadict['condition']
        correct = datadict['correct']
        y = datadict['y']
        sub = datadict['participant']

        newRT = newRT + rts.tolist()
        newCond = newCond + condition.tolist()
        newACC = newACC + correct.tolist()
        newY = newY + y.tolist()
        newSub = newSub + sub.tolist()
    ncond = 4
    nparts = len(subIDs)
    N = len(newRT)
    dataDict = {'condition': newCond, 'rt': newRT, 'acc': newACC, 'y': newY, \
                'participant': newSub, 'nconds': ncond, 'nparts': nparts}
    filename = targetpath + 'allsubtest1.mat'
    savemat(filename, dataDict)
    return filename
Code Example #16
    def __init__(self, pathDict):
        # Parse L_modified file
        self.pathLmod = os.path.join(pathDict['Preferences'], 'L_modified.mat')
        prepcommon._testfile(self.pathLmod)
        self.allen = pymatreader.read_mat(self.pathLmod)['L']

        # Parse sessions file
        pathSessions = os.path.join(pathDict['Preferences'],
                                    'sessions_aud.csv')
        self.dfSession = pd.read_csv(pathSessions, sep='\t')

        # Find parse TGT files
        self.dataPaths = pd.DataFrame(columns=[
            'mouse',
            'day',
            'session',
            'sessionPath',  #'trialIndPath',
            'trialStructPath',
            'pathActivePassive',
            'pathMovementVectors'
        ])

        self.pathT1 = defaultdict(dict)
        self.find_parse_data_paths(pathDict['AUD'])

        # Find parse Overlay
        # self.pathRef = {}
        self.pathT2 = {}
        self.find_parse_overlay(pathDict['Overlay'])
Code Example #17
def parse_active_passive(dfTrialStruct, activePassivePath, mapCanon):
    dfTrialStruct['Activity'] = None

    activePassiveStruct = pymatreader.read_mat(activePassivePath)
    for tt, ttCanon in mapCanon.items():
        rezDict = {}

        for activity in ['delay_move', 'no_prior_move', 'noisy', 'prior_move', 'quiet_sens', 'quiet_then_move']:
            keyAct = 'tr_' + tt + '_' + activity
            if keyAct in activePassiveStruct:
                keys = activePassiveStruct[keyAct]
                if isinstance(keys, int):
                    keys = [keys]

                vals = [activity] * len(keys)
                rezDict = {**rezDict, **dict(zip(keys, vals))}

        print('--', tt, (dfTrialStruct['trialType'] == ttCanon).sum(), len(rezDict))

        iTT = 0
        for idx, row in dfTrialStruct.iterrows():
            if row['trialType'] == ttCanon:
                if iTT + 1 in rezDict:
                    dfTrialStruct.loc[idx, 'Activity'] = rezDict[iTT + 1]
                iTT += 1

    return dfTrialStruct
Code Example #18
File: beh_ostwald.py Project: jennyqsun/hnlpyjenny
def combine_runs():
    subdict = save_behavlist()
    count = 0
    for sub in subdict:
        allrunsdict = subdict[sub]
        newRT = []
        newCond = []
        newACC = []
        count = count + 1
        newY = []
        for run in allrunsdict:
            filename = str(run)
            datadict = read_mat(filename)
            rts = datadict['rt']
            condition = datadict['condition']
            correct = datadict['correct']
            y = datadict['rt'] * np.sign(datadict['correct'] - 1 / 2)

            newRT = newRT + rts.tolist()
            newCond = newCond + condition.tolist()
            newACC = newACC + correct.tolist()
            newY = newY + y.tolist()

        sub_ind = [count] * len(newRT)
        dataDict = {'condition': newCond, 'rt': newRT, 'correct': newACC, 'y': newY, \
            'participant': sub_ind}
        filename = '/home/jenny/ostwald-data/inside-beh-converted/combined/' + sub + '-beh-concat' \
                     + '.mat'
        savemat(filename, dataDict)
Code Example #19
File: find_baits.py Project: Cocopyth/AMFtrack
def get_pos_baits_aligned(exp, t):
    pos_bait = get_pos_baits(exp, t)
    date = exp.dates[t]
    directory_name = get_dirname(date, exp.plate)
    path_snap = exp.directory + directory_name
    path_tile = path_snap + "/Img/TileConfiguration.txt.registered"
    skel = read_mat(path_snap + "/Analysis/skeleton_pruned_realigned.mat")
    Rot = skel["R"]
    trans = skel["t"]
    print(Rot, trans)
    real_pos = []
    for x, y, r in pos_bait:
        compression = 5
        xs, ys = x * compression, y * compression
        rottrans = np.dot(Rot, np.array([ys, xs])) + trans
        #         rottrans = np.array([xs, ys])
        xs, ys = round(rottrans[0]), round(rottrans[1])
        real_pos.append((xs, ys))
    pos_real = {}
    if real_pos[0][1] >= real_pos[1][1]:
        pos_real["right"] = real_pos[0]
        pos_real["left"] = real_pos[1]
    else:
        pos_real["right"] = real_pos[1]
        pos_real["left"] = real_pos[0]
    return pos_real
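Note that this is the inverse of the mapping in find_image_pos (example #9): points go forward as R·p + t and back as R⁻¹·(p − t). A minimal round-trip sketch with made-up values standing in for skel['R'] and skel['t']:

import numpy as np

theta = 0.1  # made-up rotation angle and offset
Rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta), np.cos(theta)]])
trans = np.array([12.0, -3.0])

p = np.array([100.0, 200.0])
q = np.dot(Rot, p) + trans                      # forward, as in get_pos_baits_aligned
p_back = np.dot(np.linalg.inv(Rot), q - trans)  # inverse, as in find_image_pos
assert np.allclose(p, p_back)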
Code Example #20
def get_cfg_local(system):
    """Return cfg_local field for the system."""
    from pymatreader import read_mat
    cfg_local = read_mat(os.path.join(get_data_paths(system), 'raw_v7.mat'),
                         ['cfg_local'])['cfg_local']

    return cfg_local
Code Example #21
File: spectraldata.py Project: Chenliushang/octavvs
    def read_matrix(self, filename):
        """
        Read data from a file, with some error checking. The object is modified
        only if the file is successfully loaded.
        """
        wh = None
        image = None
        fext = os.path.splitext(filename)[1].lower()
        #        opusformat = False
        if fext in ['.txt', '.csv', '.mat']:
            if fext == '.mat':
                #                s = scipy.io.loadmat(filename)
                try:
                    s = read_mat(filename)
                except TypeError:
                    # Workaround for uint16_codec bug (pymatreader assumes mio5, not mio4)
                    s = scipy.io.loadmat(filename)
                # Assume data are in the biggest matrix in the file
                ss = max(s.items(), key=lambda k: np.size(k[1]))[1]
                if 'wh' in s:
                    wh = s['wh'].flatten()
            else:
                ss = np.loadtxt(filename)

            if ss.ndim != 2 or ss.shape[0] < 10 or ss.shape[1] < 2:
                raise RuntimeError(
                    'file does not appear to describe an FTIR image matrix')
            d = -1 if ss[0, 0] < ss[-1, 0] else 1
            raw = ss[::d, 1:].T
            wn = ss[::d, 0]
        else:
            reader = OpusReader(filename)
            raw = reader.AB
            wn = reader.wavenum
            wh = reader.wh
            image = reader.image

        if (np.diff(wn) >= 0).any():
            raise RuntimeError('wavenumbers must be sorted')
        npix = raw.shape[0]
        if wh is not None:
            if len(wh) != 2:
                raise RuntimeError('Image size in "wh" must have length 2')
            wh = (int(wh[0]), int(wh[1]))
            if wh[0] * wh[1] != npix:
                raise RuntimeError(
                    'Image size in "wh" does not match data size')
            self.wh = wh
        elif npix != self.wh[0] * self.wh[1]:
            res = int(np.sqrt(npix))
            if npix == res * res:
                self.wh = (res, res)
            else:
                self.wh = (npix, 1)
        self.raw = raw
        self.wavenumber = wn
        self.wmin = self.wavenumber.min()
        self.wmax = self.wavenumber.max()
        self.image = image
        self.curFile = filename
Code Example #22
def test_raw_old_eeglab_event_type():
    data = read_mat(os.path.join(test_data_folder, testdata_eeglab_old))
    from .helper_functions.mne_eeglab_stuff import prepare_events_like_mne

    events = prepare_events_like_mne(data)
    first_event = events[0]
    first_event.type
    first_event.latency
Code Example #23
File: experiment.py Project: janamach/anTraX
    def get_images(self,
                   movlist=None,
                   tracklets=None,
                   parts=None,
                   bg='w',
                   ntracklets=None):

        if hasattr(movlist, '__iter__'):
            movlist = list(movlist)
        else:
            movlist = [movlist]

        images = {}

        if parts is not None:
            m = movlist[0]
            for p in parts:
                file = join(self.imagedir,
                            'images_' + str(m) + '_p' + str(p) + '.mat')
                imagesm = read_mat(file, variable_names=tracklets)
                images.update(imagesm)
        else:
            for m in movlist:
                file = join(self.imagedir, 'images_' + str(m) + '.mat')
                imagesm = read_mat(file, variable_names=tracklets)
                images.update(imagesm)

        # arange images
        for tracklet in images.keys():
            if images[tracklet].ndim == 3:
                images[tracklet] = np.expand_dims(images[tracklet], -1)

            images[tracklet] = np.moveaxis(images[tracklet], -1, 0)
            if bg == 'w':
                images[tracklet] = make_white_bg(images[tracklet])

        if ntracklets is not None and ntracklets < len(images):

            tracklets = list(images.keys())
            images = {
                tracklet: images[tracklet]
                for tracklet in tracklets[:ntracklets]
            }

        return images
Code Example #24
File: get_erp.py Project: rameshsrinivasanuci/hnlpy
def get_erp(subID, run):
    currentSub = subID
    currentRun = 'run-'+ run
    print('Current Subject: ', currentSub)
    print('Current Run:', currentRun)
    datadict = read_mat(path + subID + '/EEG/' + 'EEG_data_'+ subID + '_' + currentRun + '.mat')
    eventsdict = read_mat(path + subID + '/EEG/' + 'EEG_events_' + subID + '_' + currentRun + '.mat')

    data = np.array(datadict['EEGdata']['Y'])
    sr = np.array(datadict['fs'])
    tresp = np.array(eventsdict['tresp'])
    tstim = np.array(eventsdict['tstim'])

    # construct a time x channel x trial matrix for each run
    channelnum = data.shape[0]
    trialnum = tresp.shape[0]
    trialdata = np.zeros((5000,channelnum, trialnum))
    data = np.transpose(data)
    for i in np.arange(trialnum):
        time = tstim[i]
        trialdata[:,:, i] = data[time-2000: time+3000,:]

    erp = np.mean(trialdata[:,:,:],axis = 2)

    # make a lowpass filter
    sos, w, h = timeop.makefiltersos(sr, 10, 20)
    erpfilt = signal.sosfiltfilt(sos, erp, axis=0, padtype='odd')
    erpfiltbase = timeop.baselinecorrect(erpfilt, np.arange(1849,1998,1))

    # Identify an optimal set of weights to estimate a single erp peak.
    u,s,vh = linalg.svd(erpfiltbase[2150:2375,:])
    weights = np.zeros((channelnum,1))
    weights[:,0] = np.matrix.transpose(vh[0,:])

    erpfiltproject = np.matmul(erpfiltbase,weights)
    erpmin = np.amin(erpfiltproject[2150:2375])
    erpmax = np.amax(erpfiltproject[2150:2375])
    if abs(erpmin) < abs(erpmax):
        weights = -weights
        erpfiltproject = -erpfiltproject

    erp_peaktiming = np.argmin(erpfiltproject[2150:2375]) + 2150
    indices = np.arange(erp_peaktiming - 10, erp_peaktiming + 10, 1)
    erp_peakvalue = np.mean(erpfiltproject[indices])
    return erp, erpfiltproject
Code Example #25
def get_study_zone(exp, dist, radius, i=0):
    date = exp.dates[i]
    directory_name = get_dirname(date, exp.plate)
    path_snap = exp.directory + directory_name
    im = read_mat(path_snap + "/Analysis/raw_image.mat")["raw"]
    shape_compressed = im.shape[1] // 5, im.shape[0] // 5
    im_comp = cv2.resize(im, shape_compressed)
    dr_orth, dr_center = place_study_zone(im_comp, dist, radius)
    return (dr_orth, dr_center)
Code Example #26
    def plot_raw(self, t, figsize=(10, 9)):
        """Plot the full stitched image, compressed (otherwise too heavy)."""
        date = self.dates[t]
        directory_name = get_dirname(date, self.plate)
        path_snap = self.directory + directory_name
        im = read_mat(path_snap + "/Analysis/raw_image.mat")["raw"]
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
        ax.imshow(im)
Code Example #27
def read_resample_movement_data(pwdMove, trialTypeNames, nTrialData, nSampleData, trialNameMap, sig2, srcFreq=30.0, trgFreq=20.0):
    # Make array of NAN nTrial x nTime (as in data)
    movementRS = np.full((nTrialData, nSampleData), np.nan)

    # Read behaviour matrix
    matDict = pymatreader.read_mat(pwdMove)

    # for each trialType, resample 30Hz to 20Hz (3-step window), fill matrix
    for srcName, trgName in trialNameMap.items():
        if srcName in matDict.keys():
            movementRawRS = matDict[srcName].T  # Original is SR, so transpose

            if movementRawRS.ndim == 2:
                nTrialSrc, nSampleSrc = movementRawRS.shape
            elif movementRawRS.ndim == 1:
                nTrialSrc, nSampleSrc = 1, movementRawRS.shape[0]
                if nSampleSrc == 0:
                    nTrialSrc = 0
            else:
                raise IOError("Unexpected shape", movementRawRS.shape)

            # Test that the number of behavioural trials and data trials of this type match
            # print(trgName, nTrialSrc, np.sum(trialTypeNames == trgName))

            if nTrialSrc == 0:
                print('--No trials for ', trgName, ', skipping')
                continue

            timesSrc, timesTrg, rezRS = _behaviour_resample(movementRawRS, nSampleSrc, srcFreq, trgFreq, sig2)

            # Find number of target trials and timesteps
            trialTypeIdxs = trialTypeNames == trgName
            nTrialTrg = np.sum(trialTypeIdxs)
            nSampleTrg = len(timesTrg)

            # Augment or crop number of trials
            if nTrialSrc != nTrialTrg:
                print('Warning: Trial mismatch:', trgName, nTrialSrc, np.sum(trialTypeNames == trgName), ': cropping')
                if nTrialSrc > nTrialTrg:
                    rezRS = rezRS[:nTrialTrg]
                else:
                    tmpRS = np.full((nTrialTrg, nSampleTrg), np.nan)
                    tmpRS[:nTrialSrc] = rezRS
                    rezRS = tmpRS

            # Augment or crop duration
            if nSampleTrg > nSampleData:
                print('too long, crop', nSampleTrg, nSampleData)
                movementRS[trialTypeIdxs] = rezRS[:, :nSampleData]  # If behaviour too long, crop it
            elif nSampleTrg < nSampleData:
                print('too short, pad', nSampleTrg, nSampleData)
                movementRS[trialTypeIdxs, :nSampleTrg] = rezRS  # If behaviour too short, pad it
            else:
                movementRS[trialTypeIdxs] = rezRS

    return movementRS
Code Example #28
def load_t2(path):
    d = pymatreader.read_mat(path)['tform_str']
    assert d['Degree'] == 3
    assert d['Dimensionality'] == 2

    # Canonical Monads:  1, x, y, x2, xy, y2, x3, x2y, xy2, y3
    # Matlab Monads:     1, x, y, xy, x2, y2, x2y, xy2, x3, y3

    monadOrder = [0, 1, 2, 4, 3, 5, 8, 6, 7, 9]
    return d['A'][monadOrder], d['B'][monadOrder]
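Assuming A and B each hold the ten coefficients in the canonical order above, applying the transform reduces to a dot product with the monad vector. apply_t2 below is a hypothetical helper, not part of the project:

import numpy as np

def apply_t2(A, B, x, y):
    # monad vector in canonical order: 1, x, y, x^2, xy, y^2, x^3, x^2*y, x*y^2, y^3
    m = np.array([1.0, x, y, x*x, x*y, y*y, x**3, x*x*y, x*y*y, y**3])
    return np.dot(A, m), np.dot(B, m)  # transformed coordinates (x', y')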
Code Example #29
    def parse_trial_structure(self, pwd):
        trialStruct = pymatreader.read_mat(pwd)
        trialTypes = trialStruct['out']['respTypes']
        return pd.DataFrame({
            'trialType': trialTypes
        }).replace({
            1: "Hit",
            2: "CR",
            3: "FA",
            4: "Miss",
            5: "Early"
        })
Code Example #30
    def read_trial_structure_as_pd(self, path, mouseName):
        _drop_non_floats = lambda lst: [el if isinstance(el, float) else np.nan for el in lst]

        df = pd.DataFrame(pymatreader.read_mat(path)['trials'])
        df['trialType'] = [self.parse_trial_type(s, d, mouseName) for s, d in zip(df['stimulus'], df['decision'])]

        df['decision_time'] = _drop_non_floats(df['decision_time'])
        df['stimulus_time'] = _drop_non_floats(df['stimulus_time'])
        df['delayLength'] = (df['decision_time'] - df['stimulus_time'] - 2000) / 1000

        df.drop(['id', 'no', 'puff', 'report', 'auto_reward', 'stimulus', 'decision'], inplace=True, axis=1)
        return df