Example #1
def get_parametric(grp=None, ntm=None):
    # Fit a per-task logistic regression of accuracy (cor) on blkt and
    # return {tid: np.array([intercept, slope])}.
    arrdata = lut.unpickle(main_path)['main'][:, [
        r.ix('group'),
        r.ix('sid'),
        r.ix('trial'),
        r.ix('cat'),
        r.ix('cor'),
        r.ix('blkt')
    ]]
    df_dict = {}
    for i, (col, dt) in enumerate(
            zip('grp,sid,trial,tid,cor,blkt'.split(','),
                [int, int, int, int, int, int])):
        df_dict[col] = pd.Series(arrdata[:, i], dtype=dt)

    df = pd.DataFrame(df_dict)
    ntm_df = lut.unpickle(ntm_path)[['sid', 'ntm']].groupby('sid').head(1)
    df = df.merge(ntm_df, on='sid').drop_duplicates()
    del ntm_df, arrdata

    if ntm is not None: df = df.loc[df.ntm == ntm, :]
    if grp is not None: df = df.loc[df.grp == grp, :]

    params_dict = {}
    for tid in [1, 2, 3, 4]:
        tdf = df.loc[df.tid == tid, :]
        # Logistic learning curve for this task: P(correct) as a function of blkt.
        m = smf.logit('cor ~ blkt', data=tdf).fit(full_output=0, disp=0)
        b0, b1 = m.params['Intercept'], m.params['blkt']
        params_dict[tid] = np.array([b0, b1])
    return params_dict
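The coefficient pairs returned above define per-task logistic learning curves over blkt. A minimal usage sketch (assuming the module-level main_path, ntm_path and column helper r used by get_parametric are already set up; the group value is illustrative):

import numpy as np

# Hypothetical usage: convert each task's (intercept, slope) pair into a
# predicted accuracy curve via the inverse-logit link.
params = get_parametric(grp=1)               # {tid: array([b0, b1])}
blkt = np.arange(100)
curves = {tid: 1.0 / (1.0 + np.exp(-(b[0] + b[1] * blkt)))
          for tid, b in params.items()}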
Example #2
def get_sub_data(sid=None, grp=None, ntm=None):
    # Return (per-task accuracy pivot indexed by blkt, sid) for a single
    # subject; if sid is None, a subject is drawn at random.
    arrdata = lut.unpickle(main_path)['main'][:, [
        r.ix('group'),
        r.ix('sid'),
        r.ix('trial'),
        r.ix('cat'),
        r.ix('cor'),
        r.ix('blkt')
    ]]
    df_dict = {}
    for i, (col, dt) in enumerate(
            zip('grp,sid,trial,tid,cor,blkt'.split(','),
                [int, int, int, int, int, int])):
        df_dict[col] = pd.Series(arrdata[:, i], dtype=dt)
    df = pd.DataFrame(df_dict)

    ntm_df = lut.unpickle(ntm_path)[['sid', 'ntm']].groupby('sid').head(1)
    df = df.merge(ntm_df, on='sid').drop_duplicates().astype(int)
    del ntm_df, arrdata
    df = df.loc[df.trial <= 60]
    if ntm: df = df.loc[df.ntm == ntm, :]
    if grp: df = df.loc[df.grp == grp, :]
    if sid:
        df = df.loc[df.sid == int(sid), :]
    else:
        sid = np.random.choice(df.sid.unique())
        df = df.loc[df.sid == sid, :]

    df = df.loc[:, ['blkt', 'tid', 'cor']].pivot(index='blkt',
                                                 columns='tid',
                                                 values='cor')
    return df, sid
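A hedged usage sketch for get_sub_data: the returned pivot has one column per task id, so a rolling mean per column gives a quick per-task accuracy curve for the sampled subject (matplotlib is an assumption here, not part of the snippets above):

import matplotlib.pyplot as plt

# Hypothetical usage: smoothed per-task accuracy for one randomly drawn subject.
sub_df, sid = get_sub_data(grp=1)
sub_df.rolling(5, min_periods=1).mean().plot(title='sid %d' % sid)
plt.show()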
Example #3
def get_sub_choices(sid):
    # Zero-based task choices (cat - 1) made by one subject on trials 61-310.
    data = lut.unpickle(main_path)['main'][:, [
        r.ix('sid'), r.ix('trial'), r.ix('cat')
    ]]
    filt = (data[:, 0] == sid) & (data[:, 1] > 60) & (data[:, 1] <= 310)
    data = data[filt, -1] - 1
    return data.astype(int)
Example #4
def get_fit_data(self):
    # Method of a (not shown) host class; the subject id is read from
    # self.sid_picker.placeholder.
    df = lut.unpickle(
        'supplementary/simple_choice_model/data/fit_data.pkl')
    df = df.loc[df.sid == int(self.sid_picker.placeholder)]
    lps = df.loc[:, 'lp1':'lp4'].values[1:, :]
    pcs = df.loc[:, 'pc1':'pc4'].values[1:, :]
    ins = df.loc[:, 'in1':'in4'].values[1:, :]
    chs = df.loc[:, 'ch1':'ch4'].values[1:, :]
    # Relative time per task: cumulative one-hot choices plus an offset of 15
    # per task, normalised row-wise.
    time_alloc = chs.cumsum(axis=0) + 15
    trs = (time_alloc.T / time_alloc.sum(axis=1)).T
    return lps, pcs, ins, trs, chs
Example #5
def get_nonparametric(grp=None, ntm=None):
    # Build a simulator that samples correct/incorrect outcomes from each
    # task's smoothed empirical accuracy curve.
    arrdata = lut.unpickle(main_path)['main'][:, [
        r.ix('group'),
        r.ix('sid'),
        r.ix('trial'),
        r.ix('cat'),
        r.ix('cor'),
        r.ix('blkt')
    ]]

    df_dict = {}
    for i, (col, dt) in enumerate(
            zip('grp,sid,trial,tid,cor,blkt'.split(','),
                [int, int, int, int, int, int])):
        df_dict[col] = pd.Series(arrdata[:, i], dtype=dt)

    df = pd.DataFrame(df_dict)
    ntm_df = lut.unpickle(ntm_path)[['sid', 'ntm']].groupby('sid').head(1)
    df = df.merge(ntm_df, on='sid').drop_duplicates()
    del ntm_df, arrdata

    if ntm: df = df.loc[df.ntm == ntm, :]
    if grp: df = df.loc[df.grp == grp, :]

    grouped = df.groupby(['tid', 'blkt'])[['cor']].mean()

    # Smoothed (rolling-mean) accuracy over the first 100 blkt values per task.
    probs = np.zeros([4, 100])
    for tid in [1, 2, 3, 4]:
        y = grouped.loc[(tid, slice(None)), :].rolling(
            50, min_periods=1).mean().values.squeeze()
        probs[tid - 1, :] = y[:100]

    def nonparametric_model(trials, tid):
        # Clamp trial indices to [0, 99] and draw Bernoulli outcomes with
        # the task's empirical accuracy at each trial.
        t = trials.copy()
        t[t <= 0] = 0
        t[t >= 100] = 99
        p = probs[tid - 1, t]
        return (np.random.rand(t.size) <= p).astype(int)

    return nonparametric_model
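Since get_nonparametric returns a closure rather than parameters, a usage sketch might look like the following (task id and trial range are illustrative; numpy is assumed imported as np, as in the other snippets):

# Hypothetical usage: simulate 100 trials of task 2 from the smoothed
# empirical accuracy curve.
model = get_nonparametric(grp=1)
simulated = model(np.arange(100), tid=2)   # array of 0/1 outcomes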
Example #6
def get_multiple_sids(sids):
    # Return an array of shape (len(sids), 15, 4): training-stage accuracy
    # per subject, within-task trial, and task.
    arrdata = lut.unpickle(main_path)['main'][:, [
        r.ix('sid'), r.ix('trial'),
        r.ix('cat'), r.ix('cor')
    ]]
    df_dict = {}
    for i, (col, dt) in enumerate(
            zip('sid,trial,tid,cor'.split(','), [int, int, int, int])):
        df_dict[col] = pd.Series(arrdata[:, i], dtype=dt)
    df = pd.DataFrame(df_dict)
    df = df.loc[df.trial <= 60]

    arr = np.zeros([np.shape(sids)[0], 15, 4])
    for i, sid in enumerate(sids):
        for j, tid in enumerate([1, 2, 3, 4]):
            arr[i, :, j] = df.loc[(df.sid == sid) & (df.tid == tid),
                                  'cor'].values

    return arr
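A brief usage sketch for get_multiple_sids (the subject ids below are placeholders): averaging over the first axis gives group-level training curves per task.

# Hypothetical usage: group-mean training accuracy per trial and task.
acc = get_multiple_sids([101, 102, 103])   # shape (3, 15, 4)
mean_curves = acc.mean(axis=0)             # (15, 4): one column per task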
Example #7
def prepare_fit_data(save_as=''):
    # Rebuild per-trial learning-progress (lp*), percent-correct (pc*),
    # previous-choice (in*) and choice (ch*) features for every subject,
    # and pickle the resulting DataFrame when save_as is given.
    size0, size1, overlap = 10, 9, 4
    cols = [
        'sid', 'grp', 'stage', 'trial', 't0', 'loc_p1', 'loc_p2', 'loc_p3',
        'loc_p4', 'loc_pc1', 'loc_pc2', 'loc_pc3', 'loc_pc4', 'cor'
    ]
    df = lut.unpickle(
        'supplementary/simple_choice_model/data/trials_data_w15.pkl')[cols]
    df = df.loc[df.trial <= 310, :]

    sids, grps, ntms, trials, glps, gpcs, gins, gchs = [], [], [], [], [], [], [], []
    for i, sdf in df.groupby('sid'):
        # A task counts as learned if its local p-value and percent-correct
        # criteria are both met on at least one trial; ntm is the number of
        # such tasks (columns 1-3 only).
        crit_pval = sdf.loc[:, 'loc_p1':'loc_p3'] <= .01
        crit_pc = sdf.loc[:, 'loc_pc1':'loc_pc3'] > .5
        learned = crit_pval.values & crit_pc.values
        ntm = np.any(learned, axis=0).sum()
        sid = sdf.loc[:, 'sid'].values[0]
        grp = sdf.loc[:, 'grp'].values[0]

        # Sliding memory of the last 15 outcomes per task, seeded with the
        # training-stage (stage 0) trials of each task.
        mem = np.full([size0 + size1 - overlap, 4], np.nan)
        mem[:, 0] = sdf.loc[(sdf.t0 == 1) & (sdf.stage == 0), 'cor']
        mem[:, 1] = sdf.loc[(sdf.t0 == 2) & (sdf.stage == 0), 'cor']
        mem[:, 2] = sdf.loc[(sdf.t0 == 3) & (sdf.stage == 0), 'cor']
        mem[:, 3] = sdf.loc[(sdf.t0 == 4) & (sdf.stage == 0), 'cor']

        # Data of the 1st free-play trial
        lps = [
            np.abs(mem[:-size0, :].mean(axis=0) - mem[-size1:, :].mean(axis=0))
        ]
        pcs = [np.mean(mem, axis=0)]
        chs = [np.eye(4)[sdf.t0.values[61] - 1, :]]

        x = np.stack([lps[0], pcs[0]], axis=0).T
        choices = sdf.t0.values[62:]
        cor = sdf.cor.values[61:-1]
        for tid, outcome in zip(choices, cor):
            j = tid - 1  # choice index

            # Update hits memory
            mem[:-1, j] = mem[1:, j]
            mem[-1, j] = outcome

            # Update expected reward (PC)
            pc_vect = np.mean(mem, axis=0)
            x[j, 1] = pc_vect[j]  # PC

            # Update LP
            lp_vect = np.abs(mem[:-size0, :].mean(axis=0) -
                             mem[-size1:, :].mean(axis=0))
            x[j, 0] = lp_vect[j]  # LP

            # ========== Record data ============
            pcs.append(pc_vect)
            lps.append(lp_vect)
            chs.append(np.eye(4)[j, :])

        glps.append(np.stack(lps, axis=0))
        gpcs.append(np.stack(pcs, axis=0))
        gchs.append(np.stack(chs))
        # in* = one-hot of the previous trial's choice (all zeros on trial 1).
        gins.append(np.zeros_like(lps))
        gins[-1][1:] = chs[:-1]

        sids.append(np.ones([len(lps), 1]) * sid)
        grps.append(np.ones([len(lps), 1]) * grp)
        ntms.append(np.ones([len(lps), 1]) * ntm)
        trials.append((np.arange(len(lps)) + 1).reshape([-1, 1]))

    cols = []
    for col in (sids, grps, ntms, trials, glps, gpcs, gins, gchs):
        # print(np.concatenate(col, axis=0).shape)
        cols.append(np.concatenate(col, axis=0))

    data = np.concatenate(cols, axis=1)
    colnames = 'sid,grp,ntm,trial,lp1,lp2,lp3,lp4,pc1,pc2,pc3,pc4,in1,in2,in3,in4,ch1,ch2,ch3,ch4'.split(
        ',')
    df = pd.DataFrame(data, columns=colnames)
    for colname in 'sid,grp,ntm,trial,in1,in2,in3,in4,ch1,ch2,ch3,ch4'.split(
            ','):
        df.loc[:, colname] = df.loc[:, colname].astype(int)

    if save_as: lut.dopickle(save_as, df)
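prepare_fit_data only persists its result when save_as is provided; a plausible call reuses the pickle path that get_fit_data in Example #4 reads from:

# Write the assembled features to the pickle expected by get_fit_data().
prepare_fit_data(save_as='supplementary/simple_choice_model/data/fit_data.pkl')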
Example #8
import loc_utils as lut
import pandas as pd
import numpy as np

tasks = [1, 2, 3, 4]
mcols = 'sid,grp,stage,trial,blkt,current,nxt,pc:1,pc:2,pc:3,pc:4,p:1,p:2,p:3,p:4,sc:1,sc:2,sc:3,sc:4,sw_pred,sw_act,sw_lag,relt:1,relt:2,relt:3,relt:4'.split(
    ',')
ix = mcols.index

mdata = lut.unpickle('../pipeline_data/scdata/modeling_data_sw_lag.pkl')

sids, groups = lut.get_unique(mdata, [ix('sid'), ix('grp')])
rt = np.zeros([mdata.shape[0], 4])

# For each task, flag (shifted back by one trial) the rows on which it is the
# current task.
for j, tsk in enumerate(tasks):
    tmask = lut.get_mask(mdata, {ix('current'): tsk})
    rt[:-1][tmask[1:], j] = 1

# Per subject: cumulative count of trials spent on each task, with every task
# seeded at 15 on the first row.
for sid in sids:
    smask = lut.get_mask(mdata, {ix('sid'): sid})
    srt = rt[smask, :]
    srt[0, :] = 15
    srt = np.cumsum(srt, axis=0)
    rt[smask] = srt

# Normalise each row into relative time shares and append them as the
# relt:1-4 columns.
rt = np.transpose(rt.T / np.sum(rt, axis=1))
mdata = np.concatenate([mdata, rt], axis=1)

df1 = pd.DataFrame(mdata, columns=mcols)
neworder = 'sid,grp,stage,trial,blkt,current,nxt,pc:1,pc:2,pc:3,pc:4,p:1,p:2,p:3,p:4,sc:1,sc:2,sc:3,sc:4,relt:1,relt:2,relt:3,relt:4,sw_pred,sw_act,sw_lag'.split(
    ',')