Beispiel #1
0
def get_indexedTrials(choiceData, trials):
    '''
    Return the rows of `trials` referenced by `choiceData`'s trial_index column.

    Parameters
    ----------
    choiceData : DataFrame
        Must carry a `trial_index` column of (possibly nested) index lists.
    trials : DataFrame
        Trial table indexed by the values stored in `trial_index`.

    Returns
    ----------
    iTrials : DataFrame
        The subset of `trials` selected by the flattened indices.
    '''
    from macaque.f_toolbox import flatten
    wanted = flatten(choiceData.trial_index.values.tolist())
    return trials.loc[wanted]
Beispiel #2
0
def merge_sequential_test(choiceData, trials):
    '''
    Merge consecutive blocks (divisions) of `choiceData` that share the same
    sequence type and gList, rebuilding the merged choice rows from `trials`.

    Parameters
    ----------
    choiceData : DataFrame
        Blocked choice data with `seqType`, `gList`, `division`, `option1`,
        `option2` and `trial_index` columns.
    trials : DataFrame
        Raw trial table; merged rows are re-derived from it via get_options.

    Returns
    ----------
    mCD : DataFrame
        Choice data with sequential like-for-like blocks collapsed, sorted by
        sessionDate and division, and re-indexed from 0.
    '''
    from macaque.f_toolbox import unique_listOfLists, flatten

    def consecutive(data, stepsize=1):
        # Split a sorted 1-D array into runs of consecutive values.
        return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)

    dfs = []
    for seqCode, gList in unique_listOfLists(choiceData[['seqType',
                                                         'gList']].values):
        df = choiceData.loc[(choiceData.seqType == seqCode)
                            & (choiceData.gList == gList)]
        for toMerge in consecutive(df.division.unique()):
            if seqCode < 9020:
                # presumably seqType codes below 9020 mark custom sequences
                # whose full option sets must match before merging — TODO confirm
                options = [
                    flatten(
                        np.unique([
                            df.loc[df.division == block].option1.tolist(),
                            df.loc[df.division == block].option2.tolist()
                        ])) for block in toMerge
                ]
                indices = np.array([
                    np.unique(
                        df.loc[df.division == block].trial_index.tolist())
                    for block in toMerge
                ])

                # Group adjacent blocks whose option sets are identical; the
                # [np.nan] sentinel simplifies the first-iteration comparison.
                consecutives = [[np.nan]]
                for i in range(len(options) - 1):
                    if options[i] == options[i + 1]:
                        if consecutives[-1][-1] == i:
                            consecutives[-1].extend([i + 1])
                        else:
                            consecutives.append([i, i + 1])
                    else:
                        if consecutives[-1][-1] != i:
                            consecutives.append([i])
                if len(consecutives) == 1:
                    consecutives.append([0])
                elif len(consecutives[-1]) == 1:
                    # NOTE(review): this appends index len(options), which is
                    # out of range for `indices` below — looks like it should
                    # be len(options) - 1; verify before relying on this branch.
                    consecutives.append([len(options)])
                consecutives = consecutives[1:]  # drop the sentinel group

                for cc in consecutives:
                    iMerge = np.unique(flatten(indices[cc]))
                    dfs.append(
                        get_options(trials.loc[iMerge],
                                    mergeBy='sequencetype').assign(
                                        division=toMerge[0]))

            else:
                # Non-custom sequences: merge every block in the run outright.
                iMerge = np.hstack(df.loc[[
                    True if x in toMerge else False for x in df.division
                ]].trial_index.values)
                dfs.append(
                    get_options(
                        trials.loc[iMerge],
                        mergeBy='sequencetype').assign(division=toMerge[0]))

    mCD = pd.concat(dfs, ignore_index=True)
    mCD.sort_values(by=['sessionDate', 'division'], inplace=True)
    mCD.index = range(len(mCD.index))
    return mCD
Beispiel #3
0
def merge_sequential(choiceData, trials):
    '''
    Merge sequential blocks into single blocks when they all share the same trials and/or gList name.

    Walks pairs of adjacent divisions; when two neighbouring blocks have the
    same seqType and gList (and, for custom sequences, compatible option
    sets or sufficiently similar certainty equivalents), their trials are
    accumulated and eventually collapsed into a single rebuilt block.

    Parameters
    ----------
    choiceData : DataFrame
        Blocked choice data with `division`, `seqType`, `gList`, `option1`,
        `option2` and `trial_index` columns.
    trials : DataFrame
        Raw trial table used to rebuild merged choice rows via get_options.

    Returns
    ----------
    choiceData : DataFrame
        Same data with adjacent equivalent blocks collapsed, sorted by
        sessionDate and division, and re-indexed from 0.
    '''
    #     if ('division' in choiceData.columns) and (len(choiceData.sessionDate.unique())>1):
    #THIS NEEDS TO MERGE SOFTMAXES TOGETHER WHY THEY ARE FROM SIMILAR CONTEXTS
    from macaque.f_toolbox import flatten, unique_listOfLists
    from macaque.f_psychometrics import get_softmaxData

    #----------------------------------------------------------------------

    def merge_seqChoices(sDivs, sTrials, oldCD, tt):
        # Drop the rows for every division in sDivs, rebuild them from the
        # raw trials, and re-insert them under the first division number.
        sDivs = np.unique(sDivs)
        sTrials = np.unique(sTrials)
        for div in sDivs:
            oldCD.drop(oldCD.loc[oldCD.division == div].index, inplace=True)
        newRowsChoice = get_options(tt.loc[sTrials], mergeBy='sequencetype')
        newRowsChoice = newRowsChoice.assign(division=sDivs[0])
        return oldCD.append(newRowsChoice).sort_index()

    #----------------------------------------------------------------------

#    choiceData.index = range(len(choiceData.index))
    sTrials = []  # trial indices accumulated over the current run of blocks
    sDivs = []  # division numbers accumulated over the current run
    oDivs = choiceData.division.unique()  #find the different blocks
    for ii in range(len(choiceData.division.unique()) - 1):
        div1 = oDivs[ii]
        div2 = oDivs[ii + 1]  #assign block numbers for the current and next
        df1 = choiceData.loc[choiceData.division ==
                             div1]  #find row attributable to primary block
        df2 = choiceData.loc[choiceData.division ==
                             div2]  #find row attributable to secondary block
        # NOTE(review): these unique() comparisons assume each block holds a
        # single seqType/gList value; with more than one the array comparison
        # is ambiguous — confirm upstream guarantees this.
        if df1.seqType.unique() == df2.seqType.unique() and df1.gList.unique(
        ) == df2.gList.unique():  #check if their gList matches
            if df1.seqType.unique(
            ) < 9020.0:  #now check to see if these are both custom sequences (9001)
                # Each block's full option set, sorted for like-for-like
                # comparison between the two blocks.
                gambleList_1 = np.sort(
                    unique_listOfLists(
                        flatten([df1.option1.tolist(),
                                 df1.option2.tolist()])), 0)
                gambleList_2 = np.sort(
                    unique_listOfLists(
                        flatten([df2.option1.tolist(),
                                 df2.option2.tolist()])), 0)

                #find all gambles and all safes taken from their unique list
                # gg* = options of maximal length (gambles),
                # ss* = options of minimal length (safes).
                gg1 = [
                    option
                    for x, option in zip([len(x)
                                          for x in gambleList_1], gambleList_1)
                    if x == max([len(x) for x in gambleList_1])
                ]
                gg2 = [
                    option
                    for x, option in zip([len(x)
                                          for x in gambleList_2], gambleList_2)
                    if x == max([len(x) for x in gambleList_2])
                ]
                ss1 = [
                    option
                    for x, option in zip([len(x)
                                          for x in gambleList_1], gambleList_1)
                    if x == min([len(x) for x in gambleList_1])
                ]
                ss2 = [
                    option
                    for x, option in zip([len(x)
                                          for x in gambleList_2], gambleList_2)
                    if x == min([len(x) for x in gambleList_2])
                ]

                # Normalise numpy rows to plain lists so == compares by value.
                if type(gg1[0]) != list:
                    gg1[0] = gg1[0].tolist()
                if type(gg2[0]) != list:
                    gg2[0] = gg2[0].tolist()
                if type(ss1[0]) != list:
                    ss1[0] = ss1[0].tolist()
                if type(ss2[0]) != list:
                    ss2[0] = ss2[0].tolist()

                if gg1[0] == gg2[0] and (len(gg1) == 1) and (
                        len(gg2) == 1
                ):  #means there is no difference between the two sets
                    pass
                elif ss1[0] == ss2[0] and (len(ss1) == 1) and (len(ss2) == 1):
                    pass
                else:
                    # Option sets differ: before discarding the accumulated
                    # run, check whether its blocks' certainty equivalents are
                    # close enough (<= 0.1 apart) to leave them unmerged.
                    if len(sTrials) != 0:
                        sf = []
                        for dd in np.unique(sDivs):
                            sf.append(
                                get_softmaxData(
                                    choiceData.loc[choiceData.division == dd],
                                    metricType='CE',
                                    minSecondaries=4,
                                    minChoices=4))
                        try:
                            sf = pd.concat(sf, ignore_index=True)
                            if len(sf) >= 2 and not any(
                                    np.abs(np.diff(sf.equivalent.values)) > 0.1
                            ):
                                pass  #this is done so that I could look at past blocks
                            else:
                                choiceData = merge_seqChoices(
                                    sDivs, sTrials, choiceData, trials)
                        except:
                            # NOTE(review): bare except silently merges
                            # whenever the softmax fit fails — consider
                            # narrowing the exception type.
                            choiceData = merge_seqChoices(
                                sDivs, sTrials, choiceData, trials)
                            pass
                    sTrials = []
                    sDivs = []
                    continue

            # Blocks are compatible: extend the current run with both blocks'
            # trial indices and division numbers.
            sTrials.extend(
                np.unique(
                    flatten(
                        [df1.trial_index.tolist(),
                         df2.trial_index.tolist()], 2)))
            sDivs.extend([div1, div2])
        elif len(sTrials) != 0:
            # Run broken by a non-matching block: merge what was accumulated.
            choiceData = merge_seqChoices(sDivs, sTrials, choiceData, trials)
            sTrials = []
            sDivs = []
        else:
            sTrials = []
            sDivs = []

    # Flush any run still open after the final pair.
    if len(sTrials) != 0:
        choiceData = merge_seqChoices(sDivs, sTrials, choiceData, trials)

    choiceData.sort_values(by=['sessionDate', 'division'], inplace=True)
    choiceData.index = range(len(choiceData.index))
    return choiceData  #removes trials that were recorded by error
Beispiel #4
0
def plot_transitivity(softmaxDF):
    '''
    Plot per-pair choice ratios as a horizontal bar chart centred on
    indifference (0.5), to eyeball transitivity of the animal's preferences.

    Parameters
    ----------
    softmaxDF : DataFrame
        DataFrame of psychophysics data i.e. CE or PE sequences; rows carry
        `primary`, `secondary`, `freq_sCh` and `no_of_Trials` columns.

    Returns
    ----------
    None.  Draws one figure per (sessionDate, seqCode) subset: bars right of
    0 (green) mean the secondary option was chosen more often, left of 0
    (red) the primary; text labels give the raw choice counts.

    ----------
    future: needs to print proper Confidence Intervals
    '''
    if softmaxDF.empty:
        return
    import numpy as np
    from macaque.f_toolbox import flatten

    # -------------------------------------------- where primary function starts

    # Multi-day input: recurse once per (day, sequence-code) subset.
    if ('sessionDate' in softmaxDF.columns) and (len(
            softmaxDF.sessionDate.unique()) > 1):
        for day in softmaxDF.sessionDate.unique():
            for div in softmaxDF.seqCode.unique():
                # .sort_values(['primaryEV']))
                plot_transitivity(
                    softmaxDF.loc[softmaxDF.sessionDate == day].loc[
                        softmaxDF.seqCode == div])
    else:
        # if there is a date to the softmax row, add the date to the subplot
        # Build one bar per secondary option: ratio is centred on 0 so that
        # sign encodes which option was preferred.
        i = 0
        ratios = []
        indice = []
        leftAxis = []
        rightAxis = []
        lookup = []
        for index, row in softmaxDF.iterrows():
            #            np.sort(row.secondary)
            leftAxis.extend(
                np.repeat(str(row.primary), len(row.freq_sCh),
                          axis=0).tolist())
            rightAxis.extend(row.secondary)
            for choice_ratio in row.freq_sCh:
                ratios.extend([choice_ratio - 0.5])
                indice.extend([i])
                i += 1
            lookup.extend([i])  # row boundaries, drawn as separators later

        # Colour code: green = secondary preferred, red = primary, black = tie.
        colors = []
        for ii, ration in enumerate(ratios):
            if ration > 0:
                colors.extend('g')
            elif ration < 0:
                colors.extend('r')
            else:
                colors.extend('k')

        # Figure height scales with the number of bars.
        fig, axarr = plt.subplots(
            figsize=(8, len(flatten(softmaxDF.freq_sCh.tolist())) / 4))
        if 'sessionDate' in softmaxDF.columns:
            axarr.set_title(
                softmaxDF.sessionDate.apply(
                    lambda x: x.strftime("%Y-%m-%d")).unique().tolist()[0] +
                ': division ' + str(softmaxDF.seqCode.unique().tolist()[0])
            )  # this sets the subplot's title
        axarr.barh(indice, ratios, color=colors)

        # Reference lines at indifference and +/- 25%.
        axarr.axvline(x=0, linestyle='-', color='k', alpha=1)
        axarr.axvline(x=0.25, linestyle='--', color='k', alpha=0.6)
        axarr.axvline(x=-0.25, linestyle='--', color='k', alpha=0.6)
        plt.yticks(indice, leftAxis)  # primary options on the left axis
        axarr.set_ylim(min(indice) - 1, max(indice) + 2)  # y axis length
        plt.tight_layout()

        # Twin axis carries the secondary-option labels and the count text.
        axarr2 = axarr.twinx()
        axarr2.barh(indice, ratios, alpha=0)
        for ii, chR, nT in zip(indice, flatten(softmaxDF.freq_sCh.tolist()),
                               flatten(softmaxDF.no_of_Trials.tolist())):
            if chR > 0.5:
                axarr2.text(chR - 0.5 + 0.015,
                            ii - 0.25,
                            str(int(chR * nT)) + '/' + str(int(nT)),
                            style='italic',
                            color='k',
                            alpha=0.65,
                            fontsize='smaller')
            else:
                axarr2.text(chR - 0.5 - 0.08,
                            ii - 0.25,
                            str(int(chR * nT)) + '/' + str(int(nT)),
                            style='italic',
                            color='k',
                            alpha=0.65,
                            fontsize='smaller')
        for lines in lookup:
            axarr2.axhline(y=lines - 0.5, linestyle='-', color='b', alpha=1)
        plt.yticks(indice, rightAxis)  # secondary options on the right axis
        axarr2.set_ylim(min(indice) - 1, max(indice) + 2)  # y axis length
        axarr2.set_xlim(-0.6, 0.6)  # y axis length
        plt.tight_layout()

        plt.show()
Beispiel #5
0
def get_softmaxData(choiceData,
                    metricType='ce',
                    minSecondaries=4,
                    minChoices=4,
                    plotTQDM=True):
    '''
    From a 'choiceData' dataFrame, retrieve psychometric data used in certainty/probability equivalents, choice ratios, reaction times.  \n
    **IMPORTANT**:\n
    If 'choiceData' is *divided into blocked or sequence-specific choices*, get_softmaxData returns block or sequence specific results (per day of testing).

    Parameters
    ----------
    choiceData : DataFrame
        DataFrame of psychophysics data i.e. CE or PE sequences
    metricType : string
        'CE' / 'certainty equivalent' or 'PE'/'probability equivalent' psychometric fits on the choice Data, \
        'Trans' orders without computing psychometrics
    minSecondaries : int
        Number of secondary options against which the primary is tested (e.g. safes for a single gamble)
    minChoices : int
        Number or choice made between the primary and secondary options (e.g. safes repeated n times per gamble)
    plotTQDM : bool
        Show the tqdm progress bar while iterating over sessions.

    Returns
    ----------
    softmaxDF : DataFrame
        Returns psychometric data used to plot softmax curves, reaction times, and choice rations between gamble/safe pairs and sequences

    ----------
    future: needs to print proper Confidence Intervals
    '''

    # This is in case the data has been divided in blocks/sequence types
    # (useful for further analysis): recurse once per (day, division) subset
    # and concatenate the per-block results.
    if ('division' in choiceData.columns) and (len(
            choiceData.sessionDate.unique()) > 1):
        dfs = []
        for day in tqdm(choiceData.sessionDate.unique(),
                        desc='Computing block-based Psychophysics',
                        disable=not plotTQDM):
            for div in choiceData.loc[choiceData.sessionDate ==
                                      day].division.unique():
                tailEnd = get_softmaxData(
                    (choiceData.loc[choiceData.sessionDate == day].loc[
                        choiceData.division == div]), metricType,
                    minSecondaries, minChoices)
                if tailEnd is None:
                    continue
                else:
                    dfs.append(
                        tailEnd.assign(division=div).assign(sessionDate=day))

        softmaxDF = pd.concat(dfs, ignore_index=True)

        # Column order depends on the metric: CE/PE carry fitted parameters
        # ('equivalent', 'pFit', 'pSTE', 'func'); other metrics do not.
        if metricType.lower() == 'ce' or metricType.lower(
        ) == 'certainty equivalent':
            cols = [
                'sessionDate', 'primary', 'primaryEV', 'equivalent',
                'secondary', 'secondaryEV', 'm_range', 'freq_sCh', 'pFit',
                'pSTE', 'no_of_Trials', 'nTrials', 'primarySide', 'choiceList',
                'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
                'oClock', 'func', 'metricType', 'division', 'seqCode', 'gList',
                'chosenEV'
            ]

        elif metricType.lower() == 'pe' or metricType.lower(
        ) == 'probability equivalent':
            cols = [
                'sessionDate', 'primary', 'primaryEV', 'equivalent',
                'freq_sCh', 'secondary', 'secondaryEV', 'm_range', 'pFit',
                'pSTE', 'no_of_Trials', 'nTrials', 'primarySide', 'choiceList',
                'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
                'oClock', 'func', 'metricType', 'division', 'seqCode', 'gList',
                'chosenEV'
            ]
        elif metricType.lower() == 'none':
            cols = [
                'sessionDate', 'primary', 'primaryEV', 'secondary',
                'secondaryEV', 'm_range', 'freq_sCh', 'no_of_Trials',
                'nTrials', 'primarySide', 'choiceList', 'filteredRT',
                'choiceTimes', 'moveTime', 'trial_index', 'oClock',
                'metricType', 'division', 'seqCode', 'gList', 'chosenEV'
            ]
        else:
            cols = [
                'sessionDate', 'primary', 'primaryEV', 'secondary',
                'secondaryEV', 'm_range', 'freq_sCh', 'no_of_Trials',
                'nTrials', 'primarySide', 'choiceList', 'filteredRT',
                'choiceTimes', 'moveTime', 'trial_index', 'oClock',
                'metricType', 'division', 'seqCode', 'gList', 'chosenEV'
            ]

        return psychometricDF(softmaxDF[cols])

    #-------------------------------------------------------------------------

    else:
        # Single-block case: group the psychophysics rows per unique primary
        # option, then per secondary magnitude range, and build one softmax
        # row for each grouping that passes the minChoices/minSecondaries
        # filters.
        cols = [
            'primary', 'primaryEV', 'secondary', 'secondaryEV', 'm_range',
            'freq_sCh', 'primarySide', 'no_of_Trials', 'nTrials', 'choiceList',
            'filteredRT', 'choiceTimes', 'moveTime', 'trial_index', 'oClock',
            'metricType', 'chosenEV'
        ]
        #        softmaxDF = pd.DataFrame(columns=cols)
        dfs = []

        psychData = get_psychData(choiceData, metricType, transitType='None')
        unique_options = unique_listOfLists(psychData.option1)
        for option in unique_options:
            # find index for specfic option1 gamble
            index = psychData['option1'].apply(lambda x: x == option)
            mags = []  # magnitude ranges of the secondary options
            igg = {}  # maps tuple(m_range) -> row indices in psychData
            trialType = []  # parallel to mags; ['safe'] marks safe secondaries

            # here we define different secondary gambles from their magnitudes
            #   LOOK HERE FOR THE ISSUE OF != 2
            # option2 entries longer than 2 are gambles; [x[0], x[2]] keeps
            # their two outcome magnitudes — TODO confirm the encoding.
            if psychData.loc[index].loc[psychData.loc[index].option2.apply(
                    lambda x: len(x)) != 2].option2.values.tolist() != []:
                gg = psychData.loc[index].loc[
                    psychData.loc[index].option2.apply(lambda x: len(x)) !=
                    2].option2.apply(lambda x: [x[0], x[2]])
                mags, igg = unique_listOfLists(gg, returnIndex=True)
                for nn in mags:
                    igg[tuple(nn)] = gg.iloc[igg[tuple(nn)]].index
                trialType = mags[:]

            # here we define safe secondary options as unique
            if psychData.loc[index].loc[psychData.loc[index].option2.apply(
                    lambda x: len(x)) == 2].index.tolist() != []:
                listy = psychData.loc[index].loc[
                    psychData.loc[index].option2.apply(
                        lambda x: len(x)) == 2].option2.apply(lambda x: x[0])
                mags.append([min(listy), max(listy)])  # add the safes to this
                igg[tuple([min(listy),
                           max(listy)])] = psychData.loc[index].loc[
                               psychData.loc[index].option2.apply(
                                   lambda x: len(x)) == 2].index.tolist()
                trialType.append(['safe'])

            for m_range, tt in zip(mags, trialType):
                # make series of trial numbers for minChoices filter
                choiceRepeats = psychData.loc[igg[tuple(
                    m_range)]].no_of_Trials.values.tolist()

                if len([lens for lens in choiceRepeats if lens >= minChoices
                        ]) >= minSecondaries:
                    # condition to evaluate the options
                    # import pdb ; pdb.set_trace() #AWESOME WAY TO DEBUG
                    subDf = psychData.loc[igg[tuple(m_range)]].loc[
                        psychData.loc[igg[tuple(m_range)]].no_of_Trials >=
                        minChoices].sort_values('option2')
                    if np.size(subDf) == 0:
                        continue

                    # `marker` keys the per-option dicts below: the varying
                    # magnitude of the secondary (last element for gambles,
                    # first for safes).
                    if tt != ['safe']:
                        # look at the magnitude in option 2 fields
                        marker = [m[-1] for m in subDf.option2]
                    else:
                        marker = [m[0] for m in subDf.option2]

                    # seqType/gList may be absent on some rows; fall back to
                    # empty placeholders rather than failing the whole block.
                    try:
                        seq = int(subDf.seqType.unique()[0])
                        gList = subDf.gList.unique()[0]
                    except BaseException:
                        seq = []
                        gList = []

                    dfs.append(
                        pd.DataFrame({
                            'primary': [
                                flatten(
                                    unique_listOfLists(
                                        subDf.option1.values.tolist()))
                            ],
                            'primaryEV':
                            np.unique(subDf.G1_ev.values.tolist()).tolist(),
                            'secondary': [subDf.option2.values.tolist()],
                            'secondaryEV':
                            [np.unique(subDf.G2_ev.values.tolist()).tolist()],
                            'm_range': [m_range],
                            'freq_sCh': [(subDf.chose2 /
                                          subDf.no_of_Trials).values.tolist()],
                            'no_of_Trials':
                            [subDf.no_of_Trials.values.tolist()],
                            'nTrials':
                            [sum(subDf.no_of_Trials.values.tolist())],
                            'choiceList': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.choiceList.values.tolist())
                            }],
                            'choiceTimes': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.choiceTimes.values.tolist())
                            }],
                            'filteredRT': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.filteredRT.values.tolist())
                            }],
                            'moveTime': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.moveTime.values.tolist())
                            }],
                            'trial_index': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.trial_index.values.tolist())
                            }],
                            'oClock': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.oClock.values.tolist())
                            }],
                            'primarySide': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.side_of_1.values.tolist())
                            }],
                            'metricType': [metricType.upper()],
                            'seqCode': [seq],
                            'gList': [gList],
                            'chosenEV': [{
                                key: value
                                for key, value in zip(
                                    marker, subDf.chosenEV.values.tolist())
                            }]
                        }))

        if dfs == []:
            softmaxDF = pd.DataFrame(columns=cols)
        else:
            softmaxDF = pd.concat(dfs, ignore_index=True)
        if softmaxDF.empty:
            return None

        # CE/PE metrics get a softmax fit appended; 'trans' just reorders.
        if metricType.lower() == 'ce' or metricType.lower(
        ) == 'certainty equivalent' or metricType.lower(
        ) == 'pe' or metricType.lower() == 'probability equivalent':
            cols = [
                'primary', 'primaryEV', 'equivalent', 'secondary',
                'secondaryEV', 'm_range', 'freq_sCh', 'pFit', 'pSTE',
                'primarySide', 'no_of_Trials', 'nTrials', 'choiceList',
                'filteredRT', 'choiceTimes', 'moveTime', 'trial_index',
                'oClock', 'func', 'metricType', 'seqCode', 'gList', 'chosenEV'
            ]
            softmaxDF = fit_softmax(softmaxDF, metricType)
        elif metricType.lower() == 'trans' or metricType.lower(
        ) == 'transitivity':
            cols = [
                'primary', 'primaryEV', 'secondary', 'secondaryEV', 'm_range',
                'freq_sCh', 'primarySide', 'no_of_Trials', 'nTrials',
                'choiceList', 'filteredRT', 'choiceTimes', 'moveTime',
                'trial_index', 'oClock', 'metricType', 'seqCode', 'gList',
                'chosenEV'
            ]
#        import pdb ; pdb.set_trace() #AWESOME WAY TO DEBUG
        return psychometricDF(softmaxDF[cols])
Beispiel #6
0
    def get_RTs(self):
        '''
        Flatten the per-block reaction-time dictionaries into a long-format
        DataFrame (one row per individual choice) for ANOVA-style analysis.

        Returns
        ----------
        aovDF : DataFrame
            Columns: 'RTs' (reaction time, cast to float), 'deltaEV'
            (primary minus secondary EV, rounded to 2 d.p.), 'context'
            (seqCode), 'chosenEV', 'day' (ordinal days since the first
            session), 'chosenSide', 'primarySide'.
        '''
        # Each element of self.choiceTimes is a dict keyed by secondary
        # option, holding the list of RTs recorded against that option.
        RTs = flatten([list(val.values()) for val in self.choiceTimes])
        sEV = flatten([[np.array(y) for y in val.keys()]
                       for val in self.choiceTimes])
        # Repeat each block-level value once per secondary option.
        pEV = flatten([[np.array(ev) for _ in val.keys()]
                       for val, ev in zip(self.choiceTimes, self.primaryEV)])
        seqType = flatten(
            [[seq for _ in val.keys()]
             for val, seq in zip(self.choiceTimes.values, self.seqCode)])
        # chosenEV is flattened once here (the original computed it twice).
        chosenEV = flatten(
            flatten([list(val.values()) for val in self.chosenEV]))
        date = flatten(
            [[seq.toordinal() for _ in val.keys()]
             for val, seq in zip(self.choiceTimes.values, self.sessionDate)])
        date = np.array(date) - date[0]  # days relative to the first session
        chosenSide = flatten(
            flatten([list(val.values()) for val in self.choiceList]))
        primarySide = flatten(
            flatten([list(val.values()) for val in self.primarySide]))

        # Expand the per-option covariates so every individual RT gets its
        # own aligned row.
        sEVs = []
        pEVs = []
        seqTypes = []
        day = []
        for rt, sev, pev, cc, dd in zip(RTs, sEV, pEV, seqType, date):
            sEVs.extend([sev] * len(rt))
            pEVs.extend([pev] * len(rt))
            seqTypes.extend([cc] * len(rt))
            day.extend([dd] * len(rt))
        del sEV, pEV, seqType, date
        deltaEV = np.round(np.array(pEVs) - np.array(sEVs), decimals=2)

        RTs = np.array(flatten(RTs))
        variables = np.array(
            (RTs, deltaEV, np.array(seqTypes), np.array(chosenEV), day,
             np.array(chosenSide), np.array(primarySide))).T
        aovDF = pd.DataFrame(variables,
                             columns=[
                                 'RTs', 'deltaEV', 'context', 'chosenEV',
                                 'day', 'chosenSide', 'primarySide'
                             ])
        aovDF['RTs'] = aovDF['RTs'].astype(float)
        return aovDF