Example #1
def torusPopulationVector(spikes, sheetSize, tstart=0, tend=-1, dt=0.02, winLen=1.0):
    '''
    This function is deprecated. Use the OO version instead.
    '''
    log_warn('This function is deprecated')
    N = sheetSize[0]*sheetSize[1]  # total number of neurons on the sheet
    F, tsteps = slidingFiringRateTuple(spikes, N, tstart, tend, dt, winLen)  # sliding-window firing rates

    return torusPopulationVectorFromRates((F, tsteps), sheetSize)
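A minimal usage sketch (not part of the original source): it assumes torusPopulationVector above is in scope and that spikes is a sequence of per-neuron spike-time arrays; the sheet size, time range, and synthetic spike generation are illustrative placeholders.

import numpy as np

# Illustrative only: synthetic spike trains for a hypothetical 34x30 sheet.
sheetSize = (34, 30)
N = sheetSize[0] * sheetSize[1]
spikes = [np.sort(np.random.uniform(0., 1e3, np.random.randint(1, 20)))
          for _ in range(N)]

# The result is whatever torusPopulationVectorFromRates produces, i.e. an
# estimate of the population-vector (bump) position over time.
bumpTrace = torusPopulationVector(spikes, sheetSize, tstart=0, tend=1e3)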
Example #2
def aggregateType(sp, iterList, types, NTrials, ignoreNaNs=False, **kw):
    '''
    Automatically aggregate data according to the type of the data.

    .. deprecated::
        Use the object-oriented versions instead.
    '''
    type, subType = types
    vars = ['analysis']
    output_dtype = 'array'
    funReduce = None
    normalizeTicks = kw.pop('normalizeTicks', False)

    if (type == 'gamma'):
        # Gamma oscillation analyses
        if (subType == 'acVal'):
            # Autocorrelation first local maximum
            vars += ['acVal']
        elif (subType == 'freq'):
            # Gamma frequency
            vars += ['freq']
        elif (subType == 'acVec'):
            # All the autocorrelations
            vars += ['acVec']
            output_dtype = 'list'
        else:
            raise ValueError('Unknown gamma subtype: {0}'.format(subType))
        trialNumList = np.arange(NTrials)

    elif (type == 'bump'):
        trialNumList = np.arange(NTrials)
        if (subType == 'sigma'):
            vars += ['bump_e', 'sigma']
        elif (subType == 'rateMap_e'):
            vars += ['bump_e', 'bump_e_rateMap']
            output_dtype = 'list'
        elif (subType == 'rateMap_i'):
            vars += ['bump_i', 'bump_i_rateMap']
            output_dtype = 'list'
        else:
            raise ValueError('Unknown bump subtype: {0}'.format(subType))

    elif (type == 'bump_full'):
        trialNumList = np.arange(NTrials)
        if (subType == 'sigma'):
            vars += ['bump_e_full', 'sigma']
        elif (subType == 'rateMap_e'):
            vars += ['bump_e_full', 'bump_e_rateMap']
            output_dtype = 'list'
        elif (subType == 'rateMap_i'):
            vars += ['bump_i_full', 'bump_i_rateMap']
            output_dtype = 'list'
        else:
            raise ValueError('Unknown bump_full subtype: {0}'.format(subType))

    elif (type == 'velocity'):
        if (subType == 'slope'):
            vars += ['lineFitSlope']
        elif (subType == 'fitErr'):
            vars += ['lineFitErr']
            funReduce = np.sum
        else:
            raise ValueError('Unknown velocity subtype: {0}'.format(subType))
        trialNumList = 'all-at-once'

    elif (type == 'grids'):
        trialNumList = np.arange(NTrials)
        if (subType == 'gridnessScore'):
            vars += ['gridnessScore']
            funReduce = None
        else:
            raise ValueError('Unknown grids subtype: {0}'.format(subType))
    elif type == 'FR':
        trialNumList = np.arange(NTrials)
        if subType == 'E':
            vars += ['FR_e', 'avg']
            funReduce = None
        elif subType == 'I_10':  # note: limited to 10 neurons
            vars += ['FR_i', 'all']
            funReduce = None
        else:
            raise ValueError('Unknown FR subtype: {0}'.format(subType))

    else:
        raise ValueError('Unknown aggregation type: {0}'.format(type))

    data = sp.aggregateData(vars,
                            trialNumList,
                            output_dtype=output_dtype,
                            loadData=True,
                            saveData=False,
                            funReduce=funReduce)
    if (output_dtype != 'list'):
        data = ma.MaskedArray(data)
        if (ignoreNaNs):
            log_warn('aggregateType', 'Ignoring NaNs')
            nans = np.isnan(data)
            data.mask = nans

    if (type == 'velocity'):
        Y, X = computeVelYX(sp, iterList, normalize=normalizeTicks, **kw)
    else:
        if (type == 'bump' or type == 'bump_full'):
            if (subType == 'sigma'):
                # bump sigma is a reciprocal
                data = 1. / data
                ignoreThreshold = 1.0
                data.mask = np.logical_or(data.mask, data > ignoreThreshold)
                data = np.mean(data,
                               axis=2)  # TODO: fix the trials, stack them
        else:
            data = np.mean(data, axis=2)  # TODO: fix the trials, stack them
        Y, X = computeYX(sp, iterList, normalize=normalizeTicks, **kw)

    return data, X, Y
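A hypothetical call sketch (not from the original source): sp is assumed to be a parameter-sweep object that provides the aggregateData() method used above, and the iterList entries are placeholder parameter names.

# Hypothetical usage; 'param_row' and 'param_col' are placeholder sweep parameters.
gridness, X, Y = aggregateType(sp, ['param_row', 'param_col'],
                               types=('grids', 'gridnessScore'),
                               NTrials=5, ignoreNaNs=True)
# gridness: aggregated gridness scores over the sweep (trials averaged for
# non-list outputs); X, Y: sweep coordinate grids from computeYX().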