Example #1
## reshape the model/exp responses by condition, group by dispersion
# we reshape the responses to combine within dispersion, i.e. (nDisp, nSf*nCon)
shapeByDisp = lambda resps: resps.reshape(
    (resps.shape[0], resps.shape[1] * resps.shape[2]))
measured_resps = hf.organize_resp(
    data['spikeCount'], cellStruct,
    expInd)[2]  # 3rd output is organized sfMix resp.
measured_byDisp = shapeByDisp(measured_resps)
nDisps = len(measured_byDisp)
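
# Illustration only (hypothetical shapes, not taken from the data; assumes numpy as np, as above):
# shapeByDisp flattens an organized response array of shape (nDisp, nSf, nCon) into
# (nDisp, nSf*nCon), so each row holds every SF x contrast condition of one dispersion.
_demo = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # e.g. nDisp=2, nSf=3, nCon=4
assert shapeByDisp(_demo).shape == (2, 12)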

## get the final filter tunings
omega = np.logspace(-2, 2, 1000)  # the SFs at which we evaluate the filters
# first, normalization
inhSfTuning = hf.getSuppressiveSFtuning(sfs=omega)
nInhChan = cellStruct['sfm']['mod']['normalization']['pref']['sf']
nTrials = inhSfTuning.shape[0]
if fitType == 2:
    gs_mean, gs_std = [finalParams[normMu], finalParams[normStd]]
    inhWeight = hf.genNormWeights(cellStruct, nInhChan, gs_mean, gs_std,
                                  nTrials, expInd)
    inhWeight = inhWeight[:, :, 0]
    # genNormWeights gives us weights as nTr x nFilters x nFrames - we have only one "frame" here, and all are the same
    # first, tuned norm:
    sfNorm = np.sum(-.5 * (inhWeight * np.square(inhSfTuning)), 1)
    sfNorm = sfNorm / np.amax(np.abs(sfNorm))
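    # In effect, sfNorm(omega) is -(1/2) * sum_j[ inhWeight_j * inhSfTuning_j(omega)^2 ], i.e. a
    # weighted sum of the squared suppressive filter responses, rescaled to unit peak magnitude.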
    # update function to be used below
    updateInhWeight = lambda mn, std: hf.genNormWeights(
        cellStruct, nInhChan, mn, std, nTrials, expInd)[:, :, 0]
else:
Example #2
        sigLow = i[1]
        sigHigh = i[-1]
        sfRel = np.divide(omega, prefSf)
        # - set sigma appropriately, depending on whether the stimulus SF is below or above the preferred SF
        sigma = np.full(len(sfRel), sigLow, dtype=float)
        sigma[sfRel > 1] = sigHigh
        # - now, compute the responses (automatically normalized, since the maximum Gaussian value is 1)
        s = [
            np.exp(-np.divide(np.square(np.log(x)), 2 * np.square(y)))
            for x, y in zip(sfRel, sigma)
        ]
        sfExcCurr = s

    sfExc.append(sfExcCurr)
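
# Sketch only: an equivalent vectorized form of the two-halved ("flexible") log-Gaussian built up
# above, assuming numpy as np; the helper name below is chosen here for illustration, and
# prefSf, sigLow, sigHigh are scalars as in this example.
def flexible_log_gauss(sf, prefSf, sigLow, sigHigh):
    sfRel = np.asarray(sf) / prefSf
    # separate widths below (sigLow) and above (sigHigh) the preferred SF
    sigma = np.where(sfRel > 1, sigHigh, sigLow)
    return np.exp(-np.square(np.log(sfRel)) / (2 * np.square(sigma)))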

inhSfTuning = hf.getSuppressiveSFtuning()

# Compute weights for suppressive signals
nInhChan = expData['sfm']['mod']['normalization']['pref']['sf']
nTrials = inhSfTuning.shape[0]
inhWeight = hf.genNormWeights(expData, nInhChan, gs_mean, gs_std, nTrials,
                              expInd)
inhWeight = inhWeight[:, :, 0]
# genNormWeights gives us weights as nTr x nFilters x nFrames - we have only one "frame" here, and all are the same
# first, tuned norm:
sfNormTune = np.sum(-.5 * (inhWeight * np.square(inhSfTuning)), 1)
sfNormTune = sfNormTune / np.amax(np.abs(sfNormTune))
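# Shape sketch (illustrative sizes only; random values stand in for the real weights and tuning):
# genNormWeights returns (nTrials, nFilters, nFrames); with a single frame, [:, :, 0] leaves
# (nTrials, nFilters), and summing over axis 1 collapses the filter dimension.
_w = np.random.rand(4, 7, 1)[:, :, 0]                  # -> (4, 7), like inhWeight above
_tune = np.random.rand(4, 7)                           # stands in for inhSfTuning
_tunedNorm = np.sum(-.5 * (_w * np.square(_tune)), 1)  # -> (4,), one value per row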
# then, untuned norm:
inhAsym = 0
inhWeight = []
for iP in range(len(nInhChan)):
Example #3
prefOri = 0  # just a fixed value, since there is no model parameter for this
aRatio = 1   # just a fixed value, since there is no model parameter for this
filtTemp = model_responses.oriFilt(imSizeDeg, pixSize, prefSf, prefOri, dOrder,
                                   aRatio)
filt = (filtTemp - filtTemp[0, 0]) / np.amax(np.abs(filtTemp - filtTemp[0, 0]))
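# i.e., use the corner pixel as a baseline, then scale so the filter's peak absolute value is 1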

# get model details - exc/suppressive components
omega = np.logspace(-2, 2, 1000)
sfRel = omega / prefSf
s = np.power(omega, dOrder) * np.exp(-dOrder / 2 * np.square(sfRel))
sMax = np.power(prefSf, dOrder) * np.exp(-dOrder / 2)
sfExc = s / sMax
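# Sanity check (illustration only; assumes prefSf lies within the omega grid above): the
# excitatory curve s(omega) peaks at omega = prefSf, where it equals sMax, so sfExc should peak at ~1.
assert np.isclose(np.max(sfExc), 1.0, atol=1e-3)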

inhSfTuning = helper_fcns.getSuppressiveSFtuning()

nInhChan = cellStruct['sfm']['mod']['normalization']['pref']['sf']
if norm_type == 1:
    nTrials = inhSfTuning.shape[0]
    inhWeight = helper_fcns.genNormWeights(cellStruct, nInhChan, gs_mean,
                                           gs_std, nTrials)
    inhWeight = inhWeight[:, :, 0]
    # genNormWeights gives us weights as nTr x nFilters x nFrames - we have only one "frame" here, and all are the same
else:
    if len(modFit) > 8:  # i.e., if this parameter exists
        inhAsym = modFit[8]
    else:
        inhAsym = 0

    inhWeight = []