Example #1
# - to properly evaluate the loss, load rvcFits, mask the trials
rvcCurr = hf.get_rvc_fits(data_loc,
                          expInd,
                          cellNum,
                          rvcName=rvcBase,
                          rvcMod=rvcMod)
stimOr = np.vstack(expData['sfm']['exp']['trial']['ori'])
mask = np.isnan(np.sum(stimOr, 0))
# sum over all stim components; if there are any NaNs in that trial, we know to exclude it
# - now compute SFMGiveBof!
# ----
modRespWght = mod_resp.SFMGiveBof(modFits[1],
                                  expData,
                                  normType=normTypes[1],
                                  lossType=lossType,
                                  expInd=expInd,
                                  cellNum=cellNum,
                                  rvcFits=rvcCurr,
                                  excType=excType,
                                  maskIn=~mask)
# ----
modResps = [
    mod_resp.SFMGiveBof(fit,
                        expData,
                        normType=norm,
                        lossType=lossType,
                        expInd=expInd,
                        cellNum=cellNum,
                        rvcFits=rvcCurr,
                        excType=excType,
                        maskIn=~mask) for fit, norm in zip(modFits, normTypes)
]
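# as in Examples #4 and #5 below, keep only the per-trial model responses; x[0] is the NLL
modResps = [x[1] for x in modResps]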
Example #2
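These snippets come from a larger project; below is a minimal import sketch. The module names are inferred from the other examples in this collection (e.g. #8 and #10), so treat them as assumptions:

import numpy as np
import scipy.optimize as opt
from scipy.stats import mode  # mode(...).mode is used below for prefOrEst/prefSfEst
import helper_fcns as hf  # assumed module name; provides np_smart_load, getConstraints, ...
import model_responses as mod_resp  # assumed module name; provides SFMGiveBof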
def setModel(cellNum, stopThresh, lr, lossType = 1, fitType = 1, subset_frac = 1, initFromCurr = 1, holdOutCondition = None):
    # Given just a cell number, will fit the Robbe-inspired V1 model to the data
    #
    # stopThresh is the value (in NLL) at which we stop the fitting (i.e. if the difference in NLL between two full steps is < stopThresh, stop the fitting)
    #
    # LR is learning rate
    #
    # lossType
    #   1 - loss := square(sqrt(resp) - sqrt(pred))
    #   2 - loss := poissonProb(spikes | modelRate)
    #   3 - loss := modPoiss model (a la Goris, 2014)
    #
    # fitType - what is the model formulation?
    #   1 := flat normalization
    #   2 := gaussian-weighted normalization weights
    #   3 := gaussian-weighted c50/norm "constant"
    #
    # holdOutCondition - [d, c, sf] or None
    #   which condition should we hold out from the dataset
 
    ########
    # Load cell
    ########
    #loc_data = '/Users/paulgerald/work/sfDiversity/sfDiv-OriModel/sfDiv-python/Analysis/Structures/'; # personal mac
    loc_data = '/home/pl1465/SF_diversity/Analysis/Structures/'; # Prince cluster 

    # fitType
    if fitType == 1:
      fL_suffix1 = '_flat';
    elif fitType == 2:
      fL_suffix1 = '_wght';
    elif fitType == 3:
      fL_suffix1 = '_c50';
    # lossType
    if lossType == 1:
      fL_suffix2 = '_sqrt.npy';
    elif lossType == 2:
      fL_suffix2 = '_poiss.npy';
    elif lossType == 3:
      fL_suffix2 = '_modPoiss.npy';

    dataList = hf.np_smart_load(str(loc_data + 'dataList.npy'));
    dataNames = dataList['unitName'];

    print('loading data structure...');
    S = hf.np_smart_load(str(loc_data + dataNames[cellNum-1] + '_sfm.npy')); # why -1? 0 indexing...
    print('...finished loading');
    trial_inf = S['sfm']['exp']['trial'];
    prefOrEst = mode(trial_inf['ori'][1]).mode;
    trialsToCheck = trial_inf['con'][0] == 0.01;
    prefSfEst = mode(trial_inf['sf'][0][trialsToCheck==True]).mode;
    
    ########

    # 00 = preferred spatial frequency   (cycles per degree)
    # 01 = derivative order in space
    # 02 = normalization constant        (log10 basis)
    # 03 = response exponent
    # 04 = response scalar
    # 05 = early additive noise
    # 06 = late additive noise
    # 07 = variance of response gain - only used if lossType = 3
    # if fitType == 2
    # 08 = mean of (log)gaussian for normalization weights
    # 09 = std of (log)gaussian for normalization weights
    # if fitType == 3
    # 08 = the offset of the c50 tuning curve which is bounded between [v_sigOffset, 1] || [0, 1]
    # 09 = standard deviation of the gaussian to the left of the peak || >0.1
    # 10 = "" to the right "" || >0.1
    # 11 = peak of offset curve
    
    curr_params = [];
    initFromCurr = 0; # override initFromCurr so that we just go with default parameters

    if np.any(np.isnan(curr_params)): # if there are nans, we need to ignore...
      curr_params = [];
      initFromCurr = 0;

    pref_sf = float(prefSfEst) if initFromCurr==0 else curr_params[0];
    dOrdSp = np.random.uniform(1, 3) if initFromCurr==0 else curr_params[1];
    normConst = -0.8 if initFromCurr==0 else curr_params[2]; # why -0.8? Talked with Tony, he suggests starting with lower sigma rather than higher/non-saturating one
    #normConst = np.random.uniform(-1, 0) if initFromCurr==0 else curr_params[2];
    respExp = np.random.uniform(1, 3) if initFromCurr==0 else curr_params[3];
    respScalar = np.random.uniform(10, 1000) if initFromCurr==0 else curr_params[4];
    noiseEarly = np.random.uniform(0.001, 0.1) if initFromCurr==0 else curr_params[5];
    noiseLate = np.random.uniform(0.1, 1) if initFromCurr==0 else curr_params[6];
    varGain = np.random.uniform(0.1, 1) if initFromCurr==0 else curr_params[7];
    if fitType == 1:
      inhAsym = 0; 
    if fitType == 2:
      normMean = np.random.uniform(-1, 1) if initFromCurr==0 else curr_params[8];
      normStd = np.random.uniform(0.1, 1) if initFromCurr==0 else curr_params[9];
    if fitType == 3:
      sigOffset = np.random.uniform(0, 0.05) if initFromCurr==0 else curr_params[8];
      stdLeft = np.random.uniform(1, 5) if initFromCurr==0 else curr_params[9];
      stdRight = np.random.uniform(1, 5) if initFromCurr==0 else curr_params[10];
      sigPeak = float(prefSfEst) if initFromCurr==0 else curr_params[11];

    print('Initial parameters:\n\tsf: ' + str(pref_sf)  + '\n\td.ord: ' + str(dOrdSp) + '\n\tnormConst: ' + str(normConst));
    print('\n\trespExp ' + str(respExp) + '\n\trespScalar ' + str(respScalar));
    
    #########
    # Now get all the data we need
    #########    
    # stimulus information
    
    # vstack to turn into array (not array of arrays!)
    stimOr = np.vstack(trial_inf['ori']);

    #purge of NaNs...
    mask = np.isnan(np.sum(stimOr, 0)); # sum over all stim components...if there are any nans in that trial, we know to exclude it
    objWeight = np.ones((stimOr.shape[1]));    

    # and get rid of orientation tuning curve trials
    oriBlockIDs = np.hstack((np.arange(131, 155+1, 2), np.arange(132, 136+1, 2))); # +1 to include endpoint like Matlab

    oriInds = np.empty((0,));
    for iB in oriBlockIDs:
        indCond = np.where(trial_inf['blockID'] == iB);
        if len(indCond[0]) > 0:
            oriInds = np.append(oriInds, indCond);

    # get rid of CRF trials, too? Not yet...
    conBlockIDs = np.arange(138, 156+1, 2);
    conInds = np.empty((0,));
    for iB in conBlockIDs:
       indCond = np.where(trial_inf['blockID'] == iB);
       if len(indCond[0]) > 0:
           conInds = np.append(conInds, indCond);

    objWeight[conInds.astype(np.int64)] = 1; # for now, yes it's a "magic number"    

    mask[oriInds.astype(np.int64)] = True; # as in, don't include those trials either!
    # hold out a condition if we have specified, and adjust the mask accordingly
    if holdOutCondition is not None:
      # dispInd: [1, 5]...conInd: [1, 2]...sfInd: [1, 11]
      # first, get all of the conditions... - blockIDs by condition known from Robbe code
      dispInd = holdOutCondition[0];
      conInd = holdOutCondition[1];
      sfInd = holdOutCondition[2];

      StimBlockIDs  = np.arange(((dispInd-1)*(13*2)+1)+(conInd-1), ((dispInd)*(13*2)-5)+(conInd-1)+1, 2); # +1 to include the last block ID
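      # sanity check on the arithmetic: this arange spans 11 block IDs (one per SF center, matching sfInd in [1, 11]); the step of 2 presumably skips the interleaved other-contrast blocks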
      currBlockID = StimBlockIDs[sfInd-1];
      holdOutTr = np.where(trial_inf['blockID'] == currBlockID)[0];
      mask[holdOutTr.astype(np.int64)] = True; # as in, don't include those trials either!
      
    # Set up model here - get the parameters and parameter bounds
    if fitType == 1:
      param_list = (pref_sf, dOrdSp, normConst, respExp, respScalar, noiseEarly, noiseLate, varGain, inhAsym);
    elif fitType == 2:
      param_list = (pref_sf, dOrdSp, normConst, respExp, respScalar, noiseEarly, noiseLate, varGain, normMean, normStd);
    elif fitType == 3:
      param_list = (pref_sf, dOrdSp, normConst, respExp, respScalar, noiseEarly, noiseLate, varGain, sigOffset, stdLeft, stdRight, sigPeak);
    all_bounds = hf.getConstraints(fitType);
   
    # now set up the optimization
    obj = lambda params: mod_resp.SFMGiveBof(params, structureSFM=S, normType=fitType, lossType=lossType, maskIn=~mask)[0];
    tomin = opt.minimize(obj, param_list, bounds=all_bounds);

    opt_params = tomin['x'];
    NLL = tomin['fun'];

    if holdOutCondition is not None:
      holdoutNLL, _ = mod_resp.SFMGiveBof(opt_params, structureSFM=S, normType=fitType, lossType=lossType, trialSubset=holdOutTr);
    else:
      holdoutNLL = [];

    return NLL, opt_params, holdoutNLL;
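A hypothetical call (argument values are illustrative only; cellNum is 1-based, per the cellNum-1 indexing above):

NLL, opt_params, holdoutNLL = setModel(cellNum=1, stopThresh=1e-4, lr=0.01, lossType=1, fitType=1)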
Example #3
expData = np.load(str(data_loc + dL['unitName'][cellNum - 1] +
                      '_sfm.npy')).item()
expResp = expData
modFit = fitList[cellNum - 1]['params']
descrExpFit = descrExpFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams
descrModFit = descrModFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams

if len(normTypeArr) == 3:  # i.e. we've passed in gs_mean, gs_std, then replace...
    modFit[-2] = normTypeArr[1]
    modFit[-1] = normTypeArr[2]

ignore, modResp, normTypeArr = mod_resp.SFMGiveBof(modFit, expData,
                                                   normTypeArr)
norm_type = normTypeArr[0]
if norm_type == 1:
    gs_mean = normTypeArr[1]
    # guaranteed to exist after call to .SFMGiveBof, if norm_type == 1
    gs_std = normTypeArr[2]
    # guaranteed to exist ...
#modRespAll = mod_resp.SFMGiveBof(modParamsCurr, expData, normTypeArr)[1]; # NOTE: We're taking [1] (i.e. second) output of SFMGiveBof
oriModResp, conModResp, sfmixModResp, allSfMix = organize_modResp(
    modResp, expData['sfm']['exp']['trial'])
oriExpResp, conExpResp, sfmixExpResp, allSfMixExp = organize_modResp(expData['sfm']['exp']['trial']['spikeCount'], \
                                                                           expData['sfm']['exp']['trial'])
#pdb.set_trace();

# allSfMix is (nFam, nCon, nCond, nReps), where nCond is 11 (the number of SF centers) and nReps is usually 10
modLow = np.nanmin(allSfMix, axis=3)
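The snippet is truncated here; Example #9 below continues the same pattern with:

modHigh = np.nanmax(allSfMix, axis=3)
modAvg = np.nanmean(allSfMix, axis=3)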
Example #4
  cellType = 'V1'; 

expData  = np.load(str(data_loc + cellName + '_sfm.npy'), encoding='latin1').item();
expInd   = hf.get_exp_ind(data_loc, cellName)[0];

# #### Load model fits

modFit_fl = fitList_fl[cellNum-1]['params'];
modFit_wg = fitList_wg[cellNum-1]['params'];
modFits = [modFit_fl, modFit_wg];
normTypes = [1, 2]; # flat, then weighted

# ### Organize data
# #### determine contrasts, center spatial frequency, dispersions

modResps = [mod_resp.SFMGiveBof(fit, expData, normType=norm, lossType=lossType, expInd=expInd) for fit, norm in zip(modFits, normTypes)];
modResps = [x[1] for x in modResps]; # 1st return output (x[0]) is NLL (don't care about that here)
gs_mean = modFit_wg[8]; 
gs_std = modFit_wg[9];
# now organize the responses
orgs = [hf.organize_resp(mr, expData, expInd) for mr in modResps];
oriModResps = [org[0] for org in orgs]; # only non-empty if expInd = 1
conModResps = [org[1] for org in orgs]; # only non-empty if expInd = 1
sfmixModResps = [org[2] for org in orgs];
allSfMixs = [org[3] for org in orgs];

modLows = [np.nanmin(resp, axis=3) for resp in allSfMixs];
modHighs = [np.nanmax(resp, axis=3) for resp in allSfMixs];
modAvgs = [np.nanmean(resp, axis=3) for resp in allSfMixs];
modSponRates = [fit[6] for fit in modFits];
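# (fit[6] is the late additive noise term from the parameter list in Example #2 -- effectively the model's spontaneous rate; cf. modBlankMean in Example #7)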
Example #5
# #### Load model fits

modFit_fl = fitList_fl[cellNum-1]['params'];
modFit_wg = fitList_wg[cellNum-1]['params'];
modFits = [modFit_fl, modFit_wg];
normTypes = [1, 2]; # flat, then weighted

# ### Organize data
# #### determine contrasts, center spatial frequency, dispersions
# SFMGiveBof returns spike counts per trial, NOT rates -- we will correct in hf.organize_resp call below
# - to properly evaluate the loss, load rvcFits, mask the trials
rvcCurr = hf.get_rvc_fits(data_loc, expInd, cellNum, rvcName=rvcBase, rvcMod=rvcMod);
stimOr = np.vstack(expData['sfm']['exp']['trial']['ori']);
mask = np.isnan(np.sum(stimOr, 0)); # sum over all stim components...if there are any nans in that trial, we know to exclude it
# - now compute SFMGiveBof!
modResps = [mod_resp.SFMGiveBof(fit, expData, normType=norm, lossType=lossType, expInd=expInd, cellNum=cellNum, rvcFits=rvcCurr, excType=excType, maskIn=~mask, compute_varExpl=1, lgnFrontEnd=lgnFrontEnd) for fit, norm in zip(modFits, normTypes)];

# unpack the model fits!
varExplSF_flat = modResps[0][3];
varExplSF = modResps[1][3];
varExplCon_flat = modResps[0][4];
varExplCon = modResps[1][4];
lossByCond_flat = modResps[0][2];
lossByCond = modResps[1][2]; # We only care about weighted...
modResps = [x[1] for x in modResps]; # 1st return output (x[0]) is NLL (don't care about that here)
#lossByCond = [x[2] for x in modResps]; # if we want both...
gs_mean = modFit_wg[8]; 
gs_std = modFit_wg[9];
# now organize the responses
orgs = [hf.organize_resp(mr, expData, expInd, respsAsRate=False) for mr in modResps];
oriModResps = [org[0] for org in orgs]; # only non-empty if expInd = 1
Example #6
for c in range(nCells):
    curr = hf.np_smart_load(loc_data + dL_mr['unitName'][c] + '_sfm.npy')
    if 'respWght' in curr['sfm']['mod']['recovery'] and 'respFlat' in curr['sfm']['mod']['recovery'] and overwriteMR == 0:
        print('\talready generated these model recovery responses; skipping')
        continue
    recov = curr['sfm']['mod']['recovery']
    expInd = hf.exp_name_to_ind(dL_mr['expType'][c])

    types = ['Wght', 'Flat']
    paramStrs = ['params%s' % x for x in types]
    respStrs = ['resp%s' % x for x in types]
    normTypes = [2, 1]
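    # order matches 'types' above: normType 2 is the gaussian-weighted model ('Wght'), 1 is flat ('Flat'); cf. the fitType codes in Example #2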
    for (paramStr, respStr, norm) in zip(paramStrs, respStrs, normTypes):
        currResp = mod_resp.SFMGiveBof(recov[paramStr],
                                       curr,
                                       normType=norm,
                                       lossType=lossType,
                                       expInd=expInd)[1]
        # 0th return is NLL
        curr['sfm']['mod']['recovery'][respStr] = np.random.poisson(currResp)
        # simulate from poisson model - this makes integer spike counts and introduces some variability
    # now save it!
    np.save(loc_data + dL_mr['unitName'][c] + '_sfm.npy', curr)

###########
# 3. fit model
# now, run model_responses while specifying the correct dataList/fitList
###########
print('\n\n\n********YOU HAVE MADE IT TO FITTING STAGE*********\n\n')
Example #7
                     encoding='latin1').item()

# #### Load descriptive model fits, comp. model fits
descrFitName = hf.descrFit_name(descr_fit_type)

modParams = np.load(str(dataPath + fitListName), encoding='latin1').item()
modParamsCurr = modParams[which_cell - 1]['params']

# ### Organize data
# #### determine contrasts, center spatial frequency, dispersions

data = cellStruct['sfm']['exp']['trial']

ignore, modRespAll = mod_resp.SFMGiveBof(modParamsCurr,
                                         cellStruct,
                                         normType=norm_type,
                                         lossType=lossType,
                                         expInd=expInd)
print('norm type %02d' % (norm_type))
if norm_type == 2:
    gs_mean = modParamsCurr[1]
    # guaranteed to exist after call to .SFMGiveBof, if norm_type == 2
    gs_std = modParamsCurr[2]
    # guaranteed to exist ...
resp, stimVals, val_con_by_disp, validByStimVal, modResp = hf.tabulate_responses(
    cellStruct, expInd, modRespAll)
blankMean, blankStd, _ = hf.blankResp(cellStruct)
modBlankMean = modParamsCurr[6]
# late additive noise is the baseline of the model
# all responses on log ordinate (y axis) should be baseline subtracted
Example #8
def fit_descr(cell_num,
              data_loc,
              n_repeats=4,
              fromModelSim=0,
              fitLossType=1,
              baseStr=None,
              normType=None,
              lossType=None):

    nFam = 5
    nCon = 2
    nParam = 5

    # get base descrFit name (including loss str)
    if fitLossType == 1:
        floss_str = '_lsq'
    elif fitLossType == 2:
        floss_str = '_sqrt'
    elif fitLossType == 3:
        floss_str = '_poiss'
    descrFitBase = 'descrFits%s' % floss_str

    # load cell information
    dataList = hfunc.np_smart_load(data_loc + 'dataList.npy')
    if fromModelSim:
        # get model fit name
        fL_name = baseStr

        # normType
        if normType == 1:
            fL_suffix1 = '_flat'
        elif normType == 2:
            fL_suffix1 = '_wght'
        elif normType == 3:
            fL_suffix1 = '_c50'
        # lossType
        if lossType == 1:
            fL_suffix2 = '_sqrt.npy'
        elif lossType == 2:
            fL_suffix2 = '_poiss.npy'
        elif lossType == 3:
            fL_suffix2 = '_modPoiss.npy'
        elif lossType == 4:
            fL_suffix2 = '_chiSq.npy'
        fitListName = str(fL_name + fL_suffix1 + fL_suffix2)

        dfModelName = '%s_%s' % (descrFitBase, fitListName)

        if os.path.isfile(data_loc + dfModelName):
            descrFits = hfunc.np_smart_load(data_loc + dfModelName)
        else:
            descrFits = dict()
    else:
        dfModelName = '%s.npy' % descrFitBase
        if os.path.isfile(data_loc + dfModelName):
            descrFits = hfunc.np_smart_load(data_loc + dfModelName)
        else:
            descrFits = dict()
    data = hfunc.np_smart_load(data_loc + dataList['unitName'][cell_num - 1] +
                               '_sfm.npy')

    if fromModelSim:  # then we'll 'sneak' in the model responses in the place of the real data
        modFits = hfunc.np_smart_load(data_loc + fitListName)
        modFit = modFits[cell_num - 1]['params']
        a, modResp = mod_resp.SFMGiveBof(modFit,
                                         data,
                                         normType=normType,
                                         lossType=lossType)
        # spike count must be integers! Simply round
        data['sfm']['exp']['trial']['spikeCount'] = np.round(
            modResp * data['sfm']['exp']['trial']['duration'])

    if cell_num - 1 in descrFits:
        bestNLL = descrFits[cell_num - 1]['NLL']
        currParams = descrFits[cell_num - 1]['params']
    else:  # set values to NaN...
        bestNLL = np.ones((nFam, nCon)) * np.nan
        currParams = np.ones((nFam, nCon, nParam)) * np.nan

    print('Doing the work, now')
    for family in range(nFam):
        for con in range(nCon):

            print('.')
            # set initial parameters - a range from which we will pick!
            base_rate = data['sfm']['exp']['sponRateMean']
            if base_rate <= 3:
                range_baseline = (0, 3)
            else:
                range_baseline = (0.5 * base_rate, 1.5 * base_rate)

            max_resp = np.amax(data['sfm']['exp']['sfRateMean'][family][con])
            range_amp = (0.5 * max_resp, 1.5 * max_resp)

            theSfCents = data['sfm']['exp']['sf'][family][con]

            max_sf_index = np.argmax(
                data['sfm']['exp']['sfRateMean'][family][con])
            # what sf index gives peak response?
            mu_init = theSfCents[max_sf_index]

            if max_sf_index == 0:  # i.e. smallest SF center gives max response...
                range_mu = (mu_init / 2, theSfCents[max_sf_index + 3])
            elif max_sf_index + 1 == len(theSfCents):  # i.e. highest SF center is max
                range_mu = (theSfCents[max_sf_index - 3], mu_init)
            else:
                # go +-1 indices from center
                range_mu = (theSfCents[max_sf_index - 1], theSfCents[max_sf_index + 1])

            log_bw_lo = 0.75  # 0.75 octave bandwidth...
            log_bw_hi = 2  # 2 octave bandwidth...
            denom_lo = hfunc.bw_log_to_lin(log_bw_lo, mu_init)[0]  # get linear bandwidth
            denom_hi = hfunc.bw_log_to_lin(log_bw_hi, mu_init)[0]  # get lin. bw (cpd)
            range_denom = (denom_lo, denom_hi)  # don't want 0 in sigma

            # set bounds for parameters
            min_bw = 1 / 4
            max_bw = 10
            # ranges in octave bandwidth

            bound_baseline = (0, max_resp)
            bound_range = (0, 1.5 * max_resp)
            bound_mu = (0.01, 10)
            bound_sig = (np.maximum(0.1,
                                    min_bw / (2 * np.sqrt(2 * np.log(2)))),
                         max_bw / (2 * np.sqrt(2 * np.log(2))))
            # Gaussian at half-height
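            # (2*sqrt(2*ln 2) is the standard factor relating a Gaussian's full width at half-maximum to its sigma)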

            all_bounds = (bound_baseline, bound_range, bound_mu, bound_sig,
                          bound_sig)

            for n_try in range(n_repeats):

                # pick initial params
                init_base = hfunc.random_in_range(range_baseline)
                init_amp = hfunc.random_in_range(range_amp)
                init_mu = hfunc.random_in_range(range_mu)
                init_sig_left = hfunc.random_in_range(range_denom)
                init_sig_right = hfunc.random_in_range(range_denom)

                init_params = [
                    init_base, init_amp, init_mu, init_sig_left, init_sig_right
                ]

                # choose optimization method
                if np.mod(n_try, 2) == 0:
                    methodStr = 'L-BFGS-B'
                else:
                    methodStr = 'TNC'

                obj = lambda params: descr_loss(params, data, family, con)
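                # descr_loss is assumed to be defined elsewhere in this module (it is not shown in this snippet)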
                wax = opt.minimize(obj,
                                   init_params,
                                   method=methodStr,
                                   bounds=all_bounds)

                # compare
                NLL = wax['fun']
                params = wax['x']

                if np.isnan(bestNLL[family, con]) or NLL < bestNLL[family, con] \
                        or invalid(currParams[family, con, :], all_bounds):
                    bestNLL[family, con] = NLL
                    currParams[family, con, :] = params

    # update stuff - load again in case some other run has saved/made changes
    if os.path.isfile(data_loc + dfModelName):
        print('reloading descrFitsModel...')
        descrFits = hfunc.np_smart_load(data_loc + dfModelName)
    if cell_num - 1 not in descrFits:
        descrFits[cell_num - 1] = dict()
    descrFits[cell_num - 1]['NLL'] = bestNLL
    descrFits[cell_num - 1]['params'] = currParams

    np.save(data_loc + dfModelName, descrFits)
    print('saving for cell ' + str(cell_num))
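A hypothetical call, with an illustrative (not real) data path:

fit_descr(cell_num=1, data_loc='/path/to/Structures/', n_repeats=4, fitLossType=1)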
Example #9
# #### Load data

expData = np.load(str(data_loc + dL['unitName'][cellNum - 1] +
                      '_sfm.npy')).item()
expResp = expData
modFit = fitList[cellNum - 1]['params']
descrExpFit = descrExpFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams
descrModFit = descrModFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams

norm_type = fitType
ignore, modResp = mod_resp.SFMGiveBof(modFit,
                                      expData,
                                      normType=norm_type,
                                      lossType=lossType)
if norm_type == 2:
    gs_mean = modFit[8]
    gs_std = modFit[9]
oriModResp, conModResp, sfmixModResp, allSfMix = organize_modResp(
    modResp, expData['sfm']['exp']['trial'])
oriExpResp, conExpResp, sfmixExpResp, allSfMixExp = organize_modResp(expData['sfm']['exp']['trial']['spikeCount'], \
                                                                           expData['sfm']['exp']['trial'])
#pdb.set_trace();

# allSfMix is (nFam, nCon, nCond, nReps), where nCond is 11 (the number of SF centers) and nReps is usually 10
modLow = np.nanmin(allSfMix, axis=3)
modHigh = np.nanmax(allSfMix, axis=3)
modAvg = np.nanmean(allSfMix, axis=3)
modSponRate = modFit[6]
Example #10
# #### Load descriptive model fits, comp. model fits

descrFits = np.load(str(dataPath + 'descrFits.npy'), encoding='latin1').item()
descrFits = descrFits[which_cell - 1]['params']
# just get this cell

modParams = np.load(str(dataPath + fitListName), encoding='latin1').item()
modParamsCurr = modParams[which_cell - 1]['params']

# ### Organize data
# #### determine contrasts, center spatial frequency, dispersions

data = cellStruct['sfm']['exp']['trial']

modRespAll = model_responses.SFMGiveBof(modParamsCurr, cellStruct)[1]
resp, stimVals, val_con_by_disp, validByStimVal, modResp = helper_fcns.tabulate_responses(
    cellStruct, modRespAll)
blankMean, blankStd, _ = helper_fcns.blankResp(cellStruct)
# all responses on log ordinate (y axis) should be baseline subtracted

all_disps = stimVals[0]
all_cons = stimVals[1]
all_sfs = stimVals[2]

nCons = len(all_cons)
nSfs = len(all_sfs)
nDisps = len(all_disps)

# #### Unpack responses
Example #11
modFit_fl = fitList_fl[cellNum - 1]['params']
modFit_wg = fitList_wg[cellNum - 1]['params']
modFits = [modFit_fl, modFit_wg]
normTypes = [1, 2]
# flat, then weighted

descrExpFit = descrExpFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams
descrModFit = descrModFits[cellNum - 1]['params']
# nFam x nCon x nDescrParams

modResps = [
    mod_resp.SFMGiveBof(fit, expData, normType=norm, lossType=lossType)
    for fit, norm in zip(modFits, normTypes)
]
modResps = [x[1] for x in modResps]
# 1st return output is NLL (don't care about that here)
gs_mean = modFit_wg[8]
gs_std = modFit_wg[9]
# now organize the responses
orgs = [
    organize_modResp(mr, expData['sfm']['exp']['trial']) for mr in modResps
]
oriModResps = [org[0] for org in orgs]
conModResps = [org[1] for org in orgs]
sfmixModResps = [org[2] for org in orgs]
allSfMixs = [org[3] for org in orgs]
# now organize the measured responses in the same way
Example #12
    inhAsym = normParams

# descrFit, if exists
if descrFits is not None:
    descrParams = descrFits[cellNum - 1]['params']
else:
    descrParams = None

###########
# Organize data
###########
# #### determine contrasts, center spatial frequency, dispersions

modResp = mod_resp.SFMGiveBof(modFit,
                              expData,
                              normType=fitType,
                              lossType=lossType,
                              expInd=expInd)[1]
# now organize the responses
orgs = hf.organize_resp(modResp, expData, expInd)
oriModResp = orgs[0]
# only non-empty if expInd = 1
conModResp = orgs[1]
# only non-empty if expInd = 1
sfmixModResp = orgs[2]
allSfMix = orgs[3]

modLow = np.nanmin(allSfMix, axis=3)
modHigh = np.nanmax(allSfMix, axis=3)
modAvg = np.nanmean(allSfMix, axis=3)
modSponRate = modFit[6]
Example #13
    expData,
    expInd,
    overwriteSpikes=spikes,
    respsAsRates=rates,
    modsAsRate=rates)

if fitList is None:
    resps = resps_data
    # otherwise, we'll still keep resps_data for reference
elif fitList is not None:  # OVERWRITE the data with the model spikes!
    if use_mod_resp == 1:
        curr_fit = fitList[which_cell - 1]['params']
        modResp = mod_resp.SFMGiveBof(curr_fit,
                                      S,
                                      normType=fitType,
                                      lossType=lossType,
                                      expInd=expInd,
                                      cellNum=which_cell,
                                      excType=excType)[1]
        if f1f0_rat < 1:  # then subtract baseline..
            modResp = modResp - baseline * hf.get_exp_params(expInd).stimDur
        # now organize the responses
        resps, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(
            expData,
            expInd,
            overwriteSpikes=modResp,
            respsAsRates=False,
            modsAsRate=False)
    elif use_mod_resp == 2:  # then pytorch model!
        resp_str = hf_sf.get_resp_str(respMeasure)
        curr_fit = fitList[which_cell - 1][resp_str]['params']
Example #14
# TEMP HACK
modParamsCurr[2] = modParamsCurr[2] / 1.5
modParamsCurr[4] = modParamsCurr[4] * 10

if len(normTypeArr) == 3:  # i.e. we've passed in gs_mean, gs_std, then replace...
    modParamsCurr[-2] = normTypeArr[1]
    modParamsCurr[-1] = normTypeArr[2]

# ### Organize data
# #### determine contrasts, center spatial frequency, dispersions

data = cellStruct['sfm']['exp']['trial']

ignore, modRespAll, normTypeArr = model_responses.SFMGiveBof(
    modParamsCurr, cellStruct, normTypeArr)
norm_type = normTypeArr[0]
if norm_type == 1:
    gs_mean = normTypeArr[1]
    # guaranteed to exist after call to .SFMGiveBof, if norm_type == 1
    gs_std = normTypeArr[2]
    # guaranteed to exist ...
#modRespAll = model_responses.SFMGiveBof(modParamsCurr, cellStruct, normTypeArr)[1]; # NOTE: We're taking [1] (i.e. second) output of SFMGiveBof
resp, stimVals, val_con_by_disp, validByStimVal, modResp = helper_fcns.tabulate_responses(
    cellStruct, modRespAll)
blankMean, blankStd, _ = helper_fcns.blankResp(cellStruct)
modBlankMean = modParamsCurr[6]
# late additive noise is the baseline of the model
# all responses on log ordinate (y axis) should be baseline subtracted

all_disps = stimVals[0]
def plot_save_superposition(which_cell, expDir, use_mod_resp=0, fitType=2, excType=1, useHPCfit=1, conType=None, lgnFrontEnd=None, force_full=1, f1_expCutoff=2, to_save=1):

  if use_mod_resp == 2:
    rvcAdj   = -1; # this means vec corrected F1, not phase adjustment F1...
    _applyLGNtoNorm = 0; # don't apply the LGN front-end to the gain control weights
    recenter_norm = 1;
    newMethod = 1; # yes, use the "new" method for mrpt (not that new anymore, as of 21.03)
    lossType = 1; # sqrt
    _sigmoidSigma = 5;

  basePath = os.getcwd() + '/'
  if 'pl1465' in basePath or useHPCfit:
    loc_str = 'HPC';
  else:
    loc_str = '';

  rvcName = 'rvcFits%s_220531' % loc_str if expDir=='LGN/' else 'rvcFits%s_220609' % loc_str
  rvcFits = None; # pre-define this as None; will be overwritten if available/needed
  if expDir == 'altExp/': # we don't adjust responses there...
    rvcName = None;
  dFits_base = 'descrFits%s_220609' % loc_str if expDir=='LGN/' else 'descrFits%s_220631' % loc_str
  if use_mod_resp == 1:
    rvcName = None; # Use NONE if getting model responses, only
    if excType == 1:
      fitBase = 'fitList_200417';
    elif excType == 2:
      fitBase = 'fitList_200507';
    lossType = 1; # sqrt
    fitList_nm = hf.fitList_name(fitBase, fitType, lossType=lossType);
  elif use_mod_resp == 2:
    rvcName = None; # Use NONE if getting model responses, only
    if excType == 1:
      fitBase = 'fitList%s_210308_dG' % loc_str
      if recenter_norm:
        #fitBase = 'fitList%s_pyt_210312_dG' % loc_str
        fitBase = 'fitList%s_pyt_210331_dG' % loc_str
    elif excType == 2:
      fitBase = 'fitList%s_pyt_210310' % loc_str
      if recenter_norm:
        #fitBase = 'fitList%s_pyt_210312' % loc_str
        fitBase = 'fitList%s_pyt_210331' % loc_str
    fitList_nm = hf.fitList_name(fitBase, fitType, lossType=lossType, lgnType=lgnFrontEnd, lgnConType=conType, vecCorrected=-rvcAdj);

  # ^^^ EDIT rvc/descrFits/fitList names here; 

  ############
  # Before any plotting, fix plotting parameters
  ############
  plt.style.use('https://raw.githubusercontent.com/paul-levy/SF_diversity/master/paul_plt_style.mplstyle');
  from matplotlib import rcParams
  rcParams['font.size'] = 20;
  rcParams['pdf.fonttype'] = 42 # should be 42, but there are kerning issues
  rcParams['ps.fonttype'] = 42 # should be 42, but there are kerning issues
  rcParams['lines.linewidth'] = 2.5;
  rcParams['axes.linewidth'] = 1.5;
  rcParams['lines.markersize'] = 8; # this is in style sheet, just being explicit
  rcParams['lines.markeredgewidth'] = 0; # no edge, since weird things happen then

  rcParams['xtick.major.size'] = 15
  rcParams['xtick.minor.size'] = 5;
  rcParams['ytick.major.size'] = 15
  rcParams['ytick.minor.size'] = 0; # no minor ticks

  rcParams['xtick.major.width'] = 2
  rcParams['xtick.minor.width'] = 2;
  rcParams['ytick.major.width'] = 2
  rcParams['ytick.minor.width'] = 0

  rcParams['font.style'] = 'oblique';
  rcParams['font.size'] = 20;

  ############
  # load everything
  ############
  dataListNm = hf.get_datalist(expDir, force_full=force_full);
  descrFits_f0 = None;
  dLoss_num = 2; # see hf.descrFit_name/descrMod_name/etc for details
  if expDir == 'LGN/':
    rvcMod = 0; 
    dMod_num = 1;
    rvcDir = 1;
    vecF1 = -1;
  else:
    rvcMod = 1; # i.e. Naka-Rushton (1)
    dMod_num = 3; # d-dog-s
    rvcDir = None; # None if we're doing vec-corrected
    if expDir == 'altExp/':
      vecF1 = 0;
    else:
      vecF1 = 1;

  dFits_mod = hf.descrMod_name(dMod_num)
  descrFits_name = hf.descrFit_name(lossType=dLoss_num, descrBase=dFits_base, modelName=dFits_mod, phAdj=1 if vecF1==-1 else None);

  ## now, let it run
  dataPath = basePath + expDir + 'structures/'
  save_loc = basePath + expDir + 'figures/'
  save_locSuper = save_loc + 'superposition_220713/'
  if use_mod_resp == 1:
    save_locSuper = save_locSuper + '%s/' % fitBase

  dataList = hf.np_smart_load(dataPath + dataListNm);
  print('Trying to load descrFits at: %s' % (dataPath + descrFits_name));
  descrFits = hf.np_smart_load(dataPath + descrFits_name);
  if use_mod_resp == 1 or use_mod_resp == 2:
    fitList = hf.np_smart_load(dataPath + fitList_nm);
  else:
    fitList = None;

  if not os.path.exists(save_locSuper):
    os.makedirs(save_locSuper)

  cells = np.arange(1, 1+len(dataList['unitName']))

  zr_rm = lambda x: x[x>0];
  # more flexible - only get values where x AND z are greater than some value "gt" (e.g. 0, 1, 0.4, ...)
  zr_rm_pair = lambda x, z, gt: [x[np.logical_and(x>gt, z>gt)], z[np.logical_and(x>gt, z>gt)]];
  # zr_rm_pair = lambda x, z: [x[np.logical_and(x>0, z>0)], z[np.logical_and(x>0, z>0)]] if np.logical_and(x!=[], z!=[])==True else [], [];
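  # e.g., zr_rm_pair(np.array([0, 2, 3]), np.array([1, 0, 4]), 0) -> [array([3]), array([4])]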

  # here, we'll save measures we are going to use for analysis purposes - e.g. suppression index, c50
  curr_suppr = dict();

  ############
  ### Establish the plot, load cell-specific measures
  ############
  nRows, nCols = 6, 2;
  cellName = dataList['unitName'][which_cell-1];
  expInd = hf.get_exp_ind(dataPath, cellName)[0]
  S = hf.np_smart_load(dataPath + cellName + '_sfm.npy')
  expData = S['sfm']['exp']['trial'];

  # 0th, let's load the basic tuning characterizations AND the descriptive fit
  try:
    dfit_curr = descrFits[which_cell-1]['params'][0,-1,:]; # single grating, highest contrast
  except:
    dfit_curr = None;
  # - then the basics
  try:
    basic_names, basic_order = dataList['basicProgName'][which_cell-1], dataList['basicProgOrder']
    basics = hf.get_basic_tunings(basic_names, basic_order);
  except:
    try:
      # we've already put the basics in the data structure... (i.e. post-sorting 2021 data)
      basic_names = ['','','','',''];
      basic_order = ['rf', 'sf', 'tf', 'rvc', 'ori']; # order doesn't matter if they are already loaded
      basics = hf.get_basic_tunings(basic_names, basic_order, preProc=S, reducedSave=True)
    except:
      basics = None;

  ### TEMPORARY: save the "basics" in curr_suppr; should live on its own, though; TODO
  curr_suppr['basics'] = basics;

  try:
    oriBW, oriCV = basics['ori']['bw'], basics['ori']['cv'];
  except:
    oriBW, oriCV = np.nan, np.nan;
  try:
    tfBW = basics['tf']['tfBW_oct'];
  except:
    tfBW = np.nan;
  try:
    suprMod = basics['rfsize']['suprInd_model'];
  except:
    suprMod = np.nan;
  try:
    suprDat = basics['rfsize']['suprInd_data'];
  except:
    suprDat = np.nan;

  try:
    cellType = dataList['unitType'][which_cell-1];
  except:
    # TODO: note, this is dangerous; thus far, only V1 cells don't have 'unitType' field in dataList, so we can safely do this
    cellType = 'V1';


  ############
  ### compute f1f0 ratio, and load the corresponding F0 or F1 responses
  ############
  f1f0_rat = hf.compute_f1f0(expData, which_cell, expInd, dataPath, descrFitName_f0=descrFits_f0)[0];
  curr_suppr['f1f0'] = f1f0_rat;
  respMeasure = 1 if f1f0_rat > 1 else 0;
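  # respMeasure: 1 -> analyze F1 (simple cell); 0 -> analyze F0/DC (complex cell)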

  if vecF1 == 1:
    # get the correct, adjusted F1 response
    if expInd > f1_expCutoff and respMeasure == 1:
      respOverwrite = hf.adjust_f1_byTrial(expData, expInd);
    else:
      respOverwrite = None;

  if (respMeasure == 1 or expDir == 'LGN/') and expDir != 'altExp/' : # i.e. if we're looking at a simple cell, then let's get F1
    if vecF1 == 1:
      spikes_byComp = respOverwrite
      # then, sum up the valid components per stimulus component
      allCons = np.vstack(expData['con']).transpose();
      blanks = np.where(allCons==0);
      spikes_byComp[blanks] = 0; # just set it to 0 if that component was blank during the trial
    else:
      if rvcName is not None:
        try:
          rvcFits = hf.get_rvc_fits(dataPath, expInd, which_cell, rvcName=rvcName, rvcMod=rvcMod, direc=rvcDir, vecF1=vecF1);
        except:
          rvcFits = None;
      else:
        rvcFits = None
      spikes_byComp = hf.get_spikes(expData, get_f0=0, rvcFits=rvcFits, expInd=expInd);
    spikes = np.array([np.sum(x) for x in spikes_byComp]);
    rates = True if vecF1 == 0 else False; # when we get the spikes from rvcFits, they've already been converted into rates (in hf.get_all_fft)
    baseline = None; # f1 has no "DC", yadig?
  else: # otherwise, if it's complex, just get F0
    respMeasure = 0;
    spikes = hf.get_spikes(expData, get_f0=1, rvcFits=None, expInd=expInd);
    rates = False; # get_spikes without rvcFits is directly from spikeCount, which is counts, not rates!
    baseline = hf.blankResp(expData, expInd)[0]; # we'll plot the spontaneous rate
    # why mult by stimDur? well, spikes are not rates but baseline is, so we convert baseline to count (i.e. not rate, too)
    spikes = spikes - baseline*hf.get_exp_params(expInd).stimDur; 

  #print('###\nGetting spikes (data): rates? %d\n###' % rates);
  _, _, _, respAll = hf.organize_resp(spikes, expData, expInd, respsAsRate=rates); # only using respAll to get variance measures
  resps_data, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=spikes, respsAsRates=rates, modsAsRate=rates);

  if fitList is None:
    resps = resps_data; # otherwise, we'll still keep resps_data for reference
  elif fitList is not None: # OVERWRITE the data with the model spikes!
    if use_mod_resp == 1:
      curr_fit = fitList[which_cell-1]['params'];
      modResp = mod_resp.SFMGiveBof(curr_fit, S, normType=fitType, lossType=lossType, expInd=expInd, cellNum=which_cell, excType=excType)[1];
      if f1f0_rat < 1: # then subtract baseline..
        modResp = modResp - baseline*hf.get_exp_params(expInd).stimDur; 
      # now organize the responses
      resps, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=modResp, respsAsRates=False, modsAsRate=False);
    elif use_mod_resp == 2: # then pytorch model!
      resp_str = hf_sf.get_resp_str(respMeasure)
      curr_fit = fitList[which_cell-1][resp_str]['params'];
      model = mrpt.sfNormMod(curr_fit, expInd=expInd, excType=excType, normType=fitType, lossType=lossType, lgnFrontEnd=lgnFrontEnd, newMethod=newMethod, lgnConType=conType, applyLGNtoNorm=_applyLGNtoNorm)
      ### get the vec-corrected responses, if applicable
      if expInd > f1_expCutoff and respMeasure == 1:
        respOverwrite = hf.adjust_f1_byTrial(expData, expInd);
      else:
        respOverwrite = None;

      dw = mrpt.dataWrapper(expData, respMeasure=respMeasure, expInd=expInd, respOverwrite=respOverwrite); # respOverwrite defined above (None if DC or if expInd=-1)
      modResp = model.forward(dw.trInf, respMeasure=respMeasure, sigmoidSigma=_sigmoidSigma, recenter_norm=recenter_norm).detach().numpy();

      if respMeasure == 1: # make sure the blank components have a zero response (we'll do the same with the measured responses)
        blanks = np.where(dw.trInf['con']==0);
        modResp[blanks] = 0;
        # next, sum up across components
        modResp = np.sum(modResp, axis=1);
      # finally, make sure this fills out a vector of all responses (just have nan for non-modelled trials)
      nTrialsFull = len(expData['num']);
      modResp_full = np.nan * np.zeros((nTrialsFull, ));
      modResp_full[dw.trInf['num']] = modResp;

      if respMeasure == 0: # if DC, then subtract baseline..., as determined from data (why not model? we aren't yet calc. response to no stim, though it can be done)
        modResp_full = modResp_full - baseline*hf.get_exp_params(expInd).stimDur;

      # TODO: This is a work around for which measures are in rates vs. counts (DC vs F1, model vs data...)
      stimDur = hf.get_exp_params(expInd).stimDur;
      asRates = False;
      #divFactor = stimDur if asRates == 0 else 1;
      #modResp_full = np.divide(modResp_full, divFactor);
      # now organize the responses
      resps, stimVals, val_con_by_disp, _, _ = hf.tabulate_responses(expData, expInd, overwriteSpikes=modResp_full, respsAsRates=asRates, modsAsRate=asRates);

  predResps = resps[2];

  respMean = resps[0]; # mean response per condition
  respStd = np.nanstd(respAll, -1); # take std of all responses for a given condition
  # compute SEM, too
  findNaN = np.isnan(respAll);
  nonNaN  = np.sum(findNaN == False, axis=-1);
  respSem = np.nanstd(respAll, -1) / np.sqrt(nonNaN);

  ############
  ### first, fit a smooth function to the overall pred V measured responses
  ### --- from this, we can measure how each example superposition deviates from a central tendency
  ### --- i.e. the residual relative to the "standard" input:output relationship
  ############
  all_resps = respMean[1:, :, :].flatten() # all disp>0
  all_preds = predResps[1:, :, :].flatten() # all disp>0
  # a model which allows negative fits
  #         myFit = lambda x, t0, t1, t2: t0 + t1*x + t2*x*x;
  #         non_nan = np.where(~np.isnan(all_preds)); # cannot fit negative values with naka-rushton...
  #         fitz, _ = opt.curve_fit(myFit, all_preds[non_nan], all_resps[non_nan], p0=[-5, 10, 5], maxfev=5000)
  # naka rushton
  myFit = lambda x, g, expon, c50: hf.naka_rushton(x, [0, g, expon, c50]) 
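  # per this call, hf.naka_rushton takes [baseline, gain, exponent, c50]; the baseline is pinned at 0 here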
  non_neg = np.where(all_preds>0) # cannot fit negative values with naka-rushton...
  try:
    if use_mod_resp == 1: # the reference will ALWAYS be the data -- redo the above analysis for data
      predResps_data = resps_data[2];
      respMean_data = resps_data[0];
      all_resps_data = respMean_data[1:, :, :].flatten() # all disp>0
      all_preds_data = predResps_data[1:, :, :].flatten() # all disp>0
      non_neg_data = np.where(all_preds_data>0) # cannot fit negative values with naka-rushton...
      fitz, _ = opt.curve_fit(myFit, all_preds_data[non_neg_data], all_resps_data[non_neg_data], p0=[100, 2, 25], maxfev=5000)
    else:
      fitz, _ = opt.curve_fit(myFit, all_preds[non_neg], all_resps[non_neg], p0=[100, 2, 25], maxfev=5000)
    rel_c50 = np.divide(fitz[-1], np.max(all_preds[non_neg]));
  except:
    fitz = None;
    rel_c50 = -99;

  ############
  ### organize stimulus information
  ############
  all_disps = stimVals[0];
  all_cons = stimVals[1];
  all_sfs = stimVals[2];

  nCons = len(all_cons);
  nSfs = len(all_sfs);
  nDisps = len(all_disps);

  maxResp = np.maximum(np.nanmax(respMean), np.nanmax(predResps));
  # by disp
  clrs_d = cm.viridis(np.linspace(0,0.75,nDisps-1));
  lbls_d = ['disp: %s' % str(x) for x in range(nDisps)];
  # by sf
  val_sfs = hf.get_valid_sfs(S, disp=1, con=val_con_by_disp[1][0], expInd=expInd) # pick 
  clrs_sf = cm.viridis(np.linspace(0,.75,len(val_sfs)));
  lbls_sf = ['sf: %.2f' % all_sfs[x] for x in val_sfs];
  # by con
  val_con = all_cons;
  clrs_con = cm.viridis(np.linspace(0,.75,len(val_con)));
  lbls_con = ['con: %.2f' % x for x in val_con];

  ############
  ### create the figure
  ############
  fSuper, ax = plt.subplots(nRows, nCols, figsize=(10*nCols, 8*nRows))
  sns.despine(fig=fSuper, offset=10)

  allMix = [];
  allSum = [];

  ### plot reference tuning [row 1 (i.e. 2nd row)]
  ## on the right, SF tuning (high contrast)
  sfRef = hf.nan_rm(respMean[0, :, -1]); # high contrast tuning
  ax[1, 1].plot(all_sfs, sfRef, 'k-', marker='o', label='ref. tuning (d0, high con)', clip_on=False)
  ax[1, 1].set_xscale('log')
  ax[1, 1].set_xlim((0.1, 10));
  ax[1, 1].set_xlabel('sf (c/deg)')
  ax[1, 1].set_ylabel('response (spikes/s)')
  ax[1, 1].set_ylim((-5, 1.1*np.nanmax(sfRef)));
  ax[1, 1].legend(fontsize='x-small');

  #####
  ## then on the left, RVC (peak SF)
  #####
  sfPeak = np.argmax(sfRef); # stupid/simple, but just get the rvc for the max response
  v_cons_single = val_con_by_disp[0]
  rvcRef = hf.nan_rm(respMean[0, sfPeak, v_cons_single]);
  # now, if possible, let's also plot the RVC fit
  if rvcFits is not None:
    rvcFits = hf.get_rvc_fits(dataPath, expInd, which_cell, rvcName=rvcName, rvcMod=rvcMod);
    rel_rvc = rvcFits[0]['params'][sfPeak]; # we get 0 dispersion, peak SF
    plt_cons = np.geomspace(all_cons[0], all_cons[-1], 50);
    c50, pk = hf.get_c50(rvcMod, rel_rvc), rvcFits[0]['conGain'][sfPeak];
    c50_emp, c50_eval = hf.c50_empirical(rvcMod, rel_rvc); # determine c50 by optimization, numerical approx.
    if rvcMod == 0:
      rvc_mod = hf.get_rvc_model();
      rvcmodResp = rvc_mod(*rel_rvc, plt_cons);
    else: # i.e. mod=1 or mod=2
      rvcmodResp = hf.naka_rushton(plt_cons, rel_rvc);
    if baseline is not None:
      rvcmodResp = rvcmodResp - baseline; 
    ax[1, 0].plot(plt_cons, rvcmodResp, 'k--', label='rvc fit (c50=%.2f, gain=%.0f)' %(c50, pk))
    # and save it
    curr_suppr['c50'] = c50; curr_suppr['conGain'] = pk;
    curr_suppr['c50_emp'] = c50_emp; curr_suppr['c50_emp_eval'] = c50_eval
  else:
    curr_suppr['c50'] = np.nan; curr_suppr['conGain'] = np.nan;
    curr_suppr['c50_emp'] = np.nan; curr_suppr['c50_emp_eval'] = np.nan;

  ax[1, 0].plot(all_cons[v_cons_single], rvcRef, 'k-', marker='o', label='ref. tuning (d0, peak SF)', clip_on=False)
  #         ax[1, 0].set_xscale('log')
  ax[1, 0].set_xlabel('contrast (%)');
  ax[1, 0].set_ylabel('response (spikes/s)')
  ax[1, 0].set_ylim((-5, 1.1*np.nanmax(rvcRef)));
  ax[1, 0].legend(fontsize='x-small');

  # plot the fitted model on each axis
  pred_plt = np.linspace(0, np.nanmax(all_preds), 100);
  if fitz is not None:
    ax[0, 0].plot(pred_plt, myFit(pred_plt, *fitz), 'r--', label='fit')
    ax[0, 1].plot(pred_plt, myFit(pred_plt, *fitz), 'r--', label='fit')

  for d in range(nDisps):
    if d == 0: # we don't care about single gratings!
      dispRats = [];
      continue; 
    v_cons = np.array(val_con_by_disp[d]);
    n_v_cons = len(v_cons);

    # plot split out by each contrast [0,1]
    for c in reversed(range(n_v_cons)):
      v_sfs = hf.get_valid_sfs(S, d, v_cons[c], expInd)
      for s in v_sfs:
        mixResp = respMean[d, s, v_cons[c]];
        allMix.append(mixResp);
        sumResp = predResps[d, s, v_cons[c]];
        allSum.append(sumResp);
  #      print('condition: d(%d), c(%d), sf(%d):: pred(%.2f)|real(%.2f)' % (d, v_cons[c], s, sumResp, mixResp))
        # PLOT in by-disp panel
        if c == 0 and s == v_sfs[0]:
          ax[0, 0].plot(sumResp, mixResp, 'o', color=clrs_d[d-1], label=lbls_d[d], clip_on=False)
        else:
          ax[0, 0].plot(sumResp, mixResp, 'o', color=clrs_d[d-1], clip_on=False)
        # PLOT in by-sf panel
        sfInd = np.where(np.array(v_sfs) == s)[0][0]; # will only be one entry, so just "unpack"
        try:
          if d == 1 and c == 0:
            ax[0, 1].plot(sumResp, mixResp, 'o', color=clrs_sf[sfInd], label=lbls_sf[sfInd], clip_on=False);
          else:
            ax[0, 1].plot(sumResp, mixResp, 'o', color=clrs_sf[sfInd], clip_on=False);
        except:
          pass;
          #pdb.set_trace();
        # plot baseline, if f0...
  #       if baseline is not None:
  #         [ax[0, i].axhline(baseline, linestyle='--', color='k', label='spon. rate') for i in range(2)];


    # plot averaged across all cons/sfs (i.e. average for the whole dispersion) [1,0]
    mixDisp = respMean[d, :, :].flatten();
    sumDisp = predResps[d, :, :].flatten();
    mixDisp, sumDisp = zr_rm_pair(mixDisp, sumDisp, 0.5);
    curr_rats = np.divide(mixDisp, sumDisp)
    curr_mn = geomean(curr_rats); curr_std = np.std(np.log10(curr_rats));
  #  curr_rat = geomean(np.divide(mixDisp, sumDisp));
    ax[2, 0].bar(d, curr_mn, yerr=curr_std, color=clrs_d[d-1]);
    ax[2, 0].set_yscale('log')
    ax[2, 0].set_ylim(0.1, 10);
  #  ax[2, 0].yaxis.set_ticks(minorticks)
    dispRats.append(curr_mn);
  #  ax[2, 0].bar(d, np.mean(np.divide(mixDisp, sumDisp)), color=clrs_d[d-1]);

    # also, let's plot the (signed) error relative to the fit
    if fitz is not None:
      errs = mixDisp - myFit(sumDisp, *fitz);
      ax[3, 0].bar(d, np.mean(errs), yerr=np.std(errs), color=clrs_d[d-1])
      # -- and normalized by the prediction output response
      errs_norm = np.divide(mixDisp - myFit(sumDisp, *fitz), myFit(sumDisp, *fitz));
      ax[4, 0].bar(d, np.mean(errs_norm), yerr=np.std(errs_norm), color=clrs_d[d-1])

    # and set some labels/lines, as needed
    if d == 1:
        ax[2, 0].set_xlabel('dispersion');
        ax[2, 0].set_ylabel('suppression ratio (linear)')
        ax[2, 0].axhline(1, ls='--', color='k')
        ax[3, 0].set_xlabel('dispersion');
        ax[3, 0].set_ylabel('mean (signed) error')
        ax[3, 0].axhline(0, ls='--', color='k')
        ax[4, 0].set_xlabel('dispersion');
        ax[4, 0].set_ylabel('mean (signed) error -- as frac. of fit prediction')
        ax[4, 0].axhline(0, ls='--', color='k')

    curr_suppr['supr_disp'] = dispRats;

  ### plot averaged across all cons/disps
  sfInds = []; sfRats = []; sfRatStd = []; 
  sfErrs = []; sfErrsStd = []; sfErrsInd = []; sfErrsIndStd = []; sfErrsRat = []; sfErrsRatStd = [];
  curr_errNormFactor = [];
  for s in range(len(val_sfs)):
    try: # not all sfs will have legitimate values;
      # only get mixtures (i.e. ignore single gratings)
      mixSf = respMean[1:, val_sfs[s], :].flatten();
      sumSf = predResps[1:, val_sfs[s], :].flatten();
      mixSf, sumSf = zr_rm_pair(mixSf, sumSf, 0.5);
      rats_curr = np.divide(mixSf, sumSf); 
      sfInds.append(s); sfRats.append(geomean(rats_curr)); sfRatStd.append(np.std(np.log10(rats_curr)));

      if fitz is not None:
        #curr_NR = myFit(sumSf, *fitz); # unvarnished
        curr_NR = np.maximum(myFit(sumSf, *fitz), 0.5); # thresholded at 0.5...

        curr_err = mixSf - curr_NR;
        sfErrs.append(np.mean(curr_err));
        sfErrsStd.append(np.std(curr_err))

        curr_errNorm = np.divide(mixSf - curr_NR, mixSf + curr_NR);
        sfErrsInd.append(np.mean(curr_errNorm));
        sfErrsIndStd.append(np.std(curr_errNorm))

        curr_errRat = np.divide(mixSf, curr_NR);
        sfErrsRat.append(np.mean(curr_errRat));
        sfErrsRatStd.append(np.std(curr_errRat));

        curr_normFactors = np.array(curr_NR)
        curr_errNormFactor.append(geomean(curr_normFactors[curr_normFactors>0]));
      else:
        sfErrs.append([]);
        sfErrsStd.append([]);
        sfErrsInd.append([]);
        sfErrsIndStd.append([]);
        sfErrsRat.append([]);
        sfErrsRatStd.append([]);
        curr_errNormFactor.append([]);
    except:
      pass

  # get the offset/scale of the ratio so that we can plot a rescaled/flipped version of
  # the high con/single grat tuning for reference...does the suppression match the response?
  offset, scale = np.nanmax(sfRats), np.nanmax(sfRats) - np.nanmin(sfRats);
  sfRef = hf.nan_rm(respMean[0, val_sfs, -1]); # high contrast tuning
  sfRefShift = offset - scale * (sfRef/np.nanmax(sfRef))
  ax[2,1].scatter(all_sfs[val_sfs][sfInds], sfRats, color=clrs_sf[sfInds], clip_on=False)
  ax[2,1].errorbar(all_sfs[val_sfs][sfInds], sfRats, sfRatStd, color='k', linestyle='-', clip_on=False, label='suppression tuning')
  #         ax[2,1].plot(all_sfs[val_sfs][sfInds], sfRats, 'k-', clip_on=False, label='suppression tuning')
  ax[2,1].plot(all_sfs[val_sfs], sfRefShift, 'k--', label='ref. tuning', clip_on=False)
  ax[2,1].axhline(1, ls='--', color='k')
  ax[2,1].set_xlabel('sf (cpd)')
  ax[2,1].set_xscale('log')
  ax[2,1].set_xlim((0.1, 10));
  #ax[2,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
  ax[2,1].set_ylabel('suppression ratio');
  ax[2,1].set_yscale('log')
  #ax[2,1].yaxis.set_ticks(minorticks)
  ax[2,1].set_ylim(0.1, 10);        
  ax[2,1].legend(fontsize='x-small');
  curr_suppr['supr_sf'] = sfRats;

  ### residuals from fit of suppression
  if fitz is not None:
    # mean signed error: and labels/plots for the error as f'n of SF
    ax[3,1].axhline(0, ls='--', color='k')
    ax[3,1].set_xlabel('sf (cpd)')
    ax[3,1].set_xscale('log')
    ax[3,1].set_xlim((0.1, 10));
    #ax[3,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
    ax[3,1].set_ylabel('mean (signed) error');
    ax[3,1].errorbar(all_sfs[val_sfs][sfInds], sfErrs, sfErrsStd, color='k', marker='o', linestyle='-', clip_on=False)
    # -- and normalized by the sum of the prediction and the mixture response
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsIndStd)>0, np.array(sfErrsIndStd) < 2));
    norm_subset = np.array(sfErrsInd)[val_errs];
    normStd_subset = np.array(sfErrsIndStd)[val_errs];
    ax[4,1].axhline(0, ls='--', color='k')
    ax[4,1].set_xlabel('sf (cpd)')
    ax[4,1].set_xscale('log')
    ax[4,1].set_xlim((0.1, 10));
    #ax[4,1].set_xlim((np.min(all_sfs), np.max(all_sfs)));
    ax[4,1].set_ylim((-1, 1));
    ax[4,1].set_ylabel('error index');
    ax[4,1].errorbar(all_sfs[val_sfs][sfInds][val_errs], norm_subset, normStd_subset, color='k', marker='o', linestyle='-', clip_on=False)
    # -- AND simply the ratio between the mixture response and the mean expected mix response (i.e. Naka-Rushton)
    # --- equivalent to the suppression ratio, but relative to the NR fit rather than perfect linear summation
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsRatStd)>0, np.array(sfErrsRatStd) < 2));
    rat_subset = np.array(sfErrsRat)[val_errs];
    ratStd_subset = np.array(sfErrsRatStd)[val_errs];
    #ratStd_subset = (1/np.log(2))*np.divide(np.array(sfErrsRatStd)[val_errs], rat_subset);
    ax[5,1].scatter(all_sfs[val_sfs][sfInds][val_errs], rat_subset, color=clrs_sf[sfInds][val_errs], clip_on=False)
    ax[5,1].errorbar(all_sfs[val_sfs][sfInds][val_errs], rat_subset, ratStd_subset, color='k', linestyle='-', clip_on=False, label='suppression tuning')
    ax[5,1].axhline(1, ls='--', color='k')
    ax[5,1].set_xlabel('sf (cpd)')
    ax[5,1].set_xscale('log')
    ax[5,1].set_xlim((0.1, 10));
    ax[5,1].set_ylabel('suppression ratio (wrt NR)');
    ax[5,1].set_yscale('log', basey=2)
  #         ax[2,1].yaxis.set_ticks(minorticks)
    ax[5,1].set_ylim(np.power(2.0, -2), np.power(2.0, 2));
    ax[5,1].legend(fontsize='x-small');
    # - compute the variance - and put that value on the plot
    errsRatVar = np.var(np.log2(sfErrsRat)[val_errs]);
    curr_suppr['sfRat_VAR'] = errsRatVar;
    ax[5,1].text(0.1, 2, 'var=%.2f' % errsRatVar);

    # compute the variance of the error index (sfErrsInd) across the valid SF values
    val_errs = np.logical_and(~np.isnan(sfErrsRat), np.logical_and(np.array(sfErrsIndStd)>0, np.array(sfErrsIndStd) < 2));
    val_x = all_sfs[val_sfs][sfInds][val_errs];
    ind_var = np.var(np.array(sfErrsInd)[val_errs]);
    curr_suppr['sfErrsInd_VAR'] = ind_var;
    # - and put that value on the plot
    ax[4,1].text(0.1, -0.25, 'var=%.3f' % ind_var);
  else:
    curr_suppr['sfErrsInd_VAR'] = np.nan
    curr_suppr['sfRat_VAR'] = np.nan

  #########
  ### NOW, let's evaluate the derivative of the SF tuning curve and get the correlation with the errors
  #########
  mod_sfs = np.geomspace(all_sfs[0], all_sfs[-1], 1000);
  mod_resp = hf.get_descrResp(dfit_curr, mod_sfs, DoGmodel=dMod_num);
  deriv = np.divide(np.diff(mod_resp), np.diff(np.log10(mod_sfs)))
  deriv_norm = np.divide(deriv, np.maximum(np.nanmax(deriv), np.abs(np.nanmin(deriv)))); # make the maximum response 1 (or -1)
  # - then, what indices to evaluate for comparing with sfErr?
  errSfs = all_sfs[val_sfs][sfInds];
  mod_inds = [np.argmin(np.square(mod_sfs-x)) for x in errSfs];
  deriv_norm_eval = deriv_norm[mod_inds];
  # -- plot on [1, 1] (i.e. where the data is)
  ax[1,1].plot(mod_sfs, mod_resp, 'k--', label='fit (g)')
  ax[1,1].legend();
  # Duplicate "twin" the axis to create a second y-axis
  ax2 = ax[1,1].twinx();
  ax2.set_xscale('log'); # have to re-inforce log-scale?
  ax2.set_ylim([-1, 1]); # since the g' is normalized
  # make a plot with different y-axis using second axis object
  ax2.plot(mod_sfs[1:], deriv_norm, '--', color="red", label='g\'');
  ax2.set_ylabel("deriv. (normalized)",color="red")
  ax2.legend();
  sns.despine(ax=ax2, offset=10, right=False);
  # -- and let's plot rescaled and shifted version in [2,1]
  offset, scale = np.nanmax(sfRats), np.nanmax(sfRats) - np.nanmin(sfRats);
  derivShift = offset - scale * (deriv_norm/np.nanmax(deriv_norm));
  ax[2,1].plot(mod_sfs[1:], derivShift, 'r--', label='deriv(ref. tuning)', clip_on=False)
  ax[2,1].legend(fontsize='x-small');
  # - then, normalize the sfErrs/sfErrsInd and compute the correlation coefficient
  if fitz is not None:
    norm_sfErr = np.divide(sfErrs, np.nanmax(np.abs(sfErrs)));
    norm_sfErrInd = np.divide(sfErrsInd, np.nanmax(np.abs(sfErrsInd))); # remember, sfErrsInd is normalized per condition; this is overall
    non_nan = np.logical_and(~np.isnan(norm_sfErr), ~np.isnan(deriv_norm_eval))
    corr_nsf, corr_nsfN = np.corrcoef(deriv_norm_eval[non_nan], norm_sfErr[non_nan])[0,1], np.corrcoef(deriv_norm_eval[non_nan], norm_sfErrInd[non_nan])[0,1]
    curr_suppr['corr_derivWithErr'] = corr_nsf;
    curr_suppr['corr_derivWithErrsInd'] = corr_nsfN;
    ax[3,1].text(0.1, 0.25*np.nanmax(sfErrs), 'corr w/g\' = %.2f' % corr_nsf)
    ax[4,1].text(0.1, 0.25, 'corr w/g\' = %.2f' % corr_nsfN)
  else:
    curr_suppr['corr_derivWithErr'] = np.nan;
    curr_suppr['corr_derivWithErrsInd'] = np.nan;

  # make a polynomial fit
  try:
    hmm = np.polyfit(allSum, allMix, deg=1) # returns [a, b] in ax + b 
  except:
    hmm = [np.nan];
  curr_suppr['supr_index'] = hmm[0];

  for j in range(1):
    for jj in range(nCols):
      ax[j, jj].axis('square')
      ax[j, jj].set_xlabel('prediction: sum(components) (imp/s)');
      ax[j, jj].set_ylabel('mixture response (imp/s)');
      ax[j, jj].plot([0, 1*maxResp], [0, 1*maxResp], 'k--')
      ax[j, jj].set_xlim((-5, maxResp));
      ax[j, jj].set_ylim((-5, 1.1*maxResp));
      ax[j, jj].set_title('Suppression index: %.2f|%.2f' % (hmm[0], rel_c50))
      ax[j, jj].legend(fontsize='x-small');

  fSuper.suptitle('Superposition: %s #%d [%s; f1f0 %.2f; szSupr[dt/md] %.2f/%.2f; oriBW|CV %.2f|%.2f; tfBW %.2f]' % (cellType, which_cell, cellName, f1f0_rat, suprDat, suprMod, oriBW, oriCV, tfBW))

  if fitList is None:
    save_name = 'cell_%03d.pdf' % which_cell
  else:
    save_name = 'cell_%03d_mod%s.pdf' % (which_cell, hf.fitType_suffix(fitType))
  pdfSv = pltSave.PdfPages(str(save_locSuper + save_name));
  pdfSv.savefig(fSuper)
  pdfSv.close();

  #########
  ### Finally, add this "superposition" analysis to the newest saved dictionary
  #########

  if to_save:

    if fitList is None:
      from datetime import datetime
      suffix = datetime.today().strftime('%y%m%d')
      super_name = 'superposition_analysis_%s.npy' % suffix;
    else:
      super_name = 'superposition_analysis_mod%s.npy' % hf.fitType_suffix(fitType);

    pause_tm = 5*np.random.rand();
    print('sleeping for %d secs (#%d)' % (pause_tm, which_cell));
    time.sleep(pause_tm);

    if os.path.exists(dataPath + super_name):
      suppr_all = hf.np_smart_load(dataPath + super_name);
    else:
      suppr_all = dict();
    suppr_all[which_cell-1] = curr_suppr;
    np.save(dataPath + super_name, suppr_all);
  
  return curr_suppr;
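A hypothetical call (argument values illustrative only; see the defaults in the signature above):

curr_suppr = plot_save_superposition(which_cell=1, expDir='V1/', use_mod_resp=0, to_save=0)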