Example 1
def fit_all_CRF(cell_num,
                data_loc,
                each_c50,
                loss_type,
                n_iter=1,
                each_expn=0,
                each_base=0,
                each_gain=1):
    ''' Given the cell number and data location, load the data. Other inputs:
          each_c50/expn/base/gain : fit a separate c50/expn/base/gain for each condition?
          n_iter                  : how many fit iterations (i.e. optimization restarts)?
    '''
    print(str(n_iter) + ' fit attempts')
    np = numpy
    conDig = 3
    # round contrast to the thousandth
    n_params = 5
    # 4 for NR, 1 for varGain

    if each_c50 == 1:
        fit_key = 'fits_each_rpt'
    else:
        fit_key = 'fits_rpt'

    if loss_type == 1:
        loss_str = '-lsq'
    elif loss_type == 2:
        loss_str = '-sqrt'
    elif loss_type == 3:
        loss_str = '-poiss'
    elif loss_type == 4:
        loss_str = '-poissMod'
    else:
        raise ValueError('unknown loss_type: %s' % loss_type)
    fits_name = 'crfFitsCom' + loss_str + '.npy'
    dataList = hf.np_smart_load(str(data_loc + 'dataList.npy'))
    if os.path.isfile(data_loc + fits_name):
        crfFits = hf.np_smart_load(str(data_loc + fits_name))
    else:
        crfFits = dict()

    # load cell information
    cellStruct = hf.np_smart_load(
        str(data_loc + dataList['unitName'][cell_num - 1] + '_sfm.npy'))
    data = cellStruct['sfm']['exp']['trial']

    all_cons = np.unique(np.round(data['total_con'], conDig))
    all_cons = all_cons[~np.isnan(all_cons)]

    all_sfs = np.unique(data['cent_sf'])
    all_sfs = all_sfs[~np.isnan(all_sfs)]

    all_disps = np.unique(data['num_comps'])
    all_disps = all_disps[all_disps > 0]
    # ignore zero...

    nCons = len(all_cons)
    nSfs = len(all_sfs)
    nDisps = len(all_disps)

    nk_ru = dict()
    all_data = dict()

    # for use in fitting SF functions...
    _, _, blankResps = hf.blankResp(cellStruct)
    blankCons = np.zeros_like(blankResps)

    for d in range(nDisps):
        valid_disp = data['num_comps'] == all_disps[d]
        cons = []
        resps = []

        nk_ru[d] = dict()
        v_sfs = []
        # keep track of valid sfs
        all_data[d] = dict()

        for sf in range(nSfs):

            valid_sf = data['cent_sf'] == all_sfs[sf]

            valid_tr = valid_disp & valid_sf
            if not np.any(valid_tr):  # did we not find any trials?
                continue

            v_sfs.append(sf)
            nk_ru[d][sf] = dict()
            # create dictionary here; thus, only valid sfs have valid keys
            # for unpacking loss/parameters later...
            nk_ru[d][sf]['params'] = np.nan * np.zeros((n_params, 1))
            nk_ru[d][sf]['loss'] = np.nan

            resps.append(np.hstack((blankResps, data['spikeCount'][valid_tr])))
            cons.append(np.hstack((blankCons, data['total_con'][valid_tr])))

        # save data for later use
        all_data[d]['resps'] = resps
        all_data[d]['cons'] = cons
        all_data[d]['valid_sfs'] = v_sfs

        maxResp = np.max(np.hstack(resps))  # max over all SFs/trials
        n_v_sfs = len(v_sfs)

        each_list = (each_base, each_gain, each_expn, each_c50)

        n_per_param = [1 if i == 0 else n_v_sfs for i in each_list]
        # (equivalent logic, previously written as:
        #  if each_c50 == 1: n_c50s = n_v_sfs  # separate c50 for each SF...
        #  else:             n_c50s = 1)
        init_base = 0.1
        #bounds_base = (0, 0);
        bounds_base = (0.1, maxResp)
        init_gain = np.max(np.hstack(resps)) - np.min(np.hstack(resps))
        bounds_gain = (0, 10 * maxResp)
        init_expn = 2
        bounds_expn = (0.5, 10)
        init_c50 = 0.1
        #geomean(all_cons);
        bounds_c50 = (0.01, 10 * max(all_cons))
        # contrast values are b/t [0, 1]
        init_varGain = 1
        bounds_varGain = (0.01, None)

        base_inits = np.repeat(init_base, n_per_param[0])
        # by default (each_base=0), a single baseline shared across SFs
        base_constr = [
            tuple(x) for x in np.broadcast_to(bounds_base, (n_per_param[0], 2))
        ]

        gain_inits = np.repeat(init_gain, n_per_param[1])
        # by default (each_gain=1), a separate gain for each SF
        gain_constr = [
            tuple(x) for x in np.broadcast_to(bounds_gain, (n_per_param[1], 2))
        ]

        expn_inits = np.repeat(init_expn, n_per_param[2])
        # exponent can be either, like baseline
        expn_constr = [
            tuple(x) for x in np.broadcast_to(bounds_expn, (n_per_param[2], 2))
        ]

        c50_inits = np.repeat(init_c50, n_per_param[3])
        # repeat n_v_sfs times if c50 separate for each SF; otherwise, 1
        c50_constr = [
            tuple(x) for x in np.broadcast_to(bounds_c50, (n_per_param[3], 2))
        ]

        init_params = np.hstack(
            (c50_inits, expn_inits, gain_inits, base_inits, init_varGain))
        boundsAll = np.vstack((c50_constr, expn_constr, gain_constr,
                               base_constr, bounds_varGain))
        boundsAll = [tuple(x) for x in boundsAll]
        # turn the (inner) arrays into tuples...

        c50_ind = 0
        expn_ind = n_per_param[3]
        # the number of c50s...
        gain_ind = expn_ind + n_per_param[2]
        # the number of exponents
        base_ind = gain_ind + n_per_param[1]
        # always n_v_sfs gain parameters
        varGain_ind = base_ind + n_per_param[0]

        obj = lambda params: hf.fit_CRF(cons, resps, params[c50_ind:c50_ind+n_per_param[3]], params[expn_ind:expn_ind+n_per_param[2]], params[gain_ind:gain_ind+n_per_param[1]], \
                                            params[base_ind:base_ind+n_per_param[0]], params[varGain_ind], loss_type)
        opts = opt.minimize(obj, init_params, bounds=boundsAll)

        curr_params = opts['x']
        curr_loss = opts['fun']

        for iter in range(n_iter - 1):  # now, extra iterations if chosen...
            # re-draw initial values; counts follow n_per_param so the packed vector has the right length
            init_params = np.hstack(
                (hf.random_in_range(bounds_c50, n_per_param[3]),
                 hf.random_in_range(bounds_expn, n_per_param[2]),
                 hf.random_in_range(bounds_gain, n_per_param[1]),
                 hf.random_in_range(bounds_base, n_per_param[0]),
                 hf.random_in_range((bounds_varGain[0], 1))))

            # choose optimization method
            if np.mod(iter, 2) == 0:
                methodStr = 'L-BFGS-B'
            else:
                methodStr = 'TNC'

            opt_iter = opt.minimize(obj,
                                    init_params,
                                    bounds=boundsAll,
                                    method=methodStr)

            if opt_iter['fun'] < curr_loss:
                print('improve.')
                curr_loss = opt_iter['fun']
                curr_params = opt_iter['x']

        # now unpack...
        for sf_in in range(n_v_sfs):
            param_ind = [0 if i == 1 else sf_in for i in n_per_param]

            nk_ru[d][v_sfs[sf_in]]['params'][0] = curr_params[base_ind +
                                                              param_ind[0]]
            nk_ru[d][v_sfs[sf_in]]['params'][1] = curr_params[gain_ind +
                                                              param_ind[1]]
            nk_ru[d][v_sfs[sf_in]]['params'][2] = curr_params[expn_ind +
                                                              param_ind[2]]
            nk_ru[d][v_sfs[sf_in]]['params'][3] = curr_params[c50_ind +
                                                              param_ind[3]]
            # params (to match naka_rushton) are: baseline, gain, expon, c50
            nk_ru[d][v_sfs[sf_in]]['params'][4] = curr_params[varGain_ind]
            nk_ru[d][v_sfs[sf_in]]['loss'] = curr_loss

    # update stuff - load again in case some other run has saved/made changes
    if os.path.isfile(data_loc + fits_name):
        print('reloading CRF Fits...')
        crfFits = hf.np_smart_load(str(data_loc + fits_name))
    if cell_num - 1 not in crfFits:
        crfFits[cell_num - 1] = dict()
    crfFits[cell_num - 1][fit_key] = nk_ru
    crfFits[cell_num - 1]['data'] = all_data
    crfFits[cell_num - 1]['blankResps'] = blankResps

    np.save(data_loc + fits_name, crfFits)
    print('saving for cell ' + str(cell_num))

    return nk_ru
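
# Illustrative sketch only (not the project's hf.fit_CRF / naka_rushton helpers): the
# contrast-response form implied by the [baseline, gain, expon, c50] packing above.
# varGain is assumed to enter the loss (e.g. as a modulated-Poisson variance term),
# not the rate prediction itself.
import numpy as np

def naka_rushton_sketch(con, baseline, gain, expon, c50):
    """Hypothetical Naka-Rushton CRF: baseline + gain * c**n / (c**n + c50**n)."""
    con = np.asarray(con, dtype=float)
    return baseline + gain * np.power(con, expon) / (np.power(con, expon) + np.power(c50, expon))

# e.g. naka_rushton_sketch([0.1, 0.33, 1.0], baseline=2.0, gain=60.0, expon=2.0, c50=0.2)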
Example 2
def fit_descr(cell_num,
              data_loc,
              n_repeats=4,
              fromModelSim=0,
              fitLossType=1,
              baseStr=None,
              normType=None,
              lossType=None):

    nFam = 5
    nCon = 2
    nParam = 5

    # get base descrFit name (including loss str)
    if fitLossType == 1:
        floss_str = '_lsq'
    elif fitLossType == 2:
        floss_str = '_sqrt'
    elif fitLossType == 3:
        floss_str = '_poiss'
    descrFitBase = 'descrFits%s' % floss_str

    # load cell information
    dataList = hfunc.np_smart_load(data_loc + 'dataList.npy')
    if fromModelSim:
        # get model fit name
        fL_name = baseStr

        # normType
        if normType == 1:
            fL_suffix1 = '_flat'
        elif normType == 2:
            fL_suffix1 = '_wght'
        elif normType == 3:
            fL_suffix1 = '_c50'
        # lossType
        if lossType == 1:
            fL_suffix2 = '_sqrt.npy'
        elif lossType == 2:
            fL_suffix2 = '_poiss.npy'
        elif lossType == 3:
            fL_suffix2 = '_modPoiss.npy'
        elif lossType == 4:
            fL_suffix2 = '_chiSq.npy'
        fitListName = str(fL_name + fL_suffix1 + fL_suffix2)

        dfModelName = '%s_%s' % (descrFitBase, fitListName)

        if os.path.isfile(data_loc + dfModelName):
            descrFits = hfunc.np_smart_load(data_loc + dfModelName)
        else:
            descrFits = dict()
    else:
        dfModelName = '%s.npy' % descrFitBase
        if os.path.isfile(data_loc + dfModelName):
            descrFits = hfunc.np_smart_load(data_loc + dfModelName)
        else:
            descrFits = dict()
    data = hfunc.np_smart_load(data_loc + dataList['unitName'][cell_num - 1] +
                               '_sfm.npy')

    if fromModelSim:  # then we'll 'sneak' in the model responses in the place of the real data
        modFits = hfunc.np_smart_load(data_loc + fitListName)
        modFit = modFits[cell_num - 1]['params']
        a, modResp = mod_resp.SFMGiveBof(modFit,
                                         data,
                                         normType=normType,
                                         lossType=lossType)
        # spike count must be integers! Simply round
        data['sfm']['exp']['trial']['spikeCount'] = np.round(
            modResp * data['sfm']['exp']['trial']['duration'])

    if cell_num - 1 in descrFits:
        bestNLL = descrFits[cell_num - 1]['NLL']
        currParams = descrFits[cell_num - 1]['params']
    else:  # set values to NaN...
        bestNLL = np.ones((nFam, nCon)) * np.nan
        currParams = np.ones((nFam, nCon, nParam)) * np.nan

    print('Doing the work, now')
    for family in range(nFam):
        for con in range(nCon):

            print('.')
            # set initial parameters - a range from which we will pick!
            base_rate = data['sfm']['exp']['sponRateMean']
            if base_rate <= 3:
                range_baseline = (0, 3)
            else:
                range_baseline = (0.5 * base_rate, 1.5 * base_rate)

            max_resp = np.amax(data['sfm']['exp']['sfRateMean'][family][con])
            range_amp = (0.5 * max_resp, 1.5 * max_resp)

            theSfCents = data['sfm']['exp']['sf'][family][con]

            max_sf_index = np.argmax(
                data['sfm']['exp']['sfRateMean'][family][con])
            # what sf index gives peak response?
            mu_init = theSfCents[max_sf_index]

            if max_sf_index == 0:  # i.e. smallest SF center gives max response...
                range_mu = (mu_init / 2, theSfCents[max_sf_index + 3])
            elif max_sf_index + 1 == len(
                    theSfCents):  # i.e. highest SF center is max
                range_mu = (theSfCents[max_sf_index - 3], mu_init)
            else:
                range_mu = (theSfCents[max_sf_index - 1],
                            theSfCents[max_sf_index + 1])
                # go +-1 indices from center

            log_bw_lo = 0.75
            # 0.75 octave bandwidth...
            log_bw_hi = 2
            # 2 octave bandwidth...
            denom_lo = hfunc.bw_log_to_lin(log_bw_lo, mu_init)[0]
            # get linear bandwidth
            denom_hi = hfunc.bw_log_to_lin(log_bw_hi, mu_init)[0]
            # get lin. bw (cpd)
            range_denom = (denom_lo, denom_hi)
            # don't want 0 in sigma

            # set bounds for parameters
            min_bw = 1 / 4
            max_bw = 10
            # ranges in octave bandwidth

            bound_baseline = (0, max_resp)
            bound_range = (0, 1.5 * max_resp)
            bound_mu = (0.01, 10)
            bound_sig = (np.maximum(0.1,
                                    min_bw / (2 * np.sqrt(2 * np.log(2)))),
                         max_bw / (2 * np.sqrt(2 * np.log(2))))
            # Gaussian at half-height

            all_bounds = (bound_baseline, bound_range, bound_mu, bound_sig,
                          bound_sig)

            for n_try in range(n_repeats):

                # pick initial params
                init_base = hfunc.random_in_range(range_baseline)
                init_amp = hfunc.random_in_range(range_amp)
                init_mu = hfunc.random_in_range(range_mu)
                init_sig_left = hfunc.random_in_range(range_denom)
                init_sig_right = hfunc.random_in_range(range_denom)

                init_params = [
                    init_base, init_amp, init_mu, init_sig_left, init_sig_right
                ]

                # choose optimization method
                if np.mod(n_try, 2) == 0:
                    methodStr = 'L-BFGS-B'
                else:
                    methodStr = 'TNC'

                obj = lambda params: descr_loss(params, data, family, con)
                wax = opt.minimize(obj,
                                   init_params,
                                   method=methodStr,
                                   bounds=all_bounds)

                # compare
                NLL = wax['fun']
                params = wax['x']

                if (np.isnan(bestNLL[family, con]) or NLL < bestNLL[family, con]
                        or invalid(currParams[family, con, :], all_bounds)):
                    bestNLL[family, con] = NLL
                    currParams[family, con, :] = params

    # update stuff - load again in case some other run has saved/made changes
    if os.path.isfile(data_loc + dfModelName):
        print('reloading descrFitsModel...')
        descrFits = hfunc.np_smart_load(data_loc + dfModelName)
    if cell_num - 1 not in descrFits:
        descrFits[cell_num - 1] = dict()
    descrFits[cell_num - 1]['NLL'] = bestNLL
    descrFits[cell_num - 1]['params'] = currParams

    np.save(data_loc + dfModelName, descrFits)
    print('saving for cell ' + str(cell_num))
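
# Illustrative sketch only: a plausible form of the five-parameter descriptive SF tuning
# curve implied by [init_base, init_amp, init_mu, init_sig_left, init_sig_right] and the
# octave-bandwidth bounds above; the project's descr_loss defines the actual model, so
# treat this as an assumption.
import numpy as np

def flex_gauss_sketch(sf, baseline, amp, mu, sig_left, sig_right):
    """Two-halved log-Gaussian: separate widths below/above the preferred SF (mu)."""
    sf = np.asarray(sf, dtype=float)
    sigma = np.where(sf < mu, sig_left, sig_right)
    return baseline + amp * np.exp(-np.square(np.log(sf / mu)) / (2 * np.square(sigma)))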
Example 3
def fit_descr_DoG(cell_num,
                  data_loc=dataPath,
                  n_repeats=1000,
                  loss_type=3,
                  DoGmodel=1,
                  disp=0,
                  rvcName=rvcName,
                  dir=-1,
                  gain_reg=0,
                  fLname=dogName):

    nParam = 4

    # load cell information
    dataList = hf.np_smart_load(data_loc + 'dataList.npy')
    assert dataList != [], "data file not found!"

    if loss_type == 1:
        loss_str = '_poiss'
    elif loss_type == 2:
        loss_str = '_sqrt'
    elif loss_type == 3:
        loss_str = '_sach'
    elif loss_type == 4:
        loss_str = '_varExpl'
    if DoGmodel == 1:
        mod_str = '_sach'
    elif DoGmodel == 2:
        mod_str = '_tony'
    fLname = str(data_loc + fLname + loss_str + mod_str + '.npy')
    if os.path.isfile(fLname):
        descrFits = hf.np_smart_load(fLname)
    else:
        descrFits = dict()

    cellStruct = hf.np_smart_load(data_loc +
                                  dataList['unitName'][cell_num - 1] +
                                  '_sfm.npy')
    data = cellStruct['sfm']['exp']['trial']
    rvcNameFinal = hf.phase_fit_name(rvcName, dir)
    rvcFits = hf.np_smart_load(data_loc + rvcNameFinal)
    adjResps = rvcFits[cell_num - 1][disp]['adjMeans']
    adjSem = rvcFits[cell_num - 1][disp]['adjSem']
    if 'adjByTr' in rvcFits[cell_num - 1][disp]:
        adjByTr = rvcFits[cell_num - 1][disp]['adjByTr']
    else:
        adjByTr = []  # avoid a NameError below if the key is absent
    if disp == 1:
        adjResps = [np.sum(x, 1) if len(x) > 0 else [] for x in adjResps]
        if len(adjByTr) > 0:
            adjByTr = [np.sum(x, 1) if len(x) > 0 else [] for x in adjByTr]
    adjResps = np.array(adjResps)
    # indexing multiple SFs will work only if we convert to numpy array first
    adjSem = np.array([np.array(x) for x in adjSem])
    # make each inner list an array, and the whole thing an array

    print('Doing the work, now')

    # first, get the set of stimulus values:
    resps, stimVals, valConByDisp, _, _ = hf.tabulate_responses(data,
                                                                expInd=expInd)
    # LGN is expInd=3
    all_disps = stimVals[0]
    all_cons = stimVals[1]
    all_sfs = stimVals[2]

    nDisps = len(all_disps)
    nCons = len(all_cons)

    if cell_num - 1 in descrFits:
        bestNLL = descrFits[cell_num - 1]['NLL']
        currParams = descrFits[cell_num - 1]['params']
        varExpl = descrFits[cell_num - 1]['varExpl']
        prefSf = descrFits[cell_num - 1]['prefSf']
        charFreq = descrFits[cell_num - 1]['charFreq']
    else:  # set values to NaN...
        bestNLL = np.ones((nDisps, nCons)) * np.nan
        currParams = np.ones((nDisps, nCons, nParam)) * np.nan
        varExpl = np.ones((nDisps, nCons)) * np.nan
        prefSf = np.ones((nDisps, nCons)) * np.nan
        charFreq = np.ones((nDisps, nCons)) * np.nan

    # set bounds
    if DoGmodel == 1:
        bound_gainCent = (1e-3, None)
        bound_radiusCent = (1e-3, None)
        bound_gainSurr = (1e-3, None)
        bound_radiusSurr = (1e-3, None)
        allBounds = (bound_gainCent, bound_radiusCent, bound_gainSurr,
                     bound_radiusSurr)
    elif DoGmodel == 2:
        bound_gainCent = (1e-3, None)
        bound_gainFracSurr = (1e-2, 1)
        bound_freqCent = (1e-3, None)
        bound_freqFracSurr = (1e-2, 1)
        allBounds = (bound_gainCent, bound_freqCent, bound_gainFracSurr,
                     bound_freqFracSurr)

    for d in range(1):  # should be nDisps - just set to 1 for now (i.e. fitting single gratings and mixtures separately)
        for con in range(nCons):
            if con not in valConByDisp[disp]:
                continue

            valSfInds = hf.get_valid_sfs(data, disp, con, expInd)
            valSfVals = all_sfs[valSfInds]

            print('.')
            # adjResponses (f1) in the rvcFits are separate by sf, values within contrast - so to get all responses for a given SF,
            # access all sfs and get the specific contrast response
            respConInd = np.where(np.asarray(valConByDisp[disp]) == con)[0]
            # pdb.set_trace()  # debugging breakpoint, disabled so the fit can run
            ### interlude...
            spks = hf.get_spikes(data,
                                 rvcFits=rvcFits[cell_num - 1],
                                 expInd=expInd)
            _, _, mnResp, alResp = hf.organize_resp(spks, data, expInd)
            ###
            resps = flatten([x[respConInd] for x in adjResps[valSfInds]])
            resps_sem = [x[respConInd] for x in adjSem[valSfInds]]
            if isinstance(resps_sem[0],
                          np.ndarray):  # i.e. if it's still array of arrays...
                resps_sem = flatten(resps_sem)
            #resps_sem = None;
            maxResp = np.max(resps)
            freqAtMaxResp = all_sfs[np.argmax(resps)]

            for n_try in range(n_repeats):
                # pick initial params
                if DoGmodel == 1:
                    init_gainCent = hf.random_in_range(
                        (maxResp, 5 * maxResp))[0]
                    init_radiusCent = hf.random_in_range((0.05, 2))[0]
                    init_gainSurr = init_gainCent * hf.random_in_range(
                        (0.1, 0.8))[0]
                    init_radiusSurr = hf.random_in_range((0.5, 4))[0]
                    init_params = [
                        init_gainCent, init_radiusCent, init_gainSurr,
                        init_radiusSurr
                    ]
                elif DoGmodel == 2:
                    init_gainCent = maxResp * hf.random_in_range((0.9, 1.2))[0]
                    init_freqCent = np.maximum(
                        all_sfs[2],
                        freqAtMaxResp * hf.random_in_range((1.2, 1.5))[0])
                    # don't pick all_sfs[0] -- that's zero (we're avoiding that)
                    init_gainFracSurr = hf.random_in_range((0.7, 1))[0]
                    init_freqFracSurr = hf.random_in_range((.25, .35))[0]
                    init_params = [
                        init_gainCent, init_freqCent, init_gainFracSurr,
                        init_freqFracSurr
                    ]

                # choose optimization method
                if np.mod(n_try, 2) == 0:
                    methodStr = 'L-BFGS-B'
                else:
                    methodStr = 'TNC'

                obj = lambda params: DoG_loss(params,
                                              resps,
                                              valSfVals,
                                              resps_std=resps_sem,
                                              loss_type=loss_type,
                                              DoGmodel=DoGmodel,
                                              dir=dir,
                                              gain_reg=gain_reg)
                wax = opt.minimize(obj,
                                   init_params,
                                   method=methodStr,
                                   bounds=allBounds)

                # compare
                NLL = wax['fun']
                params = wax['x']

                if np.isnan(bestNLL[disp, con]) or NLL < bestNLL[disp, con]:
                    bestNLL[disp, con] = NLL
                    currParams[disp, con, :] = params
                    varExpl[disp,
                            con] = hf.var_explained(resps, params, valSfVals)
                    prefSf[disp, con] = hf.dog_prefSf(params, DoGmodel,
                                                      valSfVals)
                    charFreq[disp, con] = hf.dog_charFreq(params, DoGmodel)

        # update stuff - load again in case some other run has saved/made changes
        if os.path.isfile(fLname):
            print('reloading descrFits...')
            descrFits = hf.np_smart_load(fLname)
        if cell_num - 1 not in descrFits:
            descrFits[cell_num - 1] = dict()
        descrFits[cell_num - 1]['NLL'] = bestNLL
        descrFits[cell_num - 1]['params'] = currParams
        descrFits[cell_num - 1]['varExpl'] = varExpl
        descrFits[cell_num - 1]['prefSf'] = prefSf
        descrFits[cell_num - 1]['charFreq'] = charFreq
        descrFits[cell_num - 1]['gainRegFactor'] = gain_reg

        np.save(fLname, descrFits)
        print('saving for cell ' + str(cell_num))
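
# Illustrative sketch only: a self-contained difference-of-Gaussians SF tuning curve in
# the DoGmodel == 2 ("tony") parameterization suggested above (center gain, center
# characteristic frequency, surround gain/frequency as fractions of the center). The
# project's DoG_loss / hf.dog_* helpers define the real forms; this is an assumption.
import numpy as np

def dog_sketch(sf, gain_c, freq_c, gain_frac_s, freq_frac_s):
    """Center Gaussian minus a weaker, lower-frequency surround Gaussian (SF domain)."""
    sf = np.asarray(sf, dtype=float)
    center = gain_c * np.exp(-np.square(sf / freq_c))
    surround = (gain_frac_s * gain_c) * np.exp(-np.square(sf / (freq_frac_s * freq_c)))
    return center - surround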
Example 4
# Normalization pool simulations
#########
if norm_sim_on:

    conLevels = [1, 0.75, 0.5, 0.33, 0.1]
    nCons = len(conLevels)
    sfCenters = np.logspace(-2, 2, 21)
    # just for now...
    #sfCenters = allSfs;
    fNorm, conDisp_plots = plt.subplots(nCons,
                                        nDisps,
                                        sharey=True,
                                        figsize=(40, 30))
    norm_sim = np.nan * np.empty((nDisps, nCons, len(sfCenters)))
    if len(modParamsCurr) < 9:
        modParamsCurr.append(hf.random_in_range([-0.35, 0.35])[0])
        # enter asymmetry parameter

    # simulations
    for disp in range(nDisps):
        for conLvl in range(nCons):
            print('simulating normResp for family ' + str(disp + 1) +
                  ' and contrast ' + str(conLevels[conLvl]))
            for sfCent in range(len(sfCenters)):
                # if modParamsCurr doesn't have inhAsym parameter, add it!
                if norm_type == 2:
                    unweighted = 1
                    _, _, _, normRespSimple, _ = mod_resp.SFMsimulate(
                        modParamsCurr,
                        cellStruct,
                        disp + 1,
Example 5
def dog_fit(resps, all_cons, all_sfs, DoGmodel, loss_type, n_repeats, joint=0, ref_varExpl=None, veThresh=-np.nan, fracSig=1, ftol=2.220446049250313e-09, jointMinCons=3):
  ''' Helper function for fitting descriptive functions to SF responses
      if joint=True, (and DoGmodel is 1 or 2, i.e. not flexGauss), then we fit assuming
      a fixed ratio for the center-surround gains and [freq/radius]
      - i.e. of the 4 DoG parameters, 2 are fit separately for each contrast, and 2 are fit 
        jointly across all contrasts!
      - note that ref_varExpl (optional) will be of the same form that the output for varExpl will be
      - note that jointMinCons is the minimum # of contrasts that must be included for a joint fit to be run (e.g. 2)

      inputs: self-explanatory, except for resps, which should be "f1" from tabulateResponses 
      outputs: bestNLL, currParams, varExpl, prefSf, charFreq, overallNLL, params, success
               (overallNLL and params are None placeholders when joint=0)
  '''
  nCons = len(all_cons);
  if DoGmodel == 0:
    nParam = 5;
  else:
    nParam = 4;

  # unpack responses
  resps_mean = resps['mean'];
  resps_sem = resps['sem'];

  # next, let's compute some measures about the responses
  max_resp = np.nanmax(resps_mean.flatten());
  min_resp = np.nanmin(resps_mean.flatten());
  ############
  ### WARNING - we're subtracting min_resp-1 from all responses
  ############  
  #resps_mean = np.subtract(resps_mean, min_resp-1); # i.e. make the minimum response 1 spk/s...

  # and set up initial arrays
  bestNLL = np.ones((nCons, ), dtype=np.float32) * np.nan;
  currParams = np.ones((nCons, nParam), dtype=np.float32) * np.nan;
  varExpl = np.ones((nCons, ), dtype=np.float32) * np.nan;
  prefSf = np.ones((nCons, ), dtype=np.float32) * np.nan;
  charFreq = np.ones((nCons, ), dtype=np.float32) * np.nan;
  if joint>0:
    overallNLL = np.nan;
    params = np.nan;
    success = False;
  else:
    success = np.zeros((nCons, ), dtype=np.bool_);

  ### set bounds
  if DoGmodel == 0:
    min_bw = 1/4; max_bw = 10; # ranges in octave bandwidth
    bound_baseline = (0, max_resp);
    bound_range = (0, 1.5*max_resp);
    bound_mu = (0.01, 10);
    bound_sig = (np.maximum(0.1, min_bw/(2*np.sqrt(2*np.log(2)))), max_bw/(2*np.sqrt(2*np.log(2)))); # Gaussian at half-height
    if fracSig:
      bound_sigFrac = (0.2, 2);
      allBounds = (bound_baseline, bound_range, bound_mu, bound_sig, bound_sigFrac);
    else:
      allBounds = (bound_baseline, bound_range, bound_mu, bound_sig, bound_sig);
  elif DoGmodel == 1: # SACH
    bound_gainCent = (1, 3*max_resp);
    bound_radiusCent= (1e-2, 1.5);
    bound_gainSurr = (1e-2, 1); # multiplier on gainCent, thus the center must be weaker than the surround
    bound_radiusSurr = (1, 10); # (1,10) # multiplier on radiusCent, thus the surr. radius must be larger than the center
    if joint>0:
      if joint == 1: # original joint (fixed gain and radius ratios across all contrasts)
        bound_gainRatio = (1e-3, 1); # the surround gain will always be less than the center gain
        bound_radiusRatio= (1, 10); # the surround radius will always be greater than the ctr r
        # we'll add to allBounds later, reflecting joint gain/radius ratios common across all cons
        allBounds = (bound_gainRatio, bound_radiusRatio);
      elif joint == 2: # fixed surround radius for all contrasts
        allBounds = (bound_radiusSurr, );
      elif joint == 3: # fixed center AND surround radius for all contrasts
        allBounds = (bound_radiusCent, bound_radiusSurr);
      # In advance of the thesis/publishing the LGN data, we will replicate some of Sach's key results
      # In particular, his thesis covers 4 joint models:
      # -- volume ratio: center and surround radii are fixed, but gains can vary (already covered in joint == 3)
      # -- center radius: fixed center radius across contrast (joint=4) AND fixed volume (i.e. make surround gain constant across contrast)
      # -- surround radius: fixed surround radius across contrast (joint=5) AND fixed volume (i.e. make surround gain constant across contrast) // fixed not in proportion to center, but in absolute value
      # -- center-surround: center and surround radii can vary, but ratio of gains is fixed (joint == 6)
      # ---- NOTE: joints 3-5 have 2*nCons + 2 parms; joint==6 has 3*nCons + 1
      elif joint == 4: # fixed center radius
         allBounds = (bound_radiusCent, bound_gainSurr, ); # center radius AND bound_gainSurr are fixed across condition
      elif joint == 5: # fixed surround radius (again, in absolute terms here, not relative, as is usually specified)
         allBounds = (bound_gainSurr, bound_radiusSurr, ); # surround radius AND bound_gainSurr are fixed across condition
      elif joint == 6: # fixed center:surround gain ratio
         allBounds = (bound_gainSurr, ); # we can fix the ratio by allowing the center gain to vary and keeping the surround in fixed proportion
      elif joint == 7 or joint == 8: # center radius determined by slope! we'll also fix the surround radius; if joint == 8, fix the surround gain instead of the radius
         bound_xc_slope = (-1, 1); # 220505 fits unbounded; 220519 fits bounded (-1,1)
         bound_xc_inter = (None, None); #bound_radiusCent; # intercept - shouldn't start outside the bounds we choose for radiusCent
         allBounds = (bound_xc_inter, bound_xc_slope, bound_radiusSurr, ) if joint == 7 else (bound_xc_slope, bound_xc_inter, bound_gainSurr, )
    else:
      allBounds = (bound_gainCent, bound_radiusCent, bound_gainSurr, bound_radiusSurr);
  elif DoGmodel == 2:
    bound_gainCent = (1e-3, None);
    bound_freqCent = (1e-3, 2e1);
    bound_gainFracSurr = (1e-3, 2); # surround gain always less than center gain NOTE: SHOULD BE (1e-3, 1)
    bound_freqFracSurr = (5e-2, 1); # surround freq always less than ctr freq NOTE: SHOULD BE (1e-1, 1)
    if joint>0:
      if joint == 1: # original joint (fixed gain and radius ratios across all contrasts)
        bound_gainRatio = (1e-3, 3);
        bound_freqRatio = (1e-1, 1); 
        # we'll add to allBounds later, reflecting joint gain/radius ratios common across all cons
        allBounds = (bound_gainRatio, bound_freqRatio);
      elif joint == 2: # fixed surround radius for all contrasts
        allBounds = (bound_freqFracSurr,);
      elif joint == 3: # fixed center AND surround radius for all contrasts
        allBounds = (bound_freqCent, bound_freqFracSurr);
    elif joint==0:
      bound_gainFracSurr = (1e-3, 1);
      bound_freqFracSurr = (1e-1, 1);
      allBounds = (bound_gainCent, bound_freqCent, bound_gainFracSurr, bound_freqFracSurr);

  ### organize responses -- and fit, if joint=0
  allResps = []; allRespsSem = []; allSfs = []; valCons = []; start_incl = 0; incl_inds = [];
  base_rate = np.min(resps_mean.flatten());
  for con in range(nCons):
    if all_cons[con] == 0: # skip 0 contrast...
        continue;
    else:
      valCons.append(all_cons[con]);
    valSfInds_curr = np.where(~np.isnan(resps_mean[con,:]))[0];
    resps_curr = resps_mean[con, valSfInds_curr];
    sem_curr   = resps_sem[con, valSfInds_curr];

    ### prepare for the joint fitting, if that's what we've specified!
    if joint>0:
      if resps_curr.size == 0:
         continue;
      if ref_varExpl is None:
        start_incl = 1; # hacky...
      if start_incl == 0:
        if ref_varExpl[con] < veThresh:
          continue; # i.e. we're not adding this; yes we could move this up, but keep it here for now
        else:
          start_incl = 1; # now we're ready to start adding to our responses that we'll fit!

      allResps.append(resps_curr);
      allRespsSem.append(sem_curr);
      allSfs.append(all_sfs[valSfInds_curr]);
      incl_inds.append(con);
      # and add to the bounds list!
      if DoGmodel == 1:
        if joint == 1: # add the center gain and center radius for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_radiusCent);
        if joint == 2: # add the center and surr. gain and center radius for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_radiusCent, bound_gainSurr);
        if joint == 3:  # add the center and surround gain for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_gainSurr);
        elif joint == 4: # fixed center radius, so add all other parameters
          allBounds = (*allBounds, bound_gainCent, bound_radiusSurr);
        elif joint == 5: # add the center and surr. gain and center radius for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_radiusCent);
        elif joint == 6: # fixed center:surround gain ratio
          allBounds = (*allBounds, bound_gainCent, bound_radiusCent, bound_radiusSurr);
        elif joint == 7: # center radius det. by slope, surround radius fixed
          allBounds = (*allBounds, bound_gainCent, bound_gainSurr);
        elif joint == 8: # center radius det. by slope, surround gain fixed
          allBounds = (*allBounds, bound_gainCent, bound_radiusSurr);
      elif DoGmodel == 2:
        if joint == 1: # add the center gain and center radius for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_freqCent);
        if joint == 2: # add the center and surr. gain and center radius for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_freqCent, bound_gainFracSurr);
        if joint == 3:  # add the center and surround gain for each contrast 
          allBounds = (*allBounds, bound_gainCent, bound_gainFracSurr);

      continue;

    ### otherwise, we're really going to fit here! [i.e. if joint is False]
    # first, specify the objective function!
    obj = lambda params: DoG_loss(params, resps_curr, all_sfs[valSfInds_curr], resps_std=sem_curr, loss_type=loss_type, DoGmodel=DoGmodel, joint=joint); # if we're here, then joint=0, but we'll still keep joint=joint

    for n_try in range(n_repeats):
      ###########
      ### pick initial params
      ###########
      init_params = dog_init_params(resps_curr, base_rate, all_sfs, valSfInds_curr, DoGmodel, fracSig=fracSig, bounds=allBounds)

      # choose optimization method
      if np.mod(n_try, 2) == 0:
          methodStr = 'L-BFGS-B';
      else:
          methodStr = 'TNC';
          
      try:
        wax = opt.minimize(obj, init_params, method=methodStr, bounds=allBounds);
      except:
        continue; # the fit has failed (bound issue, for example); so, go back to top of loop, try again
      
      # compare
      NLL = wax['fun'];
      params = wax['x'];

      if np.isnan(bestNLL[con]) or NLL < bestNLL[con]:
        bestNLL[con] = NLL;
        currParams[con, :] = params;
        curr_mod = get_descrResp(params, all_sfs[valSfInds_curr], DoGmodel);
        # TODO: 22.05.10 --> previously ignored sf==0 case for varExpl
        varExpl[con] = var_expl_direct(resps_curr, curr_mod);
        prefSf[con] = dog_prefSf(params, dog_model=DoGmodel, all_sfs=all_sfs[all_sfs>0]); # do not include 0 c/deg SF condition
        charFreq[con] = dog_charFreq(params, DoGmodel=DoGmodel);
        success[con] = wax['success'];

  if joint==0: # then we're DONE
    return bestNLL, currParams, varExpl, prefSf, charFreq, None, None, success; # placeholding None for overallNLL, params [full list]

  ### NOW, we do the fitting if joint=True
  if joint>0:
    if len(allResps)<jointMinCons: # need at least jointMinCons contrasts!
      return bestNLL, currParams, varExpl, prefSf, charFreq, overallNLL, params, success;
    ### now, we fit!
    for n_try in range(n_repeats):
      # first, estimate the joint parameters; then we'll add the per-contrast parameters after
      # --- we'll estimate the joint parameters based on the high contrast response
      ref_resps = allResps[-1];
      ref_init = dog_init_params(ref_resps, base_rate, all_sfs, all_sfs, DoGmodel);
      if joint == 1: # gain ratio (i.e. surround gain) [0] and shape ratio (i.e. surround radius) [1] are joint
        allInitParams = [ref_init[2], ref_init[3]];
      elif joint == 2: #  surround radius [0] (as ratio) is joint
        allInitParams = [ref_init[3]];
      elif joint == 3: # center radius [0] and surround radius [1] ratio are joint
        allInitParams = [ref_init[1], ref_init[3]];
      elif joint == 4: # center radius, surr. gain fixed
        allInitParams = [ref_init[1], ref_init[2]];
      elif joint == 5: #  surround gain AND radius [0] (as ratio in 2; fixed in 5) are joint
        allInitParams = [ref_init[2], ref_init[3]];
      elif joint == 6: # center:surround gain is fixed
        allInitParams = [ref_init[2]];
      elif joint == 7 or joint == 8: # center radius offset and slope fixed; surround radius fixed [7] or surr. gain fixed [8]
        # the slope will be calculated on log contrast, and will start from the lowest contrast
        # -- i.e. xc = np.power(10, init+slope*log10(con))
        # to start, let's assume no slope, so the intercept should be equal to our xc guess
        init_intercept, init_slope = random_in_range([-1.3, -0.6])[0], random_in_range([-0.1,0.2])[0]
        #init_intercept, init_slope = np.log10(ref_init[1]), 0;
        allInitParams = [init_intercept, init_slope, ref_init[3]] if joint == 7 else [init_intercept, init_slope, ref_init[2]];

      # now, we cycle through all responses and add the per-contrast parameters
      for resps_curr in allResps:
        curr_init = dog_init_params(resps_curr, base_rate, all_sfs, all_sfs, DoGmodel);
        if joint == 1:
          allInitParams = [*allInitParams, curr_init[0], curr_init[1]];
        elif joint == 2: # then we add center gain, center radius, surround gain (i.e. params 0:3)
          allInitParams = [*allInitParams, curr_init[0], curr_init[1], curr_init[2]];
        elif joint == 3: # then we add center gain and surround gain (i.e. params 0, 2)
          allInitParams = [*allInitParams, curr_init[0], curr_init[2]];
        elif joint == 4: # then we add center gain, surround radius
          allInitParams = [*allInitParams, curr_init[0], curr_init[3]];
        elif joint == 5: # then we add center gain, center radius
          allInitParams = [*allInitParams, curr_init[0], curr_init[1]];
        elif joint == 6: # then we add center gain and both radii
          allInitParams = [*allInitParams, curr_init[0], curr_init[1], curr_init[3]];
        elif joint == 7: # then we add center and surround gains
          allInitParams = [*allInitParams, curr_init[0], curr_init[2]];
        elif joint == 8: # then we add center gain, surr. radius
          allInitParams = [*allInitParams, curr_init[0], curr_init[3]];

      methodStr = 'L-BFGS-B';
      obj = lambda params: DoG_loss(params, allResps, allSfs, resps_std=allRespsSem, loss_type=loss_type, DoGmodel=DoGmodel, joint=joint, n_fits=len(allResps), conVals=valCons, ); # if joint, it's just one fit!
      wax = opt.minimize(obj, allInitParams, method=methodStr, bounds=allBounds, options={'ftol': ftol});

      # compare
      NLL = wax['fun'];
      params_curr = wax['x'];

      if np.isnan(overallNLL) or NLL < overallNLL:
        overallNLL = NLL;
        params = params_curr;
        success = wax['success'];

    ### Done with multi-start fits; now, unpack the fits to fill in the "true" parameters for each contrast
    # --- first, get the global parameters
    ref_rc_val = None;
    if joint == 1:
      gain_rat, shape_rat = params[0], params[1];
    elif joint == 2:
      surr_shape = params[0]; # radius or frequency, if Tony model
    elif joint == 3:
      center_shape, surr_shape = params[0], params[1]; # radius or frequency, if Tony model
    elif joint == 4: # center radius, surr. gain fixed
      center_shape, surr_gain = params[0], params[1];
    elif joint == 5: # surr. gain, surr. radius fixed
      surr_gain, surr_shape = params[0], params[1];
      ref_rc_val = params[2]; # center radius for high contrast
    elif joint == 6: # ctr:surr gain fixed
      surr_gain = params[0];
    elif joint == 7: # center gain det. from slope, surround radius fixed
      xc_inter, xc_slope, surr_shape = params[0:3];
    elif joint == 8: # center gain det. from slope, surround gain fixed
      xc_inter, xc_slope, surr_gain = params[0:3];
      
    for con in range(len(allResps)):
      # --- then, go through each contrast and get the "local", i.e. per-contrast, parameters
      if joint == 1: # center gain, center shape
        center_gain = params[2+con*2]; 
        center_shape = params[3+con*2]; # shape, as in radius/freq, depending on DoGmodel
        curr_params = [center_gain, center_shape, gain_rat, shape_rat];
      elif joint == 2: # center gain, center radius, surround gain
        center_gain = params[1+con*3]; 
        center_shape = params[2+con*3];
        surr_gain = params[3+con*3];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];
      elif joint == 3: # center gain, surround gain
        center_gain = params[2+con*2]; 
        surr_gain = params[3+con*2];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];
      elif joint == 4: # center radius, surr. gain fixed for all contrasts
        center_gain = params[2+con*2]; 
        surr_shape = params[3+con*2];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];
      elif joint == 5: # surround gain, radius fixed for all contrasts
        center_gain = params[2+con*2]; 
        center_shape = params[3+con*2];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];
      elif joint == 6: # ctr:surr gain fixed for all contrasts
        center_gain = params[1+con*3]; 
        center_shape = params[2+con*3];
        surr_shape = params[3+con*3];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];
      elif joint == 7 or joint == 8: # surr radius [7] or gain [8] fixed; need to determine center radius from slope
        center_gain = params[3+con*2]; 
        center_shape = get_xc_from_slope(params[0], params[1], all_cons[con]);
        if joint == 7:
          surr_gain = params[4+con*2];
        elif joint == 8:
          surr_shape = params[4+con*2];
        curr_params = [center_gain, center_shape, surr_gain, surr_shape];

      # -- then the responses, and overall contrast index
      resps_curr = allResps[con];
      sem_curr   = allRespsSem[con];

      # now, compute!
      conInd = incl_inds[con];
      bestNLL[conInd] = DoG_loss(curr_params, resps_curr, allSfs[con], resps_std=sem_curr, loss_type=loss_type, DoGmodel=DoGmodel, joint=0, ref_rc_val=ref_rc_val); # now it's NOT joint!
      currParams[conInd, :] = curr_params;
      curr_mod = get_descrResp(curr_params, allSfs[con], DoGmodel, ref_rc_val=ref_rc_val);
      varExpl[conInd] = var_expl_direct(resps_curr, curr_mod);
      prefSf[conInd] = dog_prefSf(curr_params, dog_model=DoGmodel, all_sfs=all_sfs[all_sfs>0], ref_rc_val=ref_rc_val);
      charFreq[conInd] = dog_charFreq(curr_params, DoGmodel=DoGmodel);    

    # and NOW, we can return!
    return bestNLL, currParams, varExpl, prefSf, charFreq, overallNLL, params, success;
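
# Illustrative only: how the flat parameter vector is assumed to be laid out for joint == 3
# above (center and surround radii shared across contrasts, then per-contrast center and
# surround gains), matching the indices params[2+con*2] / params[3+con*2] in the unpacking loop.
def unpack_joint3_sketch(params, n_cons):
  ''' Return per-contrast [gain_c, radius_c, gain_s, radius_s] lists for joint == 3. '''
  center_shape, surr_shape = params[0], params[1]; # shared across all contrasts
  per_con = [];
  for con in range(n_cons):
    center_gain = params[2 + con*2];
    surr_gain   = params[3 + con*2];
    per_con.append([center_gain, center_shape, surr_gain, surr_shape]);
  return per_con;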
Example 6
plt.style.use(
    'https://raw.githubusercontent.com/paul-levy/SF_diversity/master/Analysis/Functions/paul_plt_cluster.mplstyle'
)
plt.rc('legend', fontsize='medium')  # using a named size

which_cell = int(sys.argv[1])
fit_type = int(sys.argv[2])
norm_type = int(sys.argv[3])

if norm_type == 1:  # i.e. gaussian, not "standard asymmetry"
    if len(sys.argv) > 4:
        gs_mean = float(sys.argv[4])
        gs_std = float(sys.argv[5])
    else:
        gs_mean = helper_fcns.random_in_range([-1, 1])[0]
        gs_std = np.power(10,
                          helper_fcns.random_in_range([-2, 2])[0])
        # i.e. 1e-2, 1e2

# at CNS
# dataPath = '/arc/2.2/p1/plevy/SF_diversity/sfDiv-OriModel/sfDiv-python/altExp/recordings/';
# savePath = '/arc/2.2/p1/plevy/SF_diversity/sfDiv-OriModel/sfDiv-python/altExp/analysis/';
# personal mac
#dataPath = '/Users/paulgerald/work/sfDiversity/sfDiv-OriModel/sfDiv-python/altExp/analysis/structures/';
#save_loc = '/Users/paulgerald/work/sfDiversity/sfDiv-OriModel/sfDiv-python/altExp/analysis/figures/';
# prince cluster
dataPath = '/home/pl1465/SF_diversity/altExp/analysis/structures/'
save_loc = '/home/pl1465/SF_diversity/altExp/analysis/figures/'

if fit_type == 1:
Example 7
#plt.text(0.5, 0.1, 'inhibitory asymmetry: {:.3f}'.format(modFit[8]), fontsize=12, horizontalalignment='center', verticalalignment='center');

#########
# Normalization pool simulations
#########

conLevels = [1, 0.75, 0.5, 0.33, 0.1]
nCons = len(conLevels)
sfCenters = np.logspace(-2, 2, 21)
# for now
fNorm, conDisp_plots = plt.subplots(nFam, nCons, sharey=True, figsize=(45, 25))
norm_sim = np.nan * np.empty((nFam, nCons, len(sfCenters)))
if len(modFit) < 9:  # if len >= 9, then either we have the asymmetry parameter or we're doing gaussian (or other) normalization weighting
    modFit.append(random_in_range([-0.35, 0.35])[0])
    # enter asymmetry parameter

# simulations
for disp in range(nFam):
    for conLvl in range(nCons):
        print('simulating normResp for family ' + str(disp + 1) +
              ' and contrast ' + str(conLevels[conLvl]))
        for sfCent in range(len(sfCenters)):
            # if modFit doesn't have inhAsym parameter, add it!
            if norm_type == 2:
                unweighted = 1
                _, _, _, normRespSimple, _ = mod_resp.SFMsimulate(
                    modFit,
                    expData,
                    disp + 1,
Example 8
def fit_descr(cell_num, data_loc, n_repeats = 4, loss_type = 1):

    nParam = 5;
    
    if loss_type == 1:
      loss_str = '_lsq.npy';    
    elif loss_type == 2:
      loss_str = '_sqrt.npy';    
    elif loss_type == 3:
      loss_str = '_poiss.npy';

    # load cell information
    dataList = hfunc.np_smart_load(data_loc + 'dataList.npy');
    if os.path.isfile(data_loc + 'descrFits' +  loss_str):
        descrFits = hfunc.np_smart_load(data_loc + 'descrFits' + loss_str);
    else:
        descrFits = dict();
    data = hfunc.np_smart_load(data_loc + dataList['unitName'][cell_num-1] + '_sfm.npy');
    
    print('Doing the work, now');

    to_unpack = hfunc.tabulate_responses(data);
    [respMean, respVar, predMean, predVar] = to_unpack[0];
    [all_disps, all_cons, all_sfs] = to_unpack[1];
    val_con_by_disp = to_unpack[2];
    
    nDisps = len(all_disps);
    nCons = len(all_cons);

    if cell_num-1 in descrFits:
        bestNLL = descrFits[cell_num-1]['NLL'];
        currParams = descrFits[cell_num-1]['params'];
    else: # set values to NaN...
        bestNLL = np.ones((nDisps, nCons)) * np.nan;
        currParams = np.ones((nDisps, nCons, nParam)) * np.nan;
    
    for family in range(nDisps):
        for con in range(nCons):    
            
            if con not in val_con_by_disp[family]:
                continue;

            print('.');           
            # set initial parameters - a range from which we will pick!
            base_rate = hfunc.blankResp(data)[0];
            if base_rate <= 3:
                range_baseline = (0, 3);
            else:
                range_baseline = (0.5 * base_rate, 1.5 * base_rate);

            valid_sf_inds = ~np.isnan(respMean[family, :, con]);
            max_resp = np.amax(respMean[family, valid_sf_inds, con]);
            range_amp = (0.5 * max_resp, 1.5 * max_resp);
            
            theSfCents = all_sfs[valid_sf_inds];
            
            max_sf_index = np.argmax(respMean[family, valid_sf_inds, con]); # what sf index gives peak response?
            mu_init = theSfCents[max_sf_index];
            
            if max_sf_index == 0: # i.e. smallest SF center gives max response...
                range_mu = (mu_init/2,theSfCents[max_sf_index + 3]);
            elif max_sf_index+1 == len(theSfCents): # i.e. highest SF center is max
                range_mu = (theSfCents[max_sf_index-2], mu_init);
            else:
                range_mu = (theSfCents[max_sf_index-1], theSfCents[max_sf_index+1]); # go +-1 indices from center
                
            log_bw_lo = 0.75; # 0.75 octave bandwidth...
            log_bw_hi = 2; # 2 octave bandwidth...
            denom_lo = hfunc.bw_log_to_lin(log_bw_lo, mu_init)[0]; # get linear bandwidth
            denom_hi = hfunc.bw_log_to_lin(log_bw_hi, mu_init)[0]; # get lin. bw (cpd)
            range_denom = (denom_lo, denom_hi); # don't want 0 in sigma 
                
            # set bounds for parameters
            min_bw = 1/4; max_bw = 10; # ranges in octave bandwidth

            bound_baseline = (0, max_resp);
            bound_range = (0, 1.5*max_resp);
            bound_mu = (0.01, 10);
            bound_sig = (np.maximum(0.1, min_bw/(2*np.sqrt(2*np.log(2)))), max_bw/(2*np.sqrt(2*np.log(2)))); # Gaussian at half-height
            all_bounds = (bound_baseline, bound_range, bound_mu, bound_sig, bound_sig);

            for n_try in range(n_repeats):
                
                # pick initial params
                init_base = hfunc.random_in_range(range_baseline);
                init_amp = hfunc.random_in_range(range_amp);
                init_mu = hfunc.random_in_range(range_mu);
                init_sig_left = hfunc.random_in_range(range_denom);
                init_sig_right = hfunc.random_in_range(range_denom);
                         
                init_params = [init_base, init_amp, init_mu, init_sig_left, init_sig_right];
                         
                # choose optimization method
                if np.mod(n_try, 2) == 0:
                    methodStr = 'L-BFGS-B';
                else:
                    methodStr = 'TNC';
                
                obj = lambda params: descr_loss(params, data, family, con, loss_type);
                wax = opt.minimize(obj, init_params, method=methodStr, bounds=all_bounds);
                
                # compare
                NLL = wax['fun'];
                params = wax['x'];

                if np.isnan(bestNLL[family, con]) or NLL < bestNLL[family, con] or invalid(currParams[family, con, :], all_bounds):
                    bestNLL[family, con] = NLL;
                    currParams[family, con, :] = params;

    # update stuff - load again in case some other run has saved/made changes
    if os.path.isfile(data_loc + 'descrFits' + loss_str):
        print('reloading descrFits...');
        descrFits = hfunc.np_smart_load(data_loc + 'descrFits' + loss_str);
    if cell_num-1 not in descrFits:
      descrFits[cell_num-1] = dict();
    descrFits[cell_num-1]['NLL'] = bestNLL;
    descrFits[cell_num-1]['params'] = currParams;

    np.save(data_loc + 'descrFits' + loss_str, descrFits);
    print('saving for cell ' + str(cell_num));
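
# `invalid(params, bounds)` is referenced above but not defined in this snippet; a plausible
# sketch of the check it is assumed to perform (any parameter NaN or outside its (lo, hi) bound):
import numpy as np

def invalid(params, bounds):
    for p, (lo, hi) in zip(params, bounds):
        if np.isnan(p):
            return True;
        if lo is not None and p < lo:
            return True;
        if hi is not None and p > hi:
            return True;
    return False;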