Example #1
def standard_correlation_by_epochs(est, val, modelspecs, epochs_list, rec=None):
    """
    Does the same thing as standard_correlation, except with subsets of data
    defined by epochs_list.

    To use this, first add epochs to define subsets of data.
    Then, pass epochs_list as a list of subsets to test.
    For example, ['A', 'B', ['A', 'B']] will measure correlations separately
    for all epochs marked 'A', all epochs marked 'B', and all epochs marked
    'A' or 'B'.
    """

    for epochs in epochs_list:
        # Create a label for this subset. If epochs is a list, join elements with "+"
        epoch_list_str="+".join([str(x) for x in epochs])

        # Make a copy for this subset
        val_copy = copy.deepcopy(val)
        for vc in val_copy:
            vc['resp'] = vc['resp'].select_epochs(epochs)

        est_copy = copy.deepcopy(est)
        for ec in est_copy:
            ec['resp'] = ec['resp'].select_epochs(epochs)

        # Compute scores for validation data
        r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val_copy]
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val_copy]
        ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val_copy]

        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]
        if rec is not None:
            r_ceiling = [nmet.r_ceiling(p, rec, 'pred', 'resp') for p in val_copy]

        # Repeat for est data.
        r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est_copy]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est_copy]
        ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est_copy]

        # Average
        modelspecs[0][0]['meta'][epoch_list_str]={}
        modelspecs[0][0]['meta'][epoch_list_str]['r_test'] = np.mean(r_test)
        modelspecs[0][0]['meta'][epoch_list_str]['mse_test'] = np.mean(mse_test)
        modelspecs[0][0]['meta'][epoch_list_str]['ll_test'] = np.mean(ll_test)

        modelspecs[0][0]['meta'][epoch_list_str]['r_fit'] = np.mean(r_fit)
        modelspecs[0][0]['meta'][epoch_list_str]['r_floor'] = np.mean(r_floor)
        if rec is not None:
            modelspecs[0][0]['meta'][epoch_list_str]['r_ceiling'] = np.mean(r_ceiling)
        modelspecs[0][0]['meta'][epoch_list_str]['mse_fit'] = np.mean(mse_fit)
        modelspecs[0][0]['meta'][epoch_list_str]['ll_fit'] = np.mean(ll_fit)

    return modelspecs
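
A minimal usage sketch for the function above, under assumed inputs: est_set and val_set are hypothetical lists of recordings that already contain 'pred' and 'resp' signals plus epochs named 'A' and 'B', and fitted_modelspecs is the fitted modelspec list.

# Hypothetical inputs; only the call pattern is taken from the function above.
epochs_list = ['A', 'B', ['A', 'B']]  # per-epoch subsets plus the pooled subset
fitted_modelspecs = standard_correlation_by_epochs(
    est_set, val_set, fitted_modelspecs, epochs_list)

# Averaged scores for each subset are stored under a "+"-joined key:
print(fitted_modelspecs[0][0]['meta']['A+B']['r_test'])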
Example #2
def fit_cd_nfold_shrinkage(modelspecs,
                           est,
                           ftol=1e-7,
                           maxiter=1000,
                           IsReload=False,
                           **context):
    '''
    Fit n-fold, one fold per entry in est, using coordinate descent and then
    a scipy_minimize refinement.
    '''
    if not IsReload:
        fit_kwargs = {
            'tolerance': ftol,
            'max_iter': maxiter,
            'step_size': 0.05
        }
        metric = lambda d: metrics.nmse(d, 'pred', 'resp')
        modelspecs = nems.analysis.api.fit_nfold(est,
                                                 modelspecs,
                                                 metric=metric,
                                                 fit_kwargs=fit_kwargs,
                                                 fitter=coordinate_descent)
        fit_kwargs = {
            'options': {
                'tolerance': ftol,
                'max_iter': maxiter
            }
        }
        modelspecs = nems.analysis.api.fit_nfold(est,
                                                 modelspecs,
                                                 metric=metric,
                                                 fitter=scipy_minimize,
                                                 fit_kwargs=fit_kwargs)
    return {'modelspecs': modelspecs}
Example #3
def basic_error(data,
                modelspec,
                cost_function=None,
                segmentor=nems.segmentors.use_all_data,
                mapper=nems.fitters.mappers.simple_vector,
                metric=lambda data: nmet.nmse(data, 'pred', 'resp')):
    '''
    Similar to fit_basic except that it just returns the error for the fitting
    process instead of a modelspec. Intended to be called after a model
    has already been fit.
    '''
    modelspec = copy.deepcopy(modelspec)
    if cost_function is None:
        # Use the cost function defined in this module by default
        cost_function = basic_cost

    # apply mask to remove invalid portions of signals and allow fit to
    # only evaluate the model on the valid portion of the signals
    if 'mask' in data.signals.keys():
        log.info("Data len pre-mask: %d", data['mask'].shape[1])
        data = data.apply_mask()
        log.info("Data len post-mask: %d", data['mask'].shape[1])

    packer, unpacker, pack_bounds = mapper(modelspec)
    evaluator = nems.modelspec.evaluate
    sigma = packer(modelspec)
    error = cost_function(sigma, unpacker, modelspec, data, segmentor,
                          evaluator, metric)

    return error
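
A brief usage sketch, with hypothetical variables: rec_val is a validation recording and fitted_modelspec is a modelspec that has already been fit. basic_error re-evaluates the model internally and returns the scalar error (nmse by default).

# Hypothetical usage: report the final error of an already-fitted modelspec.
err = basic_error(rec_val, fitted_modelspec)
print('validation nmse: %.4f' % err)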
Example #4
def correlation_per_model(est, val, modelspecs, rec=None):
    '''
    Expects the lengths of est, val, and modelspecs to match since est[i]
    should have been evaluated on the fitted modelspecs[i], etc.
    Similar to standard_correlation, but saves correlation information
    to every first-module 'meta' entry instead of saving an average
    to only the first modelspec
    '''
    if not len(est) == len(val) == len(modelspecs):
        raise ValueError(
            "est, val, and modelspecs should all be lists"
            " of equal length. got: %d, %d, %d respectively."
            % (len(est), len(val), len(modelspecs)))

    modelspecs = copy.deepcopy(modelspecs)

    r_tests = [nmet.corrcoef(v, 'pred', 'resp') for v in val]
    #se_tests = [np.std(r)/np.sqrt(len(v)) for r, v in zip(r_tests, val)]
    mse_tests = [nmet.nmse(v, 'pred', 'resp') for v in val]
    ll_tests = [nmet.likelihood_poisson(v, 'pred', 'resp') for v in val]

    r_fits = [nmet.corrcoef(e, 'pred', 'resp') for e in est]
    #se_fits = [np.std(r)/np.sqrt(len(v)) for r, v in zip(r_fits, val)]
    mse_fits = [nmet.nmse(e, 'pred', 'resp') for e in est]
    ll_fits = [nmet.likelihood_poisson(e, 'pred', 'resp') for e in est]

    r_floors = [nmet.r_floor(v, 'pred', 'resp') for v in val]
    if rec is None:
        r_ceilings = [None] * len(r_floors)
    else:
        r_ceilings = [nmet.r_ceiling(v, rec, 'pred', 'resp') for v in val]

    for i, m in enumerate(modelspecs):
        m[0]['meta'].update({
            'r_test': r_tests[i],  #'se_test': se_tests[i],
            'mse_test': mse_tests[i],
            'll_test': ll_tests[i],
            'r_fit': r_fits[i],  #'se_fit': se_fits[i],
            'mse_fit': mse_fits[i],
            'll_fit': ll_fits[i],
            'r_floor': r_floors[i],
            'r_ceiling': r_ceilings[i],
        })

    return modelspecs
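
A usage sketch, assuming est_folds, val_folds, and fold_modelspecs are hypothetical equal-length lists where fold_modelspecs[i] has already been evaluated on fold i.

# Hypothetical n-fold inputs; the call pattern follows the function above.
scored = correlation_per_model(est_folds, val_folds, fold_modelspecs)
for i, m in enumerate(scored):
    # each modelspec now carries its own scores rather than a shared average
    print(i, m[0]['meta']['r_test'], m[0]['meta']['mse_test'])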
Example #5
def standard_correlation(est, val, modelspecs):

    # Compute scores for validation data
    r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
    mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]

    # Repeat for est data.
    r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
    mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]

    modelspecs[0][0]['meta']['r_test'] = np.mean(r_test)
    modelspecs[0][0]['meta']['mse_test'] = np.mean(mse_test)
    modelspecs[0][0]['meta']['ll_test'] = np.mean(ll_test)

    modelspecs[0][0]['meta']['r_fit'] = np.mean(r_fit)
    modelspecs[0][0]['meta']['mse_fit'] = np.mean(mse_fit)
    modelspecs[0][0]['meta']['ll_fit'] = np.mean(ll_fit)

    return modelspecs
Example #6
def standard_correlation_by_set(est, val, modelspecs):

    # Compute scores for validation data
    r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
    mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]

    # Repeat for est data.
    r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
    mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]
    for i in range(len(modelspecs)):
        modelspecs[i][0]['meta']['r_test'] = r_test[i]
        modelspecs[i][0]['meta']['mse_test'] = mse_test[i]
        modelspecs[i][0]['meta']['ll_test'] = ll_test[i]

        modelspecs[i][0]['meta']['r_fit'] = r_fit[i]
        modelspecs[i][0]['meta']['mse_fit'] = mse_fit[i]
        modelspecs[i][0]['meta']['ll_fit'] = ll_fit[i]

    return modelspecs
Example #7
def fit_nfold(data_list, modelspecs, generate_psth=False,
              fitter=scipy_minimize, analysis='fit_basic',
              metric=None, tolerances=None, module_sets=None,
              tol_iter=100, fit_iter=20, fit_kwargs={}):
    '''
    Takes njacks jackknifes, where each jackknife has some small
    fraction of data NaN'd out, and fits modelspec to them.

    TESTING:
    if input len(modelspecs) == len(data_list) then use each
      modelspec as initial condition for corresponding data_list fold
    if len(modelspecs) == 1, then use the same initial conditions for
      each fold

    '''

    nfolds = len(data_list)
    models = []
    if metric is None:
        metric = lambda d: metrics.nmse(d, 'pred', 'resp')

    for i in range(nfolds):
        if len(modelspecs) > 1:
            msidx = i
        else:
            msidx = 0

        log.info("Fitting fold %d/%d, modelspec %d", i+1, nfolds, msidx)

        if analysis == 'fit_basic':
            models += fit_basic(data_list[i], copy.deepcopy(modelspecs[msidx]),
                                fitter=fitter,
                                metric=metric,
                                metaname='fit_nfold',
                                fit_kwargs=fit_kwargs)
        elif analysis == 'fit_iteratively':
            models += fit_iteratively(
                        data_list[i], copy.deepcopy(modelspecs[msidx]),
                        fitter=fitter, metric=metric,
                        metaname='fit_nfold', fit_kwargs=fit_kwargs,
                        module_sets=module_sets, invert=False,
                        tolerances=tolerances, tol_iter=tol_iter,
                        fit_iter=fit_iter,
                        )

        else:
            # Unknown analysis
            # TODO: use getattr / import to make this more general for
            #       use with any analysis function?
            #       Maybe too much of a pain.
            raise NotImplementedError

    return models
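
A usage sketch for this version of fit_nfold, assuming est_folds is a hypothetical list of jackknifed recordings and init_modelspecs holds either a single shared initial condition or one initial condition per fold.

# Hypothetical inputs; fit_kwargs mirrors the scipy_minimize options used elsewhere on this page.
fold_models = fit_nfold(est_folds, init_modelspecs,
                        fitter=scipy_minimize,
                        analysis='fit_basic',
                        fit_kwargs={'options': {'ftol': 1e-7, 'maxiter': 1000}})
# fold_models is a flat list with one fitted modelspec per fold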
Example #8
def fit_basic_cd(modelspecs,
                 est,
                 max_iter=1000,
                 tolerance=1e-8,
                 IsReload=False,
                 shrinkage=0,
                 **context):
    '''
    A basic fit that optimizes every input modelspec, using coordinate
    descent as the fitter and nmse (or nmse_shrink, if shrinkage is set)
    as the cost metric.
    '''

    if not IsReload:
        if shrinkage:
            metric = lambda d: metrics.nmse_shrink(d, 'pred', 'resp')
        else:
            metric = lambda d: metrics.nmse(d, 'pred', 'resp')

        fit_kwargs = {'tolerance': tolerance, 'max_iter': max_iter}
        if type(est) is list:
            # jackknife!
            modelspecs_out = []
            njacks = len(modelspecs)
            for i, (m, d) in enumerate(zip(modelspecs, est), start=1):
                log.info("Fitting JK %d/%d", i, njacks)
                modelspecs_out += nems.analysis.api.fit_basic(
                    d,
                    m,
                    fit_kwargs=fit_kwargs,
                    metric=metric,
                    fitter=coordinate_descent)
            modelspecs = modelspecs_out
        else:
            # standard single shot
            modelspecs = [
                nems.analysis.api.fit_basic(est,
                                            modelspec,
                                            fit_kwargs=fit_kwargs,
                                            metric=metric,
                                            fitter=coordinate_descent)[0]
                for modelspec in modelspecs
            ]

    return {'modelspecs': modelspecs}
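
A usage sketch of the xforms-style wrapper above, with hypothetical modelspecs/est variables: est may be a single recording or a list of jackknife recordings, and the returned dict updates the fitting context.

# Hypothetical call; keyword names come from the signature above.
ctx = fit_basic_cd(modelspecs=modelspecs, est=est,
                   max_iter=500, tolerance=1e-7, shrinkage=1)
modelspecs = ctx['modelspecs']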
Example #9
def fit_nfold(data_list,
              modelspecs,
              generate_psth=False,
              fitter=scipy_minimize,
              metric=None,
              fit_kwargs={'options': {
                  'ftol': 1e-7,
                  'maxiter': 1000
              }}):
    '''
    Takes njacks jackknifes, where each jackknife has some small
    fraction of data NaN'd out, and fits modelspec to them.
    '''
    nfolds = len(data_list)
    #    if type(modelspec) is not list:
    #        modelspecs=[modelspec]*nfolds
    #    elif len(modelspec)==1:
    #        modelspec=modelspec*nfolds

    models = []
    if not metric:
        metric = lambda d: metrics.nmse(d, 'pred', 'resp')

    for i in range(nfolds):
        log.info("Fitting fold {}/{}".format(i + 1, nfolds))
        tms = nems.initializers.prefit_to_target(
            data_list[i],
            copy.deepcopy(modelspecs[0]),
            nems.analysis.api.fit_basic,
            'levelshift',
            fitter=scipy_minimize,
            fit_kwargs={'options': {
                'ftol': 1e-4,
                'maxiter': 500
            }})

        models += fit_basic(data_list[i],
                            tms,
                            fitter=fitter,
                            metric=metric,
                            metaname='fit_nfold',
                            fit_kwargs=fit_kwargs)

    return models
Example #10
def init_pop_rand(est, modelspec, IsReload=False, start_count=1,
                  pc_signal='rand_resp', whiten=True, **context):
    """
    initialize population model with random combinations of responses.
    generates random response combinations and passes them through to
    init_pop_pca()
    :param est: recording object with fit data
    :param modelspec: un-fit modelspec
    :param IsReload: don't fit if IsReload=True
    :param start_count: number of random initializations to try
    :param pc_signal: name of signal to generate with random combinations of responses
    :param whiten: whiten the response combinations (passed to _random_resp_combos)
    :param context: dictionary of other context variables
    :return: context dict from init_pop_pca for the best (lowest-nmse) random start
    """
    if IsReload:
        return {}

    # guess at number of subspace dimensions
    fit_set_all, fit_set_slice = _figure_out_mod_split(modelspec)
    dim_count = modelspec[fit_set_slice[0]]['phi']['coefficients'].shape[1]

    mset = []
    E = np.ones(start_count)
    for i in range(start_count):
        log.info('Rand init: %d/%d', i, start_count)
        rec = est.copy()
        rec[pc_signal] = _random_resp_combos(
            rec['resp'], dim_count=dim_count, whiten=whiten)

        log.info('rec signal: %s (%d x %d)', pc_signal,
                 rec[pc_signal].shape[0], rec[pc_signal].shape[1])

        mset.append(init_pop_pca(rec, modelspec, pc_signal=pc_signal, **context))
        rec = mset[-1]['modelspec'].evaluate(rec)
        E[i] = metrics.nmse(rec)

    imax = np.nanargmin(E)
    for i in range(start_count):
        ss = "**" if (i == imax) else ""
        log.info('i=%d E=%.3e %s', i, E[i], ss)

    return mset[imax]
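
A usage sketch, assuming est and modelspec are a hypothetical fit recording and un-fit population modelspec; the best of several random starts (lowest nmse) is returned as a context dict.

# Hypothetical call; keyword names come from the signature above.
ctx = init_pop_rand(est, modelspec, start_count=5, whiten=True)
modelspec = ctx['modelspec']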
Example #11
def fit_state_nfold(data_list,
                    modelspecs,
                    generate_psth=False,
                    fitter=scipy_minimize,
                    metric=None,
                    fit_kwargs={}):
    '''
    Generic state-dependent-stream model fitter
    Takes njacks jackknifes, where each jackknife has some small
    fraction of data NaN'd out, and fits modelspec to them.

    DEPRECATED? REPLACED BY STANDARD nfold?
    '''
    nfolds = len(data_list)

    models = []
    if not metric:
        metric = lambda d: metrics.nmse(d, 'pred', 'resp')

    for i in range(nfolds):
        log.info("Fitting fold {}/{}".format(i + 1, nfolds))
        tms = nems.initializers.prefit_to_target(
            data_list[i],
            copy.deepcopy(modelspecs[0]),
            nems.analysis.api.fit_basic,
            'merge_channels',
            fitter=scipy_minimize,
            fit_kwargs={'options': {
                'tolerance': 1e-4,
                'max_iter': 500
            }})

        models += fit_basic(data_list[i],
                            tms,
                            fitter=fitter,
                            metric=metric,
                            metaname='fit_nfold',
                            fit_kwargs=fit_kwargs)

    return models
Example #12
def standard_correlation_by_epochs(est,
                                   val,
                                   modelspec=None,
                                   modelspecs=None,
                                   epochs_list=None,
                                   rec=None):
    """
    Does the same thing as standard_correlation, excpet with subsets of data
    defined by epochs_list

    To use this, first add epochs to define subsets of data.
    Then, pass epochs_list as a list of subsets to test.
    For example, ['A', 'B', ['A', 'B']] will measure correlations separately
     for all epochs marked 'A', all epochs marked 'B', and all epochs marked
     'A'or 'B'
    """
    # some crazy stuff to maintain backward compatibility
    # eventually we will only support modelspec and deprecate support for
    # modelspecs lists
    if modelspecs is not None:
        log.warning('Use of modelspecs list is deprecated')
        modelspec = modelspecs[0]
        list_modelspec = True
    else:
        list_modelspec = False

    for epochs in epochs_list:
        # Create a label for this subset. If epochs is a list, join elements with "+"
        epoch_list_str = "+".join([str(x) for x in epochs])

        # Make a copy for this subset
        val_copy = copy.deepcopy(val)
        for vc in val_copy:
            vc['resp'] = vc['resp'].select_epochs(epochs)

        est_copy = copy.deepcopy(est)
        for ec in est_copy:
            ec['resp'] = ec['resp'].select_epochs(epochs)

        # Compute scores for validation data
        r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val_copy]
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val_copy]
        ll_test = [
            nmet.likelihood_poisson(p, 'pred', 'resp') for p in val_copy
        ]

        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]
        if rec is not None:
            r_ceiling = [
                nmet.r_ceiling(p, rec, 'pred', 'resp') for p in val_copy
            ]

        # Repeat for est data.
        r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est_copy]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est_copy]
        ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est_copy]

        # Average
        modelspec.meta[epoch_list_str] = {}
        modelspec.meta[epoch_list_str]['r_test'] = np.mean(r_test)
        modelspec.meta[epoch_list_str]['mse_test'] = np.mean(mse_test)
        modelspec.meta[epoch_list_str]['ll_test'] = np.mean(ll_test)

        modelspec.meta[epoch_list_str]['r_fit'] = np.mean(r_fit)
        modelspec.meta[epoch_list_str]['r_floor'] = np.mean(r_floor)
        if rec is not None:
            modelspec.meta[epoch_list_str]['r_ceiling'] = np.mean(r_ceiling)
        modelspec.meta[epoch_list_str]['mse_fit'] = np.mean(mse_fit)
        modelspec.meta[epoch_list_str]['ll_fit'] = np.mean(ll_fit)

    if list_modelspec:
        # backward compatibility
        return [modelspec]
    else:
        return modelspec
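
A usage sketch of the newer single-modelspec API, with hypothetical est_set, val_set, modelspec, and rec variables; per-subset averages land in modelspec.meta under the "+"-joined epoch label.

# Hypothetical inputs; only the call pattern and meta keys come from the function above.
modelspec = standard_correlation_by_epochs(
    est_set, val_set, modelspec=modelspec,
    epochs_list=['A', 'B', ['A', 'B']], rec=rec)
print(modelspec.meta['A+B']['r_test'], modelspec.meta['A+B']['r_ceiling'])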
Example #13
def fit_basic(data,
              modelspec,
              fitter=scipy_minimize,
              cost_function=None,
              segmentor=nems.segmentors.use_all_data,
              mapper=nems.fitters.mappers.simple_vector,
              metric=lambda data: metrics.nmse(data, 'pred', 'resp'),
              metaname='fit_basic',
              fit_kwargs={},
              require_phi=True):
    '''
    Required Arguments:
     data          A recording object
     modelspec     A modelspec object

    Optional Arguments:
     fitter        A function of (sigma, costfn) that tests various points
                   in fitspace (i.e. sigmas) using the cost function costfn,
                   and hopefully returns a better sigma after some time.
     mapper        A class that has two methods, pack and unpack, which define
                   the mapping between modelspecs and a fitter's fitspace.
     segmentor     A function that selects a subset of the data during the
                   fitting process. This is NOT the same as est/val data splits.
     metric        A function of a Recording that returns an error value
                   that is to be minimized.

    Returns
    A list containing a single modelspec, which has the best parameters found
    by this fitter.
    '''
    start_time = time.time()

    if cost_function is None:
        # Use the cost function defined in this module by default
        cost_function = basic_cost

    if require_phi:
        # Ensure that phi exists for all modules; choose prior mean if not found
        for i, m in enumerate(modelspec):
            if not m.get('phi'):
                log.debug('Phi not found for module, using mean of prior: %s',
                          m)
                m = nems.priors.set_mean_phi([m])[0]  # Inits phi for 1 module
                modelspec[i] = m

    ms.fit_mode_on(modelspec)

    # Create the mapper object that translates to and from modelspecs.
    # It has two methods that, when defined as mathematical functions, are:
    #    .pack(modelspec) -> fitspace_point
    #    .unpack(fitspace_point) -> modelspec
    packer, unpacker = mapper(modelspec)

    # A function to evaluate the modelspec on the data
    evaluator = nems.modelspec.evaluate

    my_cost_function = cost_function
    my_cost_function.counter = 0

    # Freeze everything but sigma, since that's all the fitter should be
    # updating.
    cost_fn = partial(my_cost_function,
                      unpacker=unpacker,
                      modelspec=modelspec,
                      data=data,
                      segmentor=segmentor,
                      evaluator=evaluator,
                      metric=metric)

    # get initial sigma value representing some point in the fit space
    sigma = packer(modelspec)

    # Results should be a list of modelspecs
    # (might only be one in list, but still should be packaged as a list)
    improved_sigma = fitter(sigma, cost_fn, **fit_kwargs)
    improved_modelspec = unpacker(improved_sigma)

    elapsed_time = (time.time() - start_time)

    # TODO: Should this maybe be moved to a higher level
    # so it applies to ALL the fitters?
    ms.fit_mode_off(improved_modelspec)
    ms.set_modelspec_metadata(improved_modelspec, 'fitter', metaname)
    ms.set_modelspec_metadata(improved_modelspec, 'fit_time', elapsed_time)
    results = [copy.deepcopy(improved_modelspec)]
    return results
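
A minimal single-fit sketch, assuming rec is a hypothetical recording with 'stim' and 'resp' signals and modelspec has priors defined (phi is filled from prior means when missing).

# Hypothetical inputs; fitter and fit_kwargs mirror the scipy_minimize usage elsewhere on this page.
fitted = fit_basic(rec, modelspec,
                   fitter=scipy_minimize,
                   metric=lambda d: metrics.nmse(d, 'pred', 'resp'),
                   fit_kwargs={'options': {'ftol': 1e-7, 'maxiter': 1000}})
best_modelspec = fitted[0]  # fit_basic returns a single-element list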
Example #14
def standard_correlation(est, val, modelspecs, rec=None, use_mask=True):
    # use_mask: mask before computing metrics (if a mask signal exists)
    # Compute scores for validation data
    r_ceiling = 0
    if type(val) is not list:
        if ('mask' in val.signals.keys()) and use_mask:
            v = val.apply_mask()
            e = est.apply_mask()
        else:
            v = val
            e = est

        r_test, se_test = nmet.j_corrcoef(v, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(e, 'pred', 'resp')
        r_floor = nmet.r_floor(v, 'pred', 'resp')
        if rec is not None:
            # print('running r_ceiling')
            r_ceiling = nmet.r_ceiling(v, rec, 'pred', 'resp')

        mse_test, se_mse_test = nmet.j_nmse(v, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(e, 'pred', 'resp')

    elif len(val) == 1:
        if ('mask' in val[0].signals.keys()) and use_mask:
            v = val[0].apply_mask()
            e = est[0].apply_mask()
        else:
            v = val[0]
            e = est[0]

        r_test, se_test = nmet.j_corrcoef(v, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(e, 'pred', 'resp')
        r_floor = nmet.r_floor(v, 'pred', 'resp')
        if rec is not None:
            try:
                # print('running r_ceiling')
                r_ceiling = nmet.r_ceiling(v, rec, 'pred', 'resp')
            except Exception:
                r_ceiling = 0

        mse_test, se_mse_test = nmet.j_nmse(v, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(e, 'pred', 'resp')

    else:
        # unclear if this ever executes since jackknifed val sets are
        # typically already merged
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
        r_test = np.mean(r)
        se_test = np.std(r) / np.sqrt(len(val))
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
        r_fit = np.mean(r)
        se_fit = np.std(r) / np.sqrt(len(val))
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]

        # TODO compute r_ceiling for multiple val sets
        r_ceiling = 0

        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]

        se_mse_test = np.std(mse_test) / np.sqrt(len(val))
        se_mse_fit = np.std(mse_fit) / np.sqrt(len(est))
        mse_test = np.mean(mse_test)
        mse_fit = np.mean(mse_fit)

    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]

    modelspecs[0][0]['meta']['r_test'] = r_test
    modelspecs[0][0]['meta']['se_test'] = se_test
    modelspecs[0][0]['meta']['r_floor'] = r_floor
    modelspecs[0][0]['meta']['mse_test'] = mse_test
    modelspecs[0][0]['meta']['se_mse_test'] = se_mse_test
    modelspecs[0][0]['meta']['ll_test'] = np.mean(ll_test)

    modelspecs[0][0]['meta']['r_fit'] = r_fit
    modelspecs[0][0]['meta']['se_fit'] = se_fit
    modelspecs[0][0]['meta']['r_ceiling'] = r_ceiling
    modelspecs[0][0]['meta']['mse_fit'] = mse_fit
    modelspecs[0][0]['meta']['se_mse_fit'] = se_mse_fit
    modelspecs[0][0]['meta']['ll_fit'] = np.mean(ll_fit)

    return modelspecs
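
A usage sketch of this masked variant, with hypothetical est, val, modelspecs, and rec variables; when a 'mask' signal is present (and use_mask=True), masked-out samples are dropped before scoring.

# Hypothetical inputs; the meta keys printed below are set by the function above.
modelspecs = standard_correlation(est, val, modelspecs, rec=rec, use_mask=True)
meta = modelspecs[0][0]['meta']
print(meta['r_test'], meta['se_test'], meta['r_floor'], meta['r_ceiling'])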
Example #15
def fit_basic(data, modelspec,
              fitter=scipy_minimize, cost_function=None,
              segmentor=nems.segmentors.use_all_data,
              mapper=nems.fitters.mappers.simple_vector,
              metric=None,
              metaname='fit_basic', fit_kwargs={}, require_phi=True):
    '''
    Required Arguments:
     data          A recording object
     modelspec     A modelspec object

    Optional Arguments:
     fitter        A function of (sigma, costfn) that tests various points
                   in fitspace (i.e. sigmas) using the cost function costfn,
                   and hopefully returns a better sigma after some time.
     mapper        A class that has two methods, pack and unpack, which define
                   the mapping between modelspecs and a fitter's fitspace.
     segmentor     A function that selects a subset of the data during the
                   fitting process. This is NOT the same as est/val data splits.
     metric        A function of a Recording that returns an error value
                   that is to be minimized.

    Returns
    A list containing a single modelspec, which has the best parameters found
    by this fitter.
    '''
    start_time = time.time()

    modelspec = copy.deepcopy(modelspec)
    output_name = modelspec.meta.get('output_name', 'resp')

    if metric is None:
        metric = lambda data: metrics.nmse(data, 'pred', output_name)

    if cost_function is None:
        # Use the cost function defined in this module by default
        cost_function = basic_cost

    if require_phi:
        # Ensure that phi exists for all modules;
        # choose prior mean if not found
        for i, m in enumerate(modelspec.modules):
            if ('phi' not in m.keys()) and ('prior' in m.keys()):
                log.debug('Phi not found for module, using mean of prior: %s', m)
                m = nems.priors.set_mean_phi([m])[0]  # Inits phi for 1 module
                modelspec[i] = m

    # apply mask to remove invalid portions of signals and allow fit to
    # only evaluate the model on the valid portion of the signals
    if 'mask' in data.signals.keys():
        log.info("Data len pre-mask: %d", data['mask'].shape[1])
        data = data.apply_mask()
        log.info("Data len post-mask: %d", data['mask'].shape[1])

    # turn on "fit mode". currently this serves one purpose, for normalization
    # parameters to be re-fit for the output of each module that uses
    # normalization. does nothing if normalization is not being used.
    ms.fit_mode_on(modelspec, data)

    # Create the mapper functions that translates to and from modelspecs.
    # It has three functions that, when defined as mathematical functions, are:
    #    .pack(modelspec) -> fitspace_point
    #    .unpack(fitspace_point) -> modelspec
    #    .bounds(modelspec) -> fitspace_bounds
    packer, unpacker, pack_bounds = mapper(modelspec)

    # A function to evaluate the modelspec on the data
    evaluator = nems.modelspec.evaluate

    my_cost_function = cost_function
    my_cost_function.counter = 0

    # Freeze everything but sigma, since that's all the fitter should be
    # updating.
    cost_fn = partial(my_cost_function,
                      unpacker=unpacker, modelspec=modelspec,
                      data=data, segmentor=segmentor, evaluator=evaluator,
                      metric=metric)

    # get initial sigma value representing some point in the fit space,
    # and corresponding bounds for each value
    sigma = packer(modelspec)
    bounds = pack_bounds(modelspec)

    # Results should be a list of modelspecs
    # (might only be one in list, but still should be packaged as a list)
    improved_sigma = fitter(sigma, cost_fn, bounds=bounds, **fit_kwargs)
    improved_modelspec = unpacker(improved_sigma)
    elapsed_time = (time.time() - start_time)

    start_err = cost_fn(sigma)
    final_err = cost_fn(improved_sigma)
    log.info("Delta error: %.06f - %.06f = %e", start_err, final_err, final_err-start_err)

    # TODO: Should this maybe be moved to a higher level
    # so it applies to ALL the fitters?
    ms.fit_mode_off(improved_modelspec)
    ms.set_modelspec_metadata(improved_modelspec, 'fitter', metaname)
    ms.set_modelspec_metadata(improved_modelspec, 'n_parms',
                              len(improved_sigma))
    if modelspec.fit_count == 1:
        improved_modelspec.meta['fit_time'] = elapsed_time
        improved_modelspec.meta['loss'] = final_err
    else:
        fit_index = modelspec.fit_index
        if fit_index == 0:
            improved_modelspec.meta['fit_time'] = np.zeros(improved_modelspec.fit_count)
            improved_modelspec.meta['loss'] = np.zeros(improved_modelspec.fit_count)
        improved_modelspec.meta['fit_time'][fit_index] = elapsed_time
        improved_modelspec.meta['loss'][fit_index] = final_err

    if type(improved_modelspec) is list:
        return [copy.deepcopy(improved_modelspec)]
    else:
        return improved_modelspec.copy()
Example #16
def metric(d):
    return metrics.nmse(d, 'pred', 'resp')
Example #17
def standard_correlation(est, val, modelspecs, rec=None):

    # Compute scores for validation data
    r_ceiling = 0
    if type(val) is not list:
        r_test, se_test = nmet.j_corrcoef(val, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(est, 'pred', 'resp')
        r_floor = nmet.r_floor(val, 'pred', 'resp')
        if rec is not None:
            # print('running r_ceiling')
            r_ceiling = nmet.r_ceiling(val, rec, 'pred', 'resp')

        mse_test, se_mse_test = nmet.j_nmse(val, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(est, 'pred', 'resp')

    elif len(val) == 1:
        r_test, se_test = nmet.j_corrcoef(val[0], 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(est[0], 'pred', 'resp')
        r_floor = nmet.r_floor(val[0], 'pred', 'resp')
        if rec is not None:
            # print('running r_ceiling')
            r_ceiling = nmet.r_ceiling(val[0], rec, 'pred', 'resp')

        mse_test, se_mse_test = nmet.j_nmse(val[0], 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(est[0], 'pred', 'resp')

    else:
        # unclear if this ever executes since jackknifed val sets are
        # typically already merged
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
        r_test = np.mean(r)
        se_test = np.std(r) / np.sqrt(len(val))
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
        r_fit = np.mean(r)
        se_fit = np.std(r) / np.sqrt(len(val))
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]

        # TODO compute r_ceiling for multiple val sets
        r_ceiling = 0

        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]

        se_mse_test = np.std(mse_test) / np.sqrt(len(val))
        se_mse_fit = np.std(mse_fit) / np.sqrt(len(est))
        mse_test = np.mean(mse_test)
        mse_fit = np.mean(mse_fit)

    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]

    modelspecs[0][0]['meta']['r_test'] = r_test
    modelspecs[0][0]['meta']['se_test'] = se_test
    modelspecs[0][0]['meta']['r_floor'] = r_floor
    modelspecs[0][0]['meta']['mse_test'] = mse_test
    modelspecs[0][0]['meta']['se_mse_test'] = se_mse_test
    modelspecs[0][0]['meta']['ll_test'] = np.mean(ll_test)

    modelspecs[0][0]['meta']['r_fit'] = r_fit
    modelspecs[0][0]['meta']['se_fit'] = se_fit
    modelspecs[0][0]['meta']['r_ceiling'] = r_ceiling
    modelspecs[0][0]['meta']['mse_fit'] = mse_fit
    modelspecs[0][0]['meta']['se_mse_fit'] = se_mse_fit
    modelspecs[0][0]['meta']['ll_fit'] = np.mean(ll_fit)

    return modelspecs