import copy
import logging

import numpy as np

from nems.registry import KeywordRegistry
from nems.plugins import default_keywords
from nems.utils import find_module
from nems.analysis.api import fit_basic
from nems.fitters.api import scipy_minimize
import nems.priors as priors
import nems.modelspec as ms
import nems.metrics.api as metrics
from nems import get_setting

log = logging.getLogger(__name__)

default_kws = KeywordRegistry()
default_kws.register_module(default_keywords)
default_kws.register_plugins(get_setting('KEYWORD_PLUGINS'))


def from_keywords(keyword_string, registry=None, rec=None, meta={}):
    '''
    Returns a modelspec created by splitting keyword_string on hyphens and
    replacing each keyword with what is found in the nems.keywords.defaults
    registry. You may provide your own keyword registry using the
    registry={...} argument.
    '''
    if registry is None:
        registry = default_kws
    keywords = keyword_string.split('-')

    # Lookup the modelspec fragments in the registry
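    # Usage sketch: with the module-level default registry above, a call such
    # as from_keywords('wc.18x2-fir.2x15-lvl.1') splits the string on hyphens
    # and looks up each keyword ('wc.18x2', 'fir.2x15', 'lvl.1') in the
    # registry; the returned module templates are assembled, in order, into
    # the modelspec. The example keyword string here is hypothetical.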
        'githash': os.environ.get('CODEHASH', ''),
        'recording': loadkey}

    load_keywords, model_keywords, fit_keywords = modelname.split("_")

    # xforms_kwargs = {'cellid': cellid, 'batch': int(batch)}
    xforms_kwargs = {}
    xforms_init_context = {'cellid': cellid, 'batch': int(batch),
                           'meta': meta, 'keywordstring': model_keywords}

    xforms_lib = KeywordRegistry(**xforms_kwargs)
    xforms_lib.register_modules([default_loaders, default_fitters,
                                 default_initializers])
    xforms_lib.register_plugins(get_setting('XFORMS_PLUGINS'))

    keyword_lib = KeywordRegistry()
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(get_setting('KEYWORD_PLUGINS'))

    # Generate the xfspec, which defines the sequence of events
    # to run through (like a packaged-up script)
    xfspec = []

    # 0) set up initial context
    xfspec.append(['nems.xforms.init_context', xforms_init_context])

    # 1) Load the data
    xfspec.extend(xhelp._parse_kw_string(load_keywords, xforms_lib))

    # 2) generate a modelspec
    xfspec.append(['nems.xforms.init_from_keywords',
                   {'registry': keyword_lib}])
    # xfspec.append(['nems.xforms.init_from_keywords', {}])
def loader_registry():
    loaders = KeywordRegistry(recording_uri='dummy_recording_uri')
    loaders.register_module(default_loaders)
    return loaders
def fit_xforms_model(batch, cellid, modelname, save_analysis=False):

    # parse modelname into loaders, modelspecs, and fit keys
    load_keywords, model_keywords, fit_keywords = modelname.split("_")

    # construct the meta data dict
    meta = {'batch': batch, 'cellid': cellid, 'modelname': modelname,
            'loader': load_keywords, 'fitkey': fit_keywords,
            'modelspecname': model_keywords,
            'username': '******', 'labgroup': 'lbhb', 'public': 1,
            'githash': os.environ.get('CODEHASH', ''),
            'recording': load_keywords}

    xforms_kwargs = {}
    xforms_init_context = {'cellid': cellid, 'batch': int(batch)}
    recording_uri = None
    kw_kwargs = {}

    xforms_lib = KeywordRegistry(**xforms_kwargs)
    xforms_lib.register_modules(
        [default_loaders, default_fitters, default_initializers])
    xforms_lib.register_plugins(get_setting('XFORMS_PLUGINS'))

    keyword_lib = KeywordRegistry()
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(get_setting('KEYWORD_PLUGINS'))

    # Generate the xfspec, which defines the sequence of events
    # to run through (like a packaged-up script)
    xfspec = []

    # 0) set up initial context
    if xforms_init_context is None:
        xforms_init_context = {}
    if kw_kwargs is not None:
        xforms_init_context['kw_kwargs'] = kw_kwargs
    xforms_init_context['keywordstring'] = model_keywords
    xforms_init_context['meta'] = meta
    xfspec.append(['nems.xforms.init_context', xforms_init_context])

    # 1) Load the data
    xfspec.extend(xhelp._parse_kw_string(load_keywords, xforms_lib))

    # 2) generate a modelspec
    xfspec.append(
        ['nems.xforms.init_from_keywords', {'registry': keyword_lib}])

    # 3) fit the data
    xfspec.extend(xhelp._parse_kw_string(fit_keywords, xforms_lib))

    # Generate a prediction
    xfspec.append(['nems.xforms.predict', {}])

    # 4) add some performance statistics
    xfspec.append(['nems.xforms.add_summary_statistics', {}])

    # 5) plot
    # xfspec.append(['nems_lbhb.lv_helpers.add_summary_statistics', {}])

    # Create a log stream set to the debug level; add it as a root log handler
    log_stream = io.StringIO()
    ch = logging.StreamHandler(log_stream)
    ch.setLevel(logging.DEBUG)
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(fmt)
    ch.setFormatter(formatter)
    rootlogger = logging.getLogger()
    rootlogger.addHandler(ch)

    ctx = {}
    for xfa in xfspec:
        ctx = xforms.evaluate_step(xfa, ctx)

    # Close the log, remove the handler, and add the 'log' string to context
    log.info('Done (re-)evaluating xforms.')
    ch.close()
    rootlogger.removeHandler(ch)
    log_xf = log_stream.getvalue()

    modelspec = ctx['modelspec']

    if save_analysis:
        # save results
        if get_setting('USE_NEMS_BAPHY_API'):
            prefix = ('http://' + get_setting('NEMS_BAPHY_API_HOST') + ":" +
                      str(get_setting('NEMS_BAPHY_API_PORT')) + '/results/')
        else:
            prefix = get_setting('NEMS_RESULTS_DIR')

        if type(cellid) is list:
            cell_name = cellid[0].split("-")[0]
        else:
            cell_name = cellid

        destination = os.path.join(prefix, str(batch), cell_name,
                                   modelspec.get_longname())
        modelspec.meta['modelpath'] = destination
        modelspec.meta.update(meta)

        log.info('Saving modelspec(s) to {0} ...'.format(destination))
        xforms.save_analysis(destination, recording=ctx['rec'],
                             modelspec=modelspec, xfspec=xfspec,
                             figures=[], log=log_xf)

        # save performance and some other metadata in database Results table
        nd.update_results_table(modelspec)

    return xfspec, ctx
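# A hedged usage sketch (not from the source): the batch, cellid and
# modelname below are hypothetical placeholders; any valid batch/cell and
# NEMS-formatted modelname (loader_model_fitter keywords joined by '_')
# could be substituted.
if __name__ == '__main__':
    example_batch = 289                # hypothetical batch id
    example_cellid = 'TAR010c-18-1'    # hypothetical cell id
    example_modelname = 'ozgf.fs100.ch18-ld-sev_wc.18x2.g-fir.2x15-dexp.1_init-basic'
    xfspec, ctx = fit_xforms_model(example_batch, example_cellid,
                                   example_modelname, save_analysis=False)
    # summary statistics (e.g. r_test) end up in modelspec.meta, if computed
    print(ctx['modelspec'].meta.get('r_test'))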
def model_registry():
    models = KeywordRegistry()
    models.register_module(default_keywords)
    models.register_plugins(['tests/resources/plugin1.py'])
    return models
def fitter_registry():
    fitters = KeywordRegistry()
    fitters.register_module(default_fitters)
    return fitters
def model_registry():
    models = KeywordRegistry()
    models.register_module(default_keywords)
    return models
def generate_xforms_spec(recording_uri=None, modelname=None, meta={},
                         xforms_kwargs={}, kw_kwargs={},
                         autoPred=True, autoStats=True, autoPlot=True):
    """
    Generate an xforms spec based on a modelname, which can then be evaluated
    in order to process and fit a model.

    Parameters
    ----------
    recording_uri : str
        Location to load recording from, e.g. a filepath or URL.
    modelname : str
        NEMS-formatted modelname, e.g. 'ld-sev_wc.18x2-fir.2x15-dexp.1_basic'
        The modelname will be parsed into a series of xforms functions using
        xforms and keyword registries.
    meta : dict
        Additional keyword arguments for nems.initializers.init_from_keywords
    xforms_kwargs : dict
        Additional keyword arguments for the xforms registry
    kw_kwargs : dict
        Additional keyword arguments for the keyword registry
    autoPred : boolean
        If true, will automatically append nems.xforms.predict to the xfspec
        if it is not already present.
    autoStats : boolean
        If true, will automatically append nems.xforms.add_summary_statistics
        to the xfspec if it is not already present.
    autoPlot : boolean
        If true, will automatically append nems.xforms.plot_summary to the
        xfspec if it is not already present.

    Returns
    -------
    xfspec : list of 2- or 4-element lists

    """
    log.info('Initializing modelspec(s) for recording/model {0}/{1}...'
             .format(recording_uri, modelname))

    # parse modelname and assemble xfspecs for loader and fitter

    # TODO: naming scheme change: pre_modules, modules, post_modules?
    #       or something along those lines... since they aren't really
    #       just loaders and fitters
    load_keywords, model_keywords, fit_keywords = escaped_split(modelname, '_')
    if recording_uri is not None:
        xforms_lib = KeywordRegistry(recording_uri=recording_uri,
                                     **xforms_kwargs)
    else:
        xforms_lib = KeywordRegistry(**xforms_kwargs)

    xforms_lib.register_modules(
        [default_loaders, default_fitters, default_initializers])
    xforms_lib.register_plugins(get_setting('XFORMS_PLUGINS'))

    keyword_lib = KeywordRegistry(**kw_kwargs)
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(get_setting('KEYWORD_PLUGINS'))

    # Generate the xfspec, which defines the sequence of events
    # to run through (like a packaged-up script)
    xfspec = []

    # 1) Load the data
    xfspec.extend(_parse_kw_string(load_keywords, xforms_lib))

    # 2) generate a modelspec
    xfspec.append(['nems.xforms.init_from_keywords',
                   {'keywordstring': model_keywords, 'meta': meta,
                    'registry': keyword_lib}])

    # 3) fit the data
    xfspec.extend(_parse_kw_string(fit_keywords, xforms_lib))

    # TODO: need to make this smarter about how to handle the ordering
    #       of pred/stats when only stats is overridden.
    #       For now just have to manually include pred if you want to
    #       do your own stats or plot xform (like using stats.pm)

    # 4) generate a prediction (optional)
    if autoPred:
        if not _xform_exists(xfspec, 'nems.xforms.predict'):
            xfspec.append(['nems.xforms.predict', {}])

    # 5) add some performance statistics (optional)
    if autoStats:
        if not _xform_exists(xfspec, 'nems.xforms.add_summary_statistics'):
            xfspec.append(['nems.xforms.add_summary_statistics', {}])

    # 6) generate plots (optional)
    if autoPlot:
        if not _xform_exists(xfspec, 'nems.xforms.plot_summary'):
            # log.info('Adding summary plot to xfspec...')
            xfspec.append(['nems.xforms.plot_summary', {}])

    return xfspec
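# A hedged usage sketch (not from the source). The recording URI is a
# hypothetical path, the modelname is the example from the docstring above,
# and the helper name _example_generate_and_evaluate is invented for
# illustration. It assumes nems.xforms is importable as `xforms`; each xfspec
# step is evaluated against a shared context dict, as in fit_xforms_model.
def _example_generate_and_evaluate():
    from nems import xforms  # assumed import

    xfspec = generate_xforms_spec(
        recording_uri='/tmp/recordings/example.tgz',       # hypothetical path
        modelname='ld-sev_wc.18x2-fir.2x15-dexp.1_basic',  # docstring example
        autoPlot=False)

    ctx = {}
    for step in xfspec:
        ctx = xforms.evaluate_step(step, ctx)

    return xfspec, ctx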
def init_pop_pca(est, modelspec, flip_pcs=False, IsReload=False, **context):
    """ fit up through the fir module of a population model using the pca
    signal"""

    if IsReload:
        return {}

    # preserve input modelspec. necessary?
    modelspec = copy.deepcopy(modelspec)

    ifir = find_module('filter_bank', modelspec)
    iwc = find_module('weight_channels', modelspec)

    chan_count = modelspec[ifir]['fn_kwargs']['bank_count']
    chan_per_bank = int(modelspec[iwc]['prior']['mean'][1]['mean'].shape[0] /
                        chan_count)
    rec = est.copy()
    tmodelspec = copy.deepcopy(modelspec)

    kw = [m['id'] for m in modelspec[:iwc]]

    wc = modelspec[iwc]['id'].split(".")
    wcs = wc[1].split("x")
    wcs[1] = str(chan_per_bank)
    wc[1] = "x".join(wcs)
    wc = ".".join(wc)

    fir = modelspec[ifir]['id'].split(".")
    fircore = fir[1].split("x")
    fir[1] = "x".join(fircore[:-1])
    fir = ".".join(fir)

    kw.append(wc)
    kw.append(fir)
    kw.append("lvl.1")
    keywordstring = "-".join(kw)

    keyword_lib = KeywordRegistry()
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(get_setting('KEYWORD_PLUGINS'))

    if flip_pcs:
        pc_fit_count = int(np.ceil(chan_count / 2))
    else:
        pc_fit_count = chan_count

    for pc_idx in range(pc_fit_count):
        r = rec['pca'].extract_channels([rec['pca'].chans[pc_idx]])
        m = np.nanmean(r.as_continuous())
        d = np.nanstd(r.as_continuous())
        rec['resp'] = r._modified_copy((r._data - m) / d)

        tmodelspec = init.from_keywords(keyword_string=keywordstring,
                                        meta={}, registry=keyword_lib,
                                        rec=rec)

        tolerance = 1e-4
        tmodelspec = init.prefit_LN(rec, tmodelspec,
                                    tolerance=tolerance, max_iter=700)

        # save results back into main modelspec
        itfir = find_module('fir', tmodelspec)
        itwc = find_module('weight_channels', tmodelspec)

        if pc_idx == 0:
            for tm, m in zip(tmodelspec[:(iwc + 1)], modelspec[:(iwc + 1)]):
                m['phi'] = tm['phi'].copy()
            modelspec[ifir]['phi'] = tmodelspec[itfir]['phi'].copy()
        else:
            for k, v in tmodelspec[iwc]['phi'].items():
                modelspec[iwc]['phi'][k] = np.concatenate(
                    (modelspec[iwc]['phi'][k], v))
            for k, v in tmodelspec[itfir]['phi'].items():
                # if k == 'coefficients':
                #     v /= 100  # kludge
                modelspec[ifir]['phi'][k] = np.concatenate(
                    (modelspec[ifir]['phi'][k], v))

        if flip_pcs and (pc_idx * 2 < chan_count):
            # add negative flipped version of fit
            for k, v in tmodelspec[iwc]['phi'].items():
                modelspec[iwc]['phi'][k] = np.concatenate(
                    (modelspec[iwc]['phi'][k], v))
            for k, v in tmodelspec[itfir]['phi'].items():
                # if k == 'coefficients':
                #     v /= 100  # kludge
                modelspec[ifir]['phi'][k] = np.concatenate(
                    (-modelspec[ifir]['phi'][k], v))

    respcount = est['resp'].shape[0]
    fit_set_all, fit_set_slice = _figure_out_mod_split(modelspec)
    cd_kwargs = {}
    cd_kwargs.update({'tolerance': tolerance, 'max_iter': 100,
                      'step_size': 0.1})

    for s in range(respcount):
        log.info('Pre-fit slice %d', s)
        modelspec = fit_population_slice(
            est, modelspec, slice=s, fit_set=fit_set_slice,
            analysis_function=analysis.fit_basic,
            metric=metrics.nmse,
            fitter=coordinate_descent,
            fit_kwargs=cd_kwargs)

    return {'modelspec': modelspec}
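# A hedged usage sketch (not from the source): init_pop_pca follows the xforms
# convention of consuming and returning a context dict. The helper name below
# is invented for illustration; it assumes the context already holds an 'est'
# recording with a precomputed 'pca' signal and a 'modelspec' containing
# weight_channels and filter_bank modules.
def _example_init_pop_pca_call(ctx):
    ctx = dict(ctx)  # don't mutate the caller's context
    ctx.update(init_pop_pca(flip_pcs=True, **ctx))
    return ctx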
def generate_xforms_spec(recording_uri, modelname, meta={}, xforms_kwargs={},
                         kw_kwargs={}, autoPred=True, autoStats=True,
                         autoPlot=True):
    """
    TODO: Update this doc

    OUTDATED
    Fits a single NEMS model
    eg, 'ozgf100ch18_wc18x1_lvl1_fir15x1_dexp1_fit01'
    generates modelspec with 'wc18x1_lvl1_fir1x15_dexp1'

    based on fit_model function in nems/scripts/fit_model.py

    example xfspec:
        xfspec = [
            ['nems.xforms.load_recordings',
             {'recording_uri_list': recordings}],
            ['nems.xforms.add_average_sig', {'signal_to_average': 'resp',
                                             'new_signalname': 'resp',
                                             'epoch_regex': '^STIM_'}],
            ['nems.xforms.split_by_occurrence_counts',
             {'epoch_regex': '^STIM_'}],
            ['nems.xforms.init_from_keywords',
             {'keywordstring': modelspecname}],
            ['nems.xforms.set_random_phi', {}],
            ['nems.xforms.fit_basic', {}],
            # ['nems.xforms.add_summary_statistics', {}],
            ['nems.xforms.plot_summary', {}],
            # ['nems.xforms.save_recordings', {'recordings': ['est', 'val']}],
            ['nems.xforms.fill_in_default_metadata', {}],
        ]
    """
    log.info('Initializing modelspec(s) for recording/model {0}/{1}...'
             .format(recording_uri, modelname))

    # parse modelname and assemble xfspecs for loader and fitter

    # TODO: naming scheme change: pre_modules, modules, post_modules?
    #       or something along those lines... since they aren't really
    #       just loaders and fitters
    load_keywords, model_keywords, fit_keywords = escaped_split(modelname, '_')
    xforms_lib = KeywordRegistry(recording_uri=recording_uri, **xforms_kwargs)
    xforms_lib.register_modules(
        [default_loaders, default_fitters, default_initializers])
    xforms_lib.register_plugins(get_setting('XFORMS_PLUGINS'))

    keyword_lib = KeywordRegistry(**kw_kwargs)
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(get_setting('KEYWORD_PLUGINS'))

    # Generate the xfspec, which defines the sequence of events
    # to run through (like a packaged-up script)
    xfspec = []

    # 1) Load the data
    xfspec.extend(_parse_kw_string(load_keywords, xforms_lib))

    # 2) generate a modelspec
    xfspec.append(['nems.xforms.init_from_keywords',
                   {'keywordstring': model_keywords, 'meta': meta,
                    'registry': keyword_lib}])

    # 3) fit the data
    xfspec.extend(_parse_kw_string(fit_keywords, xforms_lib))

    # TODO: need to make this smarter about how to handle the ordering
    #       of pred/stats when only stats is overridden.
    #       For now just have to manually include pred if you want to
    #       do your own stats or plot xform (like using stats.pm)

    # 4) generate a prediction (optional)
    if autoPred:
        if not _xform_exists(xfspec, 'nems.xforms.predict'):
            xfspec.append(['nems.xforms.predict', {}])

    # 5) add some performance statistics (optional)
    if autoStats:
        if not _xform_exists(xfspec, 'nems.xforms.add_summary_statistics'):
            xfspec.append(['nems.xforms.add_summary_statistics', {}])

    # 6) generate plots (optional)
    if autoPlot:
        if not _xform_exists(xfspec, 'nems.xforms.plot_summary'):
            log.info('Adding summary plot to xfspec...')
            xfspec.append(['nems.xforms.plot_summary', {}])

    return xfspec
def keyword_registry():
    keyword_lib = KeywordRegistry()
    keyword_lib.register_module(default_keywords)
    keyword_lib.register_plugins(ngs('KEYWORD_PLUGINS'))
    return keyword_lib
def fitter_registry():
    fitter_lib = KeywordRegistry()
    fitter_lib.register_module(default_fitters)
    fitter_lib.register_plugins(ngs('XF_FITTER_PLUGINS'))
    return fitter_lib
def loader_registry():
    loader_lib = KeywordRegistry('dummy_recording_uri')
    loader_lib.register_module(default_loaders)
    loader_lib.register_plugins(ngs('XF_LOADER_PLUGINS'))
    return loader_lib
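# A hedged test sketch (not from the source), assuming the registry builders
# above are registered as pytest fixtures in the original test module.
# Indexing a KeywordRegistry with a keyword string is expected to dispatch to
# the matching keyword function and return its parsed output; 'ld' (loader)
# and 'wc.2x2' (weight_channels) are example keywords from the default
# plugin modules.
def test_registry_lookup(loader_registry, keyword_registry):
    # loader keywords produce xfspec fragments
    ld_steps = loader_registry['ld']
    assert ld_steps is not None

    # model keywords produce modelspec module templates
    wc_template = keyword_registry['wc.2x2']
    assert wc_template is not None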