Example #1
import copy
import logging
import os

# NEMS-specific imports; the exact module paths are assumptions based on
# the LBHB NEMS codebase and may differ between versions.
import nems.modelspec as ms
from nems import get_setting
from nems.uri import load_resource
from nems.db import load_batch_modelpaths

log = logging.getLogger(__name__)


def _get_modelspecs(cellids, batch, modelname, multi='mean'):
    filepaths = load_batch_modelpaths(batch,
                                      modelname,
                                      cellids,
                                      eval_model=False)
    speclists = []
    for path in filepaths:
        mspaths = []
        path = path.replace('http://hyrax.ohsu.edu:3003/',
                            '/auto/data/nems_db/')
        if get_setting('NEMS_RESULTS_DIR').startswith("/Volumes"):
            path = path.replace('/auto/', '/Volumes/')
        for file in os.listdir(path):
            if file.startswith("modelspec"):
                mspaths.append(os.path.join(path, file))
        speclists.append([load_resource(p) for p in mspaths])

    modelspecs = []
    for m in speclists:
        if len(m) > 1:
            if multi == 'first':
                this_mspec = m[0]
            elif multi == 'all':
                this_mspec = m
            elif multi == 'mean':
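                # Average each fitted parameter across the repeated fits;
                # summary_stats keys are expected to end with '--<param>'.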
                stats = ms.summary_stats(m)
                temp_spec = copy.deepcopy(m[0])
                phis = [mod['phi'] for mod in temp_spec]
                for p in phis:
                    for k in p:
                        for s in stats:
                            if s.endswith('--' + k):
                                p[k] = stats[s]['mean']
                for mod, p in zip(temp_spec, phis):
                    mod['phi'] = p
                this_mspec = temp_spec
            else:
                log.warning(
                    "Couldn't interpret <multi> parameter. Got: %s,\n"
                    "Expected one of: 'mean', 'first', 'all'.\n"
                    "Using first modelspec instead.", multi)
                this_mspec = m[0]
        else:
            this_mspec = m[0]

        modelspecs.append(ms.ModelSpec([this_mspec]))

    return modelspecs
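The multi='mean' branch above averages each fitted parameter across repeated fits of the same model. Here is a standalone sketch of just that averaging step on toy modelspecs; average_phi and the toy 'phi' layout are assumptions for illustration, not NEMS code:

import copy
import numpy as np

def average_phi(speclist):
    # speclist: repeated fits of the same model, each a list of module
    # dicts carrying a 'phi' dict of fitted parameters.
    averaged = copy.deepcopy(speclist[0])
    for i, module in enumerate(averaged):
        for key in module['phi']:
            # Mean of this parameter across all fits (elementwise for arrays)
            module['phi'][key] = np.mean([fit[i]['phi'][key]
                                          for fit in speclist], axis=0)
    return averaged

fits = [[{'phi': {'gain': np.array([1.0, 2.0])}}],
        [{'phi': {'gain': np.array([3.0, 4.0])}}]]
print(average_phi(fits)[0]['phi']['gain'])  # -> [2. 3.]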
Example #2
# 3. Invert the folding: unwrap the PSTH back into a predicted response
# by replacing every epoch in the signal with its per-stimulus PSTH
respavg = rec['resp'].replace_epochs(per_stim_psth)
respavg.name = 'respavg'

# 4. Now add the signal to the recording
rec.add_signal(respavg)

# Now split into est and val data sets
est, val = rec.split_using_epoch_occurrence_counts(epoch_regex='^STIM_')
# est, val = rec.split_at_time(0.8)

# Load some modelspecs and create their predictions
modelspecs = ms.load_modelspecs(modelspecs_dir, 'TAR010c-18-1')
# optionally filter by name: regex=('^TAR010c-18-1\.{\d+}\.json')
# Testing summary statistics:
means, stds = ms.summary_stats(modelspecs)
print("means: {}".format(means))
print("stds: {}".format(stds))

pred = [ms.evaluate(val, m)['pred'] for m in modelspecs]

# Shorthands for unchanging signals
stim = val['stim']
resp = val['resp']
respavg = val['respavg']


def plot_layout(plot_fn_struct):
    '''
    Accepts a list of lists of functions of 1 argument (ax).
    Basically a fancy subplot that lets you lay out functions without
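plot_layout's docstring describes a grid of one-argument plotting callables, each drawing into a matplotlib Axes. A minimal sketch of that idea (plot_layout_sketch and its body are assumptions, not the NEMS implementation):

import matplotlib.pyplot as plt

def plot_layout_sketch(plot_fn_struct):
    # One subplot row per inner list; each element is a callable that
    # takes an Axes and draws into it.
    nrows = len(plot_fn_struct)
    ncols = max(len(row) for row in plot_fn_struct)
    fig, axes = plt.subplots(nrows, ncols, squeeze=False)
    for i, row in enumerate(plot_fn_struct):
        for j, fn in enumerate(row):
            fn(axes[i][j])
    return fig

# e.g. plot_layout_sketch([[lambda ax: ax.plot([0, 1])],
#                          [lambda ax: ax.hist([1, 2, 2])]])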
Example #3
for batch, bs in zip(batches, batstring):
    modelspecs = get_batch_modelspecs(batch,
                                      modelname,
                                      multi=multi,
                                      limit=None)
    modelspecs_shf = get_batch_modelspecs(batch,
                                          modelnames[1],
                                          multi=multi,
                                          limit=None)
    modelspecs_SR = get_batch_modelspecs(batch,
                                         modelnames[0],
                                         multi=multi,
                                         limit=None)

    stats = ms.summary_stats(modelspecs,
                             mod_key=mod_key,
                             meta_include=meta,
                             stats_keys=stats_keys)
    index = list(stats.keys())
    columns = [m[0].get('meta').get('cellid') for m in modelspecs]

    midx = 0
    fields = ['bg_gain', 'fg_gain']
    b = np.array([])
    f = np.array([])
    tar_id = np.array([])
    cellids = []
    b_S = np.array([])
    f_S = np.array([])
    c = np.array([])
    cid = []
    r_test = np.array([])
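Example #3 accumulates per-cell values into parallel numpy arrays before plotting. The stats dict returned by ms.summary_stats (indexed as stats[key]['mean'] in Examples #1 and #4) can also be tabulated directly; the toy keys and values below are assumptions standing in for real output:

import pandas as pd

# Toy stand-in for ms.summary_stats output (real keys/values will differ)
stats = {'wc--gain': {'mean': 0.5, 'std': 0.1},
         'fir--coefficients': {'mean': 0.2, 'std': 0.05}}
df = pd.DataFrame.from_dict(stats, orient='index')
print(df)  # one row per parameter, one column per statistic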
Example #4
def fitted_params_per_cell(cellids,
                           batch,
                           modelname,
                           mod_key='id',
                           meta=['r_test', 'r_fit', 'se_test'],
                           multi='mean',
                           stats_keys=['mean', 'std', 'sem', 'max', 'min']):
    '''
    Valid meta keys for LBHB (not exhaustive):
        r_test, r_fit, r_ceiling, r_floor, cellid, batch ... etc
    Valid stats_keys:
        mean, std, sem, max, min
    Valid 'multi' options (for dealing with multi-modelspec fits):
        mean (default), first, all (work in progress)
    '''

    # query nems_db results to get a list of modelspecs
    # (should end up with one modelspec per cell)
    modelspecs = _get_modelspecs(cellids, batch, modelname, multi=multi)
    if multi == 'all':
        # multi='all' is still a work in progress (see docstring above);
        # the flatten below is unreachable until the NotImplementedError
        # is removed.
        raise NotImplementedError
        # Flatten sublists of modelspecs
        modelspecs = [spec for speclist in modelspecs for spec in speclist]

    stats = ms.summary_stats(modelspecs,
                             mod_key=mod_key,
                             meta_include=meta,
                             stats_keys=stats_keys)
    index = list(stats.keys())
    try:
        columns = [m[0].get('meta').get('cellid') for m in modelspecs]
    except (AttributeError, IndexError, TypeError):
        log.warning("Couldn't use cellids from modelspecs, using cellids "
                    "from function parameters instead.")
        columns = cellids

    data = {}
    current = columns[0]
    counter = 1
    for i, c in enumerate(columns):
        # Ensure unique column names, will have duplicates if multi='all'
        # and there were multi-fit models included.
        if i == 0:
            pass
        elif c == current:
            columns[i] = '%s<%d>' % (c, counter)
            counter += 1
        else:
            current = c
            counter = 1

        for k in index:
            val = ms.try_scalar(stats[k]['values'][i])
            if c in data:
                data[c].append(val)
            else:
                data[c] = [val]

    if stats_keys:
        for s in reversed(stats_keys):
            columns.insert(0, s)
            data[s] = []
            for k in index:
                data[s].append(stats[k][s])

    return pd.DataFrame(data=data, index=index, columns=columns)
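A hypothetical call, assuming a reachable NEMS results database; the batch number and modelname below are placeholders, not values from this codebase:

# Placeholder arguments; real values depend on the nems_db installation
df = fitted_params_per_cell(cellids=['TAR010c-18-1'],
                            batch=271,
                            modelname='ozgf100ch18_wc18x1_fir1x15_lvl1_dexp1_fit01',
                            multi='mean')
print(df.head())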