Code example #1
0
File: stp.py  Project: LBHB/NEMS
def stp_magnitude(tau,
                  u,
                  u2=None,
                  tau2=None,
                  urat=0.5,
                  fs=100,
                  A=0.5,
                  quick_eval=False):
    """Compute the effect of STP parameters (tau, u) on a dummy signal.

    Builds a synthetic pulse stimulus, runs it through the short-term
    plasticity transformation, and returns the relative magnitude of the
    resulting change per channel.

    Parameters
    ----------
    tau, u : array-like
        Primary STP time constant(s) and release parameter(s), one per
        channel.
    u2, tau2, urat : optional
        Secondary STP parameters; when ``tau2`` is provided the
        two-component model ``stp.short_term_plasticity2`` is used.
    fs : int
        Sampling rate (Hz) of the dummy signal.
    A : float
        Amplitude of the test pulses.
    quick_eval : bool
        Passed through to the STP evaluation routine.

    Returns
    -------
    (stp_mag, pred, pred_out) : tuple
        Per-channel effect magnitude, the input signal, and the
        STP-transformed output signal.
    """
    c = len(tau)
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement.
    seg = int(fs * 0.05)
    # Dummy stimulus: silence, a long half-amplitude step, then three
    # short full-amplitude pulses separated by silence (50 ms segments).
    pred = np.concatenate([
        np.zeros([c, seg * 2]),
        np.ones([c, seg * 4]) * A / 2,
        np.zeros([c, seg * 4]),
        np.ones([c, seg]) * A,
        np.zeros([c, seg]),
        np.ones([c, seg]) * A,
        np.zeros([c, seg]),
        np.ones([c, seg]) * A,
        np.zeros([c, seg * 2])
    ],
                          axis=1)

    kwargs = {
        'data': pred,
        'name': 'pred',
        'recording': 'rec',
        'chans': ['chan' + str(n) for n in range(c)],
        'fs': fs,
        'meta': {},
    }
    pred = signal.RasterizedSignal(**kwargs)
    r = recording.Recording({'pred': pred})
    # Choose the one- or two-component STP model depending on whether
    # secondary parameters were supplied.
    if tau2 is None:
        r = stp.short_term_plasticity(r,
                                      'pred',
                                      'pred_out',
                                      u=u,
                                      tau=tau,
                                      quick_eval=quick_eval)
    else:
        r = stp.short_term_plasticity2(r,
                                       'pred',
                                       'pred_out',
                                       u=u,
                                       tau=tau,
                                       u2=u2,
                                       tau2=tau2,
                                       urat=urat,
                                       quick_eval=quick_eval)

    pred_out = r[0]

    # Magnitude = total (input - output) per channel, normalized by the
    # total input energy.
    stp_mag = (
        np.sum(pred.as_continuous() - pred_out.as_continuous(), axis=1) /
        np.sum(pred.as_continuous()))

    return (stp_mag, pred, pred_out)
Code example #2
0
def test_stp():
    """Smoke test: run short_term_plasticity on a simple pulse signal."""

    nchans = 1
    fs = 100
    # Pulse train: one long step followed by three short pulses.
    segments = [
        np.zeros([1, 10]), np.ones([1, 20]),
        np.zeros([1, 20]), np.ones([1, 5]), np.zeros([1, 5]),
        np.ones([1, 5]), np.zeros([1, 5]),
        np.ones([1, 5]), np.zeros([1, 10]),
    ]
    data = np.concatenate(segments, axis=1)

    signal_args = dict(
        data=data,
        name='pred',
        recording='rec',
        chans=['chan' + str(n) for n in range(nchans)],
        fs=fs,
        meta={
            'for_testing': True,
            'date': "2018-01-10",
            'animal': "Donkey Hotey",
            'windmills': 'tilting',
        },
    )
    pred = signal.RasterizedSignal(**signal_args)
    rec = recording.Recording({'pred': pred})

    u = np.array([5.0])
    tau = np.array([8.0])

    r = stp.short_term_plasticity(rec, 'pred', 'pred_out', u=u, tau=tau)
    pred_out = r[0]
Code example #3
0
File: loaders.py  Project: LBHB/NEMS
def load_sadagopan(cellid='MS_u0004_f0025',
                   recname='MS_u0004',
                   stimfile=None,
                   respfile=None,
                   epochsfile=None,
                   fs=50,
                   channel_num=0,
                   **context):
    """Load an example recording from the Sadagopan lab.

    Reads gzipped CSV stimulus/response matrices and an epochs table,
    wraps them in NEMS RasterizedSignal objects, and returns the
    assembled Recording in a context dict.

    Parameters
    ----------
    cellid, recname : str
        Identifiers used to derive default file names and label signals.
    stimfile, respfile, epochsfile : path-like, optional
        Explicit data file locations; defaults are derived from
        ``signals_dir`` and ``cellid``.
    fs : int
        Sampling rate (Hz) of the data.
    channel_num : int
        Unused here; kept for interface compatibility.
    **context
        Extra keyword arguments, ignored (xforms-style pass-through).

    Returns
    -------
    dict
        ``{'rec': Recording}`` containing 'stim' and 'resp' signals.
    """
    # Default file locations are derived from the cell id.
    if stimfile is None:
        stimfile = signals_dir / (cellid + '_stim.csv.gz')
    if respfile is None:
        respfile = signals_dir / (cellid + '_resp.csv.gz')
    if epochsfile is None:
        epochsfile = signals_dir / (cellid + '_epochs.csv')

    # BUGFIX: the original left the gzip file handles open; use context
    # managers so they are closed deterministically.
    with gzip.open(stimfile, mode='rb') as fh:
        X = np.loadtxt(fh, delimiter=",", skiprows=0)
    with gzip.open(respfile, mode='rb') as fh:
        Y = np.loadtxt(fh, delimiter=",", skiprows=0)
    # get list of stimuli with start and stop times (in sec)
    epochs = pd.read_csv(epochsfile)

    # create NEMS-format recording objects from the raw data
    resp = RasterizedSignal(fs,
                            Y,
                            'resp',
                            recname,
                            chans=[cellid],
                            epochs=epochs.loc[:])
    stim = RasterizedSignal(fs, X, 'stim', recname, epochs=epochs.loc[:])

    # create the recording object from the signals
    signals = {'resp': resp, 'stim': stim}
    rec = recording.Recording(signals)

    return {'rec': rec}
Code example #4
0
# download demo data
recording.get_demo_recordings(signals_dir)
datafile = signals_dir / 'TAR010c-18-1.pkl'

# LOAD AND FORMAT RECORDING DATA

with open(datafile, 'rb') as f:
    #cellid, recname, fs, X, Y, X_val, Y_val = pickle.load(f)
    cellid, recname, fs, X, Y, epochs = pickle.load(f)
# create NEMS-format recording objects from the raw data
resp = RasterizedSignal(fs, Y, 'resp', recname, chans=[cellid])
stim = RasterizedSignal(fs, X, 'stim', recname)

# create the recording object from the signals
signals = {'resp': resp, 'stim': stim}
est = recording.Recording(signals)

# NOTE(review): X_val and Y_val are never defined — the active pickle
# unpack above yields `epochs` where the commented-out older format
# yielded X_val/Y_val — so the two lines below raise NameError. Either
# restore a pickle providing X_val/Y_val or derive the validation set
# from `est` (e.g. a jackknife/time split). TODO confirm intended fix.
val_signals = {
    'resp': RasterizedSignal(fs, Y_val, 'resp', recname, chans=[cellid]),
    'stim': RasterizedSignal(fs, X_val, 'stim', recname)
}
val = recording.Recording(val_signals)

# INITIALIZE MODELSPEC

log.info('Initializing modelspec...')

# Method #1: create from "shorthand" keyword string
#modelspec_name = 'wc.18x1.g-fir.1x15-lvl.1'           # very simple linear model
#modelspec_name = 'wc.18x2.g-fir.2x15-lvl.1'         # another simple model
modelspec_name = 'wc.18x2.g-fir.2x15-lvl.1-dexp.1'  # constrain spectral tuning to be gaussian, add static output NL
Code example #5
0
File: vatsun_test_2.py  Project: LBHB/NEMS
# Load the response matrix from the gzipped CSV. (`respfile`, `epochsfile`,
# and the stimulus matrix `X` are defined earlier in this script, outside
# this excerpt.)
Y = np.loadtxt(gzip.open(respfile, mode='rb'), delimiter=",", skiprows=0)
# get list of stimuli with start and stop times (in sec)
epochs = pd.read_csv(epochsfile)

# create NEMS-format recording objects from the raw data
resp = RasterizedSignal(fs,
                        Y,
                        'resp',
                        recname,
                        chans=[cellid],
                        epochs=epochs.loc[:])
stim = RasterizedSignal(fs, X, 'stim', recname, epochs=epochs.loc[:])

# create the recording object from the signals
signals = {'resp': resp, 'stim': stim}
rec = recording.Recording(signals)

#generate est/val set_sets
#nfolds=10
#est = rec.jackknife_masks_by_time(njacks=nfolds, invert=False) #VATSUN - doesnt work
#val = rec.jackknife_masks_by_time(njacks=nfolds, invert=True)

# Split the recording into estimation/validation sets by time.
est, val = rec.split_at_time(
    fraction=0.1)  # VATSUN: Fraction=0.1 I think specifies the validation set

# INITIALIZE MODELSPEC

log.info('Initializing modelspec...')

# Method #1: create from "shorthand" keyword string
modelspec_name = 'fir.18x10-lvl.1-dexp.1'  # "canonical" linear STRF + nonlinearity
Code example #6
0
def test_cell_fit_cellwise():
    """Fit a linear model separately to each cell of a population recording.

    For every cell listed in the recording's metadata, builds per-cell
    estimation/validation recordings (the response trimmed to that single
    cell), prefits the level shift, runs a full fit, generates predictions
    and correlation metrics, and collects the resulting modelspecs.

    Returns
    -------
    list
        One fitted-modelspecs entry per cell.
    """
    # gets test recording, as would be the output of CPP preprocessing
    testfile = '/home/mateo/code/context_probe_analysis/pickles/BRT037b'
    # testfile = 'C:\\Users\\Mateo\\Science\\David_lab\\code\\context_probe_analysis\\pickles\\BRT037b'  # path for blade
    rec = jl.load(testfile)

    # ToDo split into estimation validation sets
    subsets = {'est': rec, 'val': rec}

    # basic modelspec: linear filter and DC shift
    # (the original assigned 'wc.2x2.g-fir.2x15-lvl.1' first and then
    # immediately overwrote it; the dead assignment is kept as a comment)
    # modelspec_name = 'wc.2x2.g-fir.2x15-lvl.1'
    modelspec_name = 'fir.2x15-lvl.1'

    # iterates over each cell, fitting the model to that cell's response
    ntime_modspecs = list()

    for ii, cellid in enumerate(rec.meta['cellid']):

        print('working on cell {}/{}, {}'.format(ii + 1,
                                                 len(rec.meta['cellid']),
                                                 cellid))

        working_sets = dict()
        # builds the per-cell recording for each data subset
        for key, subset in subsets.items():
            stim = subset['stim'].rasterize()
            resp = subset['resp'].rasterize()

            # keep only the response of the cell being fitted
            # NOTE(review): the original also computed the responses of
            # all OTHER cells (np.delete(resp._data, ii, axis=0)) but
            # never used them — presumably they were meant to augment
            # 'stim' as predictors; confirm before relying on this test.
            this_cell_resp = np.expand_dims(resp._data[ii, :], 0)

            # build a signal carrying only the single-cell data
            resp = resp._modified_copy(data=this_cell_resp)

            signals = {'stim': stim, 'resp': resp}

            # makes into a recording
            mod_rec = recording.Recording(signals)

            # adds per-cell metadata
            this_cell_meta = rec.meta.copy()
            this_cell_meta['cellid'] = [cellid]
            mod_rec.meta = this_cell_meta

            # stores under its estimation/validation key
            working_sets[key] = mod_rec

        est = working_sets['est']
        val = working_sets['val']

        # parses some data from rec meta into the analysis meta
        analysis_meta = {
            'modelspec_name': modelspec_name,
            'recording': None,
            **this_cell_meta
        }
        modelspec = nems.initializers.from_keywords(modelspec_name,
                                                    meta=analysis_meta)

        # prefits the dc_shift
        modelspec = nems.initializers.prefit_to_target(
            est,
            modelspec,
            nems.analysis.api.fit_basic,
            target_module='levelshift',
            fitter=scipy_minimize,
            fit_kwargs={'options': {
                'ftol': 1e-4,
                'maxiter': 500
            }})

        # then fit full nonlinear model
        modelspecs = nems.analysis.api.fit_basic(est,
                                                 modelspec,
                                                 fitter=scipy_minimize)

        # ----------------------------------------------------------------------------
        # GENERATE SUMMARY STATISTICS

        # generate predictions
        est, val = nems.analysis.api.generate_prediction(est, val, modelspecs)

        # evaluate prediction accuracy
        modelspecs = nems.analysis.api.standard_correlation(
            est, val, modelspecs)

        ntime_modspecs.append(modelspecs)

        print("Performance: r_fit={0:.3f} r_test={1:.3f}".format(
            modelspecs[0][0]['meta']['r_fit'][0],
            modelspecs[0][0]['meta']['r_test'][0]))

    return ntime_modspecs