Example #1
def dPrime_from_NIT_site(site, duration, source, position, meta):
    options = {
        'batch': batch,
        'siteid': site,
        'stimfmt': 'envelope',
        'rasterfs': load_fs,
        'recache': False,
        'runclass': 'NTI',
        'stim': False
    }
    load_URI = nb.baphy_load_recording_uri(**options)
    rec = recording.load_recording(load_URI)

    rec = set_recording_subepochs(rec)
    sig = rec['resp']

    # calculate response reliability and select only good cells to improve the analysis
    r_vals, goodcells = signal_reliability(sig,
                                           r'\ASTIM_*',
                                           threshold=meta['reliability'])
    goodcells = goodcells.tolist()

    probe_regex = NTI_epoch_name(duration, source, position)
    cp_regex = fr'\AC(({NTI_epoch_name()})|(PreStimSilence))_P{probe_regex}\Z'

    full_rast, transitions, contexts = raster_from_sig(
        sig, cp_regex, goodcells, raster_fs=meta['raster_fs'])

    if len(contexts) < 2:
        real = shuffled = simulated = None
        print(f'only one context for {probe_regex}, skipping analysis')
    else:
        real, shuffled, simulated = nway_analysis(full_rast, meta)

    return real, shuffled, simulated, transitions, contexts
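# Hypothetical usage sketch: the site name and probe descriptors below are
# placeholders, and meta must carry at least the keys used above ('reliability',
# 'raster_fs') plus whatever nway_analysis expects. The function also relies on
# module-level `batch` and `load_fs` values.
meta = {'reliability': 0.1, 'raster_fs': 30}
real, shuffled, simulated, transitions, contexts = dPrime_from_NIT_site(
    'AMT028b', duration=1, source=0, position=0, meta=meta)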
Example #2
def load(site, boxload=True, **kwargs):

    # defaults
    options = {'batch': 316,
               'cellid': site,
               'stimfmt': 'envelope',
               'rasterfs': 100,
               'recache': False,
               'runclass': 'CPN',
               'stim': False}

    options.update(**kwargs)

    if boxload is True:
        toname = options.copy()
        del toname['recache']
        filename = pl.Path(config['paths']['recording_cache']) / set_name(toname)

        if not filename.parent.exists():
            filename.parent.mkdir()

        if filename.exists() and options['recache'] is False:
            print('loading recording from box')
            loaded_rec = jl.load(filename)

        elif filename.exists() is False or options['recache'] is True:
            load_URI, _ = nb.baphy_load_recording_uri(**options)
            loaded_rec = recording.load_recording(load_URI)
            print('caching recording in box')
            jl.dump(loaded_rec, filename)
        else:
            raise SystemError('WTF?')

    elif boxload is False:
        load_URI, _ = nb.baphy_load_recording_uri(**options)
        loaded_rec = recording.load_recording(load_URI)

    else:
        raise ValueError('boxload must be boolean')

    CPN_rec = cpe.set_recording_subepochs(loaded_rec)

    recordings = split_recording(CPN_rec)
    return recordings
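# Hypothetical usage sketch: 'AMT030a' is a placeholder site id; extra keyword
# arguments override the defaults set above (e.g. a different sampling rate).
recordings = load('AMT030a', boxload=True, rasterfs=20)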
Example #3
# If using database:
batch = 289  # NAT + pupil
#cellid = 'BRT036b-45-2'
#cellid = 'BRT037b-63-1'
#cellid = 'TAR010c-13-1'
cellid = 'TAR009d-42-1'
options = {
    'rasterfs': 100,
    'stimfmt': 'ozgf',
    'chancount': 18,
    'pupil': True,
    'stim': True
}

# get the name of the cached recording
uri = nb.baphy_load_recording_uri(cellid=cellid, batch=batch, **options)
rec = load_recording(uri)

# convert to rasterized signals from PointProcess and TiledSignal
rec['resp'] = rec['resp'].rasterize()
rec['stim'] = rec['stim'].rasterize()

rec['resp'] = rec['resp'].extract_channels([cellid])
rec.meta["cellid"] = cellid

# est, val = estimation and validation data sets
est, val = rec.split_using_epoch_occurrence_counts(epoch_regex="^STIM_")
est = average_away_epoch_occurrences(est, epoch_regex="^STIM_")
val = average_away_epoch_occurrences(val, epoch_regex="^STIM_")

# get matrices for fitting:
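# A minimal sketch of one way to do this: as_continuous() returns the underlying
# channels x time numpy array of a rasterized NEMS signal.
X_est = est['stim'].as_continuous()   # stimulus spectrogram for fitting
Y_est = est['resp'].as_continuous()   # single-cell response for fitting
X_val = val['stim'].as_continuous()   # held-out stimulus
Y_val = val['resp'].as_continuous()   # held-out response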
Example #4
fn = '/auto/users/hellerc/code/projects/Cosyne2020_poster/svgs/dec_cv_example.svg'

site = 'TAR010c'
nPCs = 4
batch = 307
fs = 20
rawid = pu.which_rawids(site)
ops = {
    'batch': batch,
    'pupil': 1,
    'rasterfs': fs,
    'siteid': site,
    'stim': 0,
    'rawid': rawid
}
uri = nb.baphy_load_recording_uri(**ops)
rec = Recording.load(uri)
rec['resp'] = rec['resp'].rasterize()
rec = rec.and_mask(['HIT_TRIAL', 'MISS_TRIAL', 'PASSIVE_EXPERIMENT'])

rec = rec.and_mask(['PreStimSilence', 'PostStimSilence'], invert=True)
rec = rec.apply_mask(reset_epochs=True)

# create four state masks
rec = preproc.create_ptd_masks(rec)

# create the appropriate dictionaries of responses
# don't *think* it's so important to balance reps for
# this analysis since it's parametric (as opposed to the
# discrimination calc, which depends greatly on being balanced)
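# A rough sketch of the dictionaries described above, with assumptions flagged:
# the exact names of the mask signals added by preproc.create_ptd_masks are not
# shown here, so they are discovered via an assumed '_mask' naming convention.
responses = {}
mask_names = [k for k in rec.signals if k.endswith('_mask')]  # assumed convention
for name in mask_names:
    r = rec.copy()
    r['mask'] = rec[name]                 # reuse the state mask as the active mask
    r = r.apply_mask(reset_epochs=True)
    responses[name] = r['resp'].as_continuous()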
Example #5
def generate_recording_uri(cellid=None, batch=None, loadkey=None,
                           siteid=None, **options):
    """
    required parameters (passed through to nb.baphy_data_path):
        cellid: string or list
            string can be a valid cellid or siteid
            list is a list of cellids from a single(?) site
        batch: integer

    figure out filename (or eventually URI) of pre-generated
    NEMS-format recording for a given cell/batch/loader string

    very baphy-specific. Needs to be coordinated with loader processing
    in nems.xform_helper
    """

    # remove any preprocessing keywords in the loader string.
    loader = nems.utils.escaped_split(loadkey, '-')[0]
    log.info('loader=%s',loader)

    ops = loader.split(".")

    # update some defaults
    options.update({'rasterfs': 100, 'chancount': 0})
    load_pop_file = False

    for op in ops:
        if op=='ozgf':
            options['stimfmt'] = 'ozgf'
        elif op=='parm':
            options['stimfmt'] = 'parm'
        elif op=='env':
            options['stimfmt'] = 'envelope'
        elif op in ['nostim','psth','ns', 'evt']:
            options.update({'stim': False, 'stimfmt': 'parm'})

        elif op.startswith('fs'):
            options['rasterfs'] = int(op[2:])
        elif op.startswith('ch'):
            options['chancount'] = int(op[2:])

        elif op=='pup':
            options.update({'pupil': True, 'pupil_deblink': True,
                            'pupil_deblink_dur': 1,
                            'pupil_median': 0})
        elif op=='rem':
            options['rem'] = True

        elif 'eysp' in ops:
            options['pupil_eyespeed'] = True
        elif op.startswith('pop'):
            load_pop_file = True

    if 'stimfmt' not in options.keys():
        raise ValueError('Valid stim format (ozgf, psth, parm, env, evt) not specified in loader='+loader)
    if (options['stimfmt']=='ozgf') and (options['chancount'] <= 0):
        raise ValueError('Stim format ozgf requires chancount>0 (.chNN) in loader='+loader)

    if int(batch) == 294:
        options["runclass"] = "VOC"

    if siteid is not None:
        options['siteid'] = siteid

    options["batch"] = batch
    options["cellid"] = cellid
    if load_pop_file:
        recording_uri = pop_file(siteid=cellid, **options)
    else:
        recording_uri = nb.baphy_load_recording_uri(**options)

    return recording_uri
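# Hypothetical usage sketch: the loadkey follows the op syntax parsed above
# (stim format, .fsNN sampling rate, .chNN channel count, optional 'pup').
uri = generate_recording_uri(cellid='TAR010c-13-1', batch=289,
                             loadkey='ozgf.fs100.ch18.pup')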
Example #6
ax_lab_size = 15
ax_val_size = 11

########################################################################################################################

site = 'AMT028b'  # example site
options = {
    'batch': batch,
    'siteid': site,
    'stimfmt': 'envelope',
    'rasterfs': 100,
    'recache': False,
    'runclass': 'NTI',
    'stim': False
}
load_URI = nb.baphy_load_recording_uri(**options)
rec = recording.load_recording(load_URI)

rec = set_recording_subepochs(rec)
sig = rec['resp']

# calculate response reliability and select only good cells to improve the analysis
r_vals, goodcells = signal_reliability(sig,
                                       r'\ASTIM_*',
                                       threshold=meta['reliability'])
goodcells = goodcells.tolist()

######### Generate an array ordering a probe and its contexts; define the kind of transitions for such context-probe combinations
# [C, P, R, U, T] final array dimensions

# information on which set of sequence pairs to use
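# A possible continuation, mirroring Example #1: build the context-probe raster
# for one probe. The duration/source/position values are placeholders, and meta
# is assumed to carry a 'raster_fs' key as in Example #1.
probe_regex = NTI_epoch_name(1, 0, 0)  # placeholder duration, source, position
cp_regex = fr'\AC(({NTI_epoch_name()})|(PreStimSilence))_P{probe_regex}\Z'
full_rast, transitions, contexts = raster_from_sig(
    sig, cp_regex, goodcells, raster_fs=meta['raster_fs'])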
Example #7
def kamiak_array(cellids, batch, modelnames, output_path):
    # Put in folder for today's date
    subdirectory = str(datetime.datetime.now()).split(' ')[0]
    directory_path = os.path.join(output_path, subdirectory)
    if not os.path.exists(directory_path):
        os.makedirs(directory_path, exist_ok=True)

    # Create a manifest of the recording names needed
    manifest_path = os.path.join(directory_path, 'manifest.sh')
    reverse_manifest_path = os.path.join(directory_path, 'reverse_manifest.sh')
    args_path = os.path.join(directory_path, 'jobs.txt')
    script_path = os.path.join(directory_path, 'batch.srun')
    recording_entries = []
    args_entries = []
    remote_host = "*****@*****.**"
    recordings = "/home/jacob.pennington/nems/recordings/"
    results = f"/home/jacob.pennington/nems/results/{batch}"
    remote_recordings = f"{remote_host}:{recordings}"
    remote_results = f"{remote_host}:{results}"
    scripts = "/home/jacob.pennington/slurm_scripts/"
    remote_scripts = f"{remote_host}:{scripts}"
    logs = "/home/jacob.pennington/nems_logs/"
    jobs = f"{scripts}/{subdirectory}/jobs.txt"
    failed_jobs = jobs[:-4] + '_failed.txt'

    for j, c in enumerate(cellids):
        # TODO: Don't hardcode the loader options, parse from modelname
        options = {
            'cellid': c,
            'batch': batch,
            'stim': 1,
            'stimfmt': 'ozgf',
            'chancount': 18,
            'rasterfs': 100
        }
        # Record unique recording URIs (may be shared by cells with the same siteid)
        recording_uri = nb.baphy_load_recording_uri(**options)
        remote_rec = f'{recordings}/{recording_uri.split("/")[-1]}'
        if recording_uri not in recording_entries:
            recording_entries.append(recording_uri)

        for i, m in enumerate(modelnames):

            args = f"{c} {batch} {m} {remote_rec} {recording_uri}"
            args_entries.append(args)

    script_contents = (
        "#!/bin/bash\n"
        "#SBATCH --partition=kamiak\n"
        "#SBATCH --job-name=NEMS\n"
        f"#SBATCH --array=1-{len(args_entries)}\n"
        f"#SBATCH --output={logs}/{subdirectory}/NEMS.%A_%a.out\n"
        f"#SBATCH --error={logs}/{subdirectory}/NEMS.%A_%a.err\n"
        "#SBATCH --time=1-23:59:00\n"
        "#SBATCH --nodes=1\n"
        "#SBATCH --ntasks-per-node=1\n"
        "\n"
        f"job=$(sed \"${{SLURM_ARRAY_TASK_ID}}q;d\" {jobs})\n"
        "args=($job)\n"
        "\n"
        "# Call cleanup function on cancelled jobs\n"
        "function clean_up {\n"
        "    echo \"${args[0]} ${args[1]} ${args[2]}"
        f"    ${{args[3]}} ${{args[4]}}\" >> {failed_jobs}\n"
        "}\n"
        "# trap termination signals\n"
        "trap 'clean_up' SIGINT SIGTERM\n"
        "python3 /home/jacob.pennington/nems_scripts/fit_xforms.py "
        "${args[0]} ${args[1]} ${args[2]} ${args[3]} ${args[4]}\n"
        "echo \"task ${SLURM_ARRAY_TASK_ID} complete\"")

    # Write recording_uri list to manifest

    # Copy recording files in separate scp commands since
    # a full batch might become very large for a single copy
    manifest_lines = [
        f'rsync -avx --ignore-existing {e} {remote_recordings}/{e.split("/")[-1]}'
        for e in recording_entries
    ]
    # But all scripts can be copied over in one command
    manifest_lines.append(f'scp {args_path} {remote_scripts}/{subdirectory}/')
    manifest_lines.append(
        f'scp {script_path} {remote_scripts}/{subdirectory}/')
    manifest_lines.insert(
        0, f'ssh {remote_host} "mkdir -p {scripts}/{subdirectory}"')
    manifest_lines.insert(
        0, f'ssh {remote_host} "mkdir -p {logs}/{subdirectory}"')
    manifest_lines.insert(0, '#!/bin/bash')
    manifest_contents = '\n'.join([e for e in manifest_lines])

    reverse_manifest_lines = [f'rsync -avx {remote_results} $1']
    reverse_manifest_lines.insert(0, '#!/bin/bash')
    reverse_manifest_contents = '\n'.join([r for r in reverse_manifest_lines])

    args_contents = '\n'.join([a for a in args_entries])

    with open(manifest_path, 'w+') as manifest:
        manifest.write(manifest_contents)
    os.chmod(manifest_path, 0o777)  # open up permissions for other users
    st = os.stat(manifest_path)
    os.chmod(manifest_path, st.st_mode | stat.S_IEXEC)  # make it executable

    with open(reverse_manifest_path, 'w+') as reverse_manifest:
        reverse_manifest.write(reverse_manifest_contents)
    os.chmod(reverse_manifest_path, 0o777)
    rst = os.stat(reverse_manifest_path)
    os.chmod(reverse_manifest_path, rst.st_mode | stat.S_IEXEC)

    with open(args_path, 'w+') as args:
        args.write(args_contents)
    with open(script_path, 'w+') as script:
        script.write(script_contents)
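# Hypothetical usage sketch: the cellids and modelname are placeholders;
# output_path is a local directory where manifest.sh, reverse_manifest.sh,
# jobs.txt and batch.srun are written.
kamiak_array(cellids=['TAR010c-13-1', 'TAR009d-42-1'],
             batch=289,
             modelnames=['placeholder_modelname'],
             output_path='/tmp/kamiak_jobs')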
Example #8
def kamiak_batch(cellids, batch, modelnames, output_path):
    # Put in folder for today's date
    subdirectory = str(datetime.datetime.now()).split(' ')[0]
    directory_path = os.path.join(output_path, subdirectory)
    if not os.path.exists(directory_path):
        os.makedirs(directory_path, exist_ok=True)

    # Create a manifest of the recording names needed
    manifest_path = os.path.join(directory_path, 'manifest.sh')
    recording_entries = []
    script_entries = []
    remote_host = "*****@*****.**"
    recordings = "/home/jacob.pennington/nems/recordings/"
    remote_recordings = f"{remote_host}:{recordings}"
    scripts = "/home/jacob.pennington/slurm_scripts/"
    remote_scripts = f"{remote_host}:{scripts}"

    for j, c in enumerate(cellids):
        # TODO: Don't hardcode the loader options, parse from modelname
        options = {
            'cellid': c,
            'batch': batch,
            'stim': 1,
            'stimfmt': 'ozgf',
            'chancount': 18,
            'rasterfs': 100
        }
        # Record unique recording URIs (may be shared by cells with the same siteid)
        recording_uri = nb.baphy_load_recording_uri(**options)
        remote_rec = f'{recordings}/{recording_uri.split("/")[-1]}'
        if recording_uri not in recording_entries:
            recording_entries.append(recording_uri)

        for i, m in enumerate(modelnames):
            name = f'{m}__{batch}__{c}'
            filename = f'{name}.srun'
            jobname = f'NEMS_model{i}_cell{j}'
            full_path = os.path.join(directory_path, filename)

            contents = (
                "#!/bin/bash\n"
                "#SBATCH --partition=kamiak\n"
                f"#SBATCH --job-name={jobname}\n"
                f"#SBATCH --output=/home/jacob.pennington/nems_logs/{subdirectory}/{name}.out\n"
                f"#SBATCH --error=/home/jacob.pennington/nems_logs/{subdirectory}/{name}.err\n"
                "#SBATCH --time=1-23:59:00\n"
                "#SBATCH --nodes=1\n"
                "#SBATCH --ntasks-per-node=1\n"
                "python3 /home/jacob.pennington/nems_scripts/fit_xforms.py "
                f"'{c}' '{batch}' '{m}' '{remote_rec}' '{recording_uri}'")

            with open(full_path, 'w+') as script:
                script.write(contents)
            script_entries.append(full_path)

    # Write recording_uri list to manifest

    # Copy recording files in separate scp commands since
    # a full batch might become very large for a single copy
    manifest_lines = [
        f'scp {e} {remote_recordings}/{e.split("/")[-1]}'
        for e in recording_entries
    ]
    # But all scripts can be copied over in one command
    all_scripts = ' '.join(script_entries)
    manifest_lines += [f'scp {all_scripts} {remote_scripts}/{subdirectory}/']
    manifest_lines.insert(
        0, f'ssh {remote_host} "mkdir -p {scripts}/{subdirectory}"')
    manifest_lines.insert(0, '#!/bin/bash')
    manifest_contents = '\n'.join([str(e) for e in manifest_lines])
    with open(manifest_path, 'w+') as manifest:
        manifest.write(manifest_contents)
    st = os.stat(manifest_path)
    os.chmod(manifest_path, st.st_mode | stat.S_IEXEC)  # make it executable
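# Hypothetical usage sketch: kamiak_batch takes the same arguments as
# kamiak_array in Example #7, but writes one .srun script per cell/model pair
# instead of a single SLURM job array.
kamiak_batch(cellids=['TAR010c-13-1'], batch=289,
             modelnames=['placeholder_modelname'],
             output_path='/tmp/kamiak_jobs')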
Example #9
def plot_collapsed_ref_tar(animal, site, cellids=None):
    site += "%"

    sql = "SELECT DISTINCT cellid, rawid, respfile FROM sCellFile WHERE cellid like %s AND runclassid=%s"
    d = nd.pd_query(sql, params=(
        site,
        42,
    ))

    mfile = []
    for f in np.unique(d.respfile):
        f_ = f.split('.')[0]
        mfile.append('/auto/data/daq/{0}/{1}/{2}'.format(
            animal, site[:-2], f_))

    if cellids is None:
        cellid = np.unique(d.cellid).tolist()
    else:
        cellid = cellids
    options = {
        "siteid": site[:-1],
        'cellid': cellids,
        "mfilename": mfile,
        'stim': False,
        'pupil': True,
        'rasterfs': 1000
    }

    uri = nb.baphy_load_recording_uri(**options)
    rec = Recording.load(uri)
    all_pupil = rec['pupil']._data
    ncols = len(mfile)
    if cellids is None:
        cellids = rec['resp'].chans

    for c in cellids:
        f, ax = plt.subplots(1, ncols, sharey=True, figsize=(12, 5))
        ref_base = 0
        for i, mf in enumerate(mfile):
            fn = mf.split('/')[-1]
            ep_mask = [ep for ep in rec.epochs.name if fn in ep]
            R = rec.copy()
            R['resp'] = R['resp'].rasterize(fs=20)
            R['resp'].fs = 20
            R = R.and_mask(ep_mask).apply_mask(reset_epochs=True)
            if '_a_' in fn:
                R = R.and_mask(['HIT_TRIAL']).apply_mask(reset_epochs=True)

            resp = R['resp'].extract_channels([c])

            tar_reps = resp.extract_epoch('TARGET').shape[0]
            tar_m = np.nanmean(resp.extract_epoch('TARGET'),
                               0).squeeze() * R['resp'].fs
            tar_sem = R['resp'].fs * np.nanstd(resp.extract_epoch('TARGET'),
                                               0).squeeze() / np.sqrt(tar_reps)

            ref_reps = resp.extract_epoch('REFERENCE').shape[0]
            ref_m = np.nanmean(resp.extract_epoch('REFERENCE'),
                               0).squeeze() * R['resp'].fs
            ref_sem = R['resp'].fs * np.nanstd(resp.extract_epoch('REFERENCE'),
                                               0).squeeze() / np.sqrt(ref_reps)

            # plot psth's
            time = np.linspace(0, len(tar_m) / R['resp'].fs, len(tar_m))
            ax[i].plot(time, tar_m, color='red', lw=2)
            ax[i].fill_between(time,
                               tar_m + tar_sem,
                               tar_m - tar_sem,
                               color='coral')
            time = np.linspace(0, len(ref_m) / R['resp'].fs, len(ref_m))
            ax[i].plot(time, ref_m, color='blue', lw=2)
            ax[i].fill_between(time,
                               ref_m + ref_sem,
                               ref_m - ref_sem,
                               color='lightblue')

            # set title
            ax[i].set_title(fn, fontsize=8)
            # set labels
            ax[i].set_xlabel('Time (s)')
            ax[i].set_ylabel('Spk / sec')

            # get raster plot baseline
            base = np.max(np.concatenate((tar_m + tar_sem, ref_m + ref_sem)))
            if base > ref_base:
                ref_base = base

        for i, mf in enumerate(mfile):
            # plot the rasters
            fn = mf.split('/')[-1]
            ep_mask = [ep for ep in rec.epochs.name if fn in ep]
            rast_rec = rec.and_mask(ep_mask).apply_mask(reset_epochs=True)
            if '_a_' in fn:
                rast_rec = rast_rec.and_mask(['HIT_TRIAL'
                                              ]).apply_mask(reset_epochs=True)
            rast = rast_rec['resp'].extract_channels([c])

            ref_times = np.where(rast.extract_epoch('REFERENCE').squeeze())
            base = ref_base
            ref_pupil = np.nanmean(
                rast_rec['pupil'].extract_epoch('REFERENCE'), -1)
            xoffset = rast.extract_epoch(
                'TARGET').shape[-1] / rec['resp'].fs + 0.01
            ax[i].plot(ref_pupil / np.max(all_pupil) + xoffset,
                       np.linspace(base, int(base * 2), len(ref_pupil)),
                       color='k')
            ax[i].axvline(xoffset + np.median(all_pupil / np.max(all_pupil)),
                          linestyle='--',
                          color='lightgray')
            if ref_times[0].size != 0:
                max_rep = ref_pupil.shape[0] - 1
                ref_locs = ref_times[0] * (base / max_rep)
                ref_locs = ref_locs + base
                ax[i].plot(ref_times[1] / rec['resp'].fs,
                           ref_locs,
                           '|',
                           color='blue',
                           markersize=1)

            tar_times = np.where(rast.extract_epoch('TARGET').squeeze())
            tar_pupil = np.nanmean(rast_rec['pupil'].extract_epoch('TARGET'),
                                   -1)
            tar_base = np.max(ref_locs) + 1
            ax[i].plot(tar_pupil / np.max(all_pupil) + xoffset,
                       np.linspace(tar_base, tar_base + base, len(tar_pupil)),
                       color='k')
            if tar_times[0].size != 0:
                max_rep = tar_pupil.shape[0] - 1
                tar_locs = tar_times[0] * (base / max_rep)
                tar_locs = tar_locs + tar_base
                ax[i].plot(tar_times[1] / rec['resp'].fs,
                           tar_locs,
                           '|',
                           color='red',
                           markersize=1)

            # set ylim
            #ax[i].set_ylim((0, top))

            # set plot aspect
            #asp = np.diff(ax[i].get_xlim())[0] / np.diff(ax[i].get_ylim())[0]
            #ax[i].set_aspect(asp / 2)

        f.suptitle(c, fontsize=8)
        f.tight_layout()
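# Hypothetical usage sketch: both arguments are placeholders; the animal name
# must match a directory under /auto/data/daq/ and the site a recorded site id.
plot_collapsed_ref_tar('Animal_name', 'TAR010c')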
Example #10
def generate_recording_uri(cellid=None,
                           batch=None,
                           loadkey="",
                           siteid=None,
                           force_old_loader=False,
                           **options):
    """
    required parameters (passed through to nb.baphy_data_path):
        cellid: string or list
            string can be a valid cellid or siteid
            list is a list of cellids from a single(?) site
        batch: integer

    figure out filename (or eventually URI) of pre-generated
    NEMS-format recording for a given cell/batch/loader string

    very baphy-specific. Needs to be coordinated with loader processing
    in nems.xform_helper
    """
    # remove any preprocessing keywords in the loader string.
    if '-' in loadkey:
        loader = nems.utils.escaped_split(loadkey, '-')[0]
    else:
        loader = loadkey
    if loader != '':
        log.info('loader=%s', loader)

    options = baphy_io.parse_loadkey(loadkey=loader,
                                     batch=batch,
                                     siteid=siteid,
                                     cellid=cellid,
                                     **options)
    """ moved functionality to baphy_io
    ops = loader.split(".")

    # update some defaults
    options['rasterfs'] = options.get('rasterfs', 100)
    options['chancount'] = options.get('chancount', 0)
    load_pop_file = False

    for op in ops:
        if op=='ozgf':
            options['stimfmt'] = 'ozgf'
        elif op=='parm':
            options['stimfmt'] = 'parm'
        elif op=='ll':
            options['stimfmt'] = 'll'
        elif op=='env':
            options['stimfmt'] = 'envelope'
        elif op in ['nostim','psth','ns', 'evt']:
            options.update({'stim': False, 'stimfmt': 'parm'})

        elif op.startswith('fs'):
            options['rasterfs'] = int(op[2:])
        elif op.startswith('ch'):
            options['chancount'] = int(op[2:])

        elif op.startswith('fmap'):
            options['facemap'] = int(op[4:])

        elif op=='pup':
            options.update({'pupil': True, 'rem': 1})
            #options.update({'pupil': True, 'pupil_deblink': True,
            #                'pupil_deblink_dur': 1,
            #                'pupil_median': 0, 'rem': 1})
        elif op=='dlc':
            options.update({'dlc': True})

        elif op=='rem':
            options['rem'] = True

        elif 'eysp' in ops:
            options['pupil_eyespeed'] = True
        elif op.startswith('pop'):
            load_pop_file = True
        elif op == 'voc':
            options.update({'runclass': 'VOC'})
    if 'stimfmt' not in options.keys():
        raise ValueError('Valid stim format (ozgf, psth, parm, env, evt) not specified in loader='+loader)
    if (options['stimfmt']=='ozgf') and (options['chancount'] <= 0):
        raise ValueError('Stim format ozgf requires chancount>0 (.chNN) in loader='+loader)

    if int(batch) in [263,294]:
        options["runclass"] = "VOC"

    if siteid is not None:
        options['siteid'] = siteid

    options["batch"] = batch
    options["cellid"] = cellid
    """
    ops = loader.split(".")
    load_pop_file = ("pop" in ops)

    if load_pop_file:
        recording_uri = pop_file(siteid=cellid, **options)
    elif force_old_loader:  #  | (batch==316):
        log.info("Using 'old' baphy.py loader")
        recording_uri, _ = nb.baphy_load_recording_uri(**options)
    else:
        manager = BAPHYExperiment(batch=batch, cellid=cellid)
        recording_uri = manager.get_recording_uri(**options)

    return recording_uri
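# Hypothetical usage sketch: here the loadkey is parsed by baphy_io.parse_loadkey
# and the recording is resolved through BAPHYExperiment unless force_old_loader
# is set to True.
uri = generate_recording_uri(cellid='TAR010c-13-1', batch=289,
                             loadkey='ozgf.fs100.ch18')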
Example #11
# site = 'ley070a'  # good site. A1
# site = 'AMT030a'  # low responses, OK but not as good
site = 'ley072b'  # primary-looking responses with strong contextual effects

modelname = 'resp'
options = {
    'batch': 316,
    'siteid': site,
    'stimfmt': 'envelope',
    'rasterfs': 100,
    'recache': False,
    'runclass': 'CPN',
    'stim': False
}  # TODO: cache stims, spectrograms???

load_URI = nb.baphy_load_recording_uri(**options)
loaded_rec = recording.load_recording(load_URI)

##########

rec = loaded_rec
dir(rec)
rec.signals

start = 15
stop = 986

signal = rec['resp']
data = signal.rasterize()._data
data.shape
data[2, 1:1000:1]
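# A minimal follow-up sketch (assumes matplotlib is available): plot the slice
# defined by start/stop above as a simple spike-count trace.
import matplotlib.pyplot as plt

trace = data[2, start:stop]
plt.plot(trace)
plt.xlabel('time bin (rasterfs = 100 Hz)')
plt.ylabel('spike count')
plt.title('channel 2, bins {}-{}'.format(start, stop))
plt.show()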