Example #1
def test_configure_model():
    p = re.compile("(\d+)(.*?)_(.*)_mlin_model_config.json")

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    for mcf in MODEL_CONFIG_FILES:
        fname = os.path.basename(mcf)
        m = p.match(fname)
        
        sid, cre, config = m.groups()

        method_config_file = os.path.join(METHOD_CONFIG_DIR, "%s.json" % config)
        prep_file = os.path.join(PREP_DIR, "%s_preprocessed_dict.json" % sid)
        out_file = os.path.join(OUT_DIR, "%s_%s_model_config.json" % (sid, config))

        if os.path.exists(prep_file):
            print "testing", prep_file
            out_config = configure_model(ju.read(method_config_file), 
                                         ju.read(prep_file))

            ju.write(out_file, out_config)

            test_config = ju.read(mcf)

            cmpdict(test_config['neuron'], out_config['neuron'])
            try:
                cmpdict(test_config['optimizer'], out_config['optimizer'])
            except ComparisonException as e:
                if e.key != "param_fit_names":
                    raise

        else:
            logging.error("preprocessor file %s does not exist" % prep_file)
Example #2
def main():
    input_data = ju.read(INPUT_JSON)

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # load hyperparameter dict
    suffix = 'high_res' if HIGH_RES else 'standard'

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    fit_kwargs = dict(high_res=HIGH_RES,
                      threshold_injection=THRESHOLD_INJECTION,
                      experiments_exclude=experiments_exclude)
    model = fit(cache, **fit_kwargs)

    # write results
    logging.debug('saving')
    output_file = os.path.join(OUTPUT_DIR, 'homogeneous-%s-model.csv' % suffix)
    model.to_csv(output_file)
Example #3
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    # get full map
    full_map = get_full_map(cache, structures, experiments_exclude)

    # convert to df
    df = get_df(full_map, structures)

    # save
    df.to_csv(OUTPUT_FILE)
Example #4
def main():
    module = ags.ArgSchemaParser(schema_type=ModelSelectionParameters)

    swc_path = module.args["paths"]["swc"]
    fit_style_paths = module.args["paths"]["fit_styles"]
    best_fit_json_path = module.args["paths"]["best_fit_json_path"]
    passive = ju.read(module.args["paths"]["passive_results"])
    preprocess = ju.read(module.args["paths"]["preprocess_results"])

    fits = module.args["paths"]["fits"]
    fit_results = ms.fit_info(fits)
    best_fit = ms.select_model(fit_results, module.args["paths"], passive,
                               preprocess["v_baseline"],
                               module.args["noise_1_sweeps"],
                               module.args["noise_2_sweeps"])
    if best_fit is None:
        raise Exception("Failed to find acceptable optimized model")

    logging.info("building fit data")
    fit_style_data = ju.read(
        module.args["paths"]["fit_styles"][best_fit["fit_type"]])
    fit_data = ms.build_fit_data(best_fit["params"], passive, preprocess,
                                 fit_style_data)

    logging.info("writing fit data: %s", best_fit_json_path)
    ju.write(best_fit_json_path, fit_data)

    output = {
        "paths": {
            "fit_json": best_fit_json_path,
        }
    }

    logging.info("writing output json: %s", module.args["output_json"])
    ju.write(module.args["output_json"], output)
Example #5
def main():
    module = ags.ArgSchemaParser(schema_type=OptimizeParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    passive_results = ju.read(module.args["paths"]["passive_results"])
    fit_style_data = ju.read(module.args["paths"]["fit_style"])

    results = optimize(
        hoc_files=module.args["paths"]["hoc_files"],
        compiled_mod_library=module.args["paths"]["compiled_mod_library"],
        morphology_path=module.args["paths"]["swc"],
        features=preprocess_results["features"],
        targets=preprocess_results["target_features"],
        stim_params=StimParams(preprocess_results["stimulus"]),
        passive_results=passive_results,
        fit_type=module.args["fit_type"],
        fit_style_data=fit_style_data,
        seed=module.args["seed"],
        ngen=module.args["ngen"],
        mu=module.args["mu"],
        storage_directory=module.args["paths"]["storage_directory"],
        starting_population=module.args["paths"].get("starting_population",
                                                     None))

    logging.info("Writing optimization output")
    ju.write(module.args["output_json"], results)
Example #6
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')
    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    output_file = os.path.join(OUTPUT_DIR, 'hyperparameters-%s.json' % OPTION)

    results = dict()
    for structure in structures:
        logging.debug("Running cross validation for structure: %s", structure)
        structure_id = get_structure_id(cache, structure)

        results[structure] = fit_structure(cache,
                                           structure_id,
                                           experiments_exclude,
                                           kernel=KERNEL,
                                           model_option=OPTION)

    # write results
    ju.write(output_file, results)
Example #7
def generate_pipeline_input(cell_dir=None,
                            specimen_id=None,
                            input_nwb_file=None,
                            plot_figures=False,
                            qc_fig_dirname="qc_figs",
                            qc_criteria_file=None,
                            stimulus_ontology_file=None):

    se_input = generate_se_input(cell_dir,
                                 specimen_id=specimen_id,
                                 input_nwb_file=input_nwb_file)
    pipe_input = dict(se_input)

    if specimen_id:
        pipe_input['manual_sweep_states'] = lq.get_sweep_states(specimen_id)

    elif input_nwb_file:
        pipe_input['manual_sweep_states'] = []

    if plot_figures:
        pipe_input['qc_fig_dir'] = os.path.join(cell_dir, qc_fig_dirname)

    pipe_input['output_nwb_file'] = os.path.join(cell_dir, "output.nwb")
    pipe_input['stimulus_ontology_file'] = stimulus_ontology_file

    if qc_criteria_file is not None:
        pipe_input['qc_criteria'] = ju.read(qc_criteria_file)
    else:
        pipe_input['qc_criteria'] = ju.read(qcp.DEFAULT_QC_CRITERIA_FILE)

    return pipe_input
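
A hedged usage sketch for generate_pipeline_input; the directory and specimen id below are hypothetical placeholders:

# hypothetical inputs: any writable directory and LIMS specimen id would do
cell_dir = "/tmp/cell_12345"
pipe_input = generate_pipeline_input(cell_dir=cell_dir,
                                     specimen_id=12345,
                                     plot_figures=True)
ju.write(os.path.join(cell_dir, "pipeline_input.json"), pipe_input)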
Example #8
def validate_se(test_output_json="test/sweep_extraction_output.json"):
    print("**** SWEEP EXTRACTION")
    pipeline_output_json = "/allen/programs/celltypes/production/humancelltypes/prod242/Ephys_Roi_Result_642966460/EPHYS_NWB_STIMULUS_SUMMARY_QUEUE_642966460_output.json"

    pipeline_output = ju.read(pipeline_output_json)
    test_output = ju.read(test_output_json)

    sweep_features = [
        "stimulus_interval", "post_vm_mv", "pre_vm_mv", "stimulus_duration",
        "stimulus_start_time", "sweep_number", "vm_delta_mv", "leak_pa",
        "pre_noise_rms_mv", "slow_noise_rms_mv", "post_noise_rms_mv",
        "slow_vm_mv", "stimulus_amplitude", "stimulus_units",
        "bridge_balance_mohm"
    ]

    test_sweeps = {s['sweep_number']: s for s in test_output['sweep_data']}
    for d1 in pipeline_output['sweep_data'].values():
        try:
            d2 = test_sweeps[d1['sweep_number']]
            validate_feature_set(sweep_features, d1, d2)
        except KeyError as e:
            print(e)

    other_sweep_features = [
        "stimulus_name", "clamp_mode", "stimulus_scale_factor", "stimulus_code"
    ]
Example #9
def test_simulate():
    logging.getLogger().setLevel(logging.DEBUG)
    neuron_config = json_utilities.read(os.path.join(OUTPUT_DIR, '%d_neuron_config.json' % NEURONAL_MODEL_ID))
    ephys_sweeps = json_utilities.read(os.path.join(OUTPUT_DIR, 'ephys_sweeps.json'))
    ephys_file_name = os.path.join(OUTPUT_DIR, '%d.nwb' % NEURONAL_MODEL_ID)

    neuron = GlifNeuron.from_dict(neuron_config)

    sweep_numbers = [ s['sweep_number'] for s in ephys_sweeps ]
    simulate_neuron(neuron, sweep_numbers, ephys_file_name, ephys_file_name, 0.05)
Example #10
def validate_fx(test_output_json="test/fx_output.json"):
    print("**** FX")
    pipeline_output_json = "/allen/programs/celltypes/production/humancelltypes/prod242/Ephys_Roi_Result_642966460/EPHYS_FEATURE_EXTRACTION_V2_QUEUE_642966460_output.json"

    pipeline_output = ju.read(pipeline_output_json)
    test_output = ju.read(test_output_json)

    validate_cell_features(
        pipeline_output, pipeline_output['specimens'][0]['ephys_features'][0],
        test_output['cell_record'])
Example #11
def main():
    args = parse_arguments()

    logging.getLogger().setLevel(args.log_level)

    glif_api = None
    if (args.neuron_config_file is None or args.sweeps_file is None
            or args.ephys_file is None):

        assert args.neuronal_model_id is not None, Exception(
            "A neuronal model id is required if no neuron config file, sweeps file, or ephys data file is provided."
        )

        glif_api = GlifApi()
        glif_api.get_neuronal_model(args.neuronal_model_id)

    if args.neuron_config_file:
        neuron_config = json_utilities.read(args.neuron_config_file)
    else:
        neuron_config = glif_api.get_neuron_config()

    if args.sweeps_file:
        sweeps = json_utilities.read(args.sweeps_file)
    else:
        sweeps = glif_api.get_ephys_sweeps()

    if args.ephys_file:
        ephys_file = args.ephys_file
    else:
        ephys_file = 'stimulus_%d.nwb' % args.neuronal_model_id

        if not os.path.exists(ephys_file):
            logging.info("Downloading stimulus to %s." % ephys_file)
            glif_api.cache_stimulus_file(ephys_file)
        else:
            logging.warning("Reusing %s because it already exists." %
                            ephys_file)

    if args.output_ephys_file:
        output_ephys_file = args.output_ephys_file
    else:
        logging.warning(
            "Overwriting input file data with simulated data in place.")
        output_ephys_file = ephys_file

    neuron = GlifNeuron.from_dict(neuron_config)

    # filter out test sweeps
    sweep_numbers = [
        s['sweep_number'] for s in sweeps if s['stimulus_name'] != 'Test'
    ]

    simulate_neuron(neuron, sweep_numbers, ephys_file, output_ephys_file,
                    args.spike_cut_value)
Example #12
def stimulus(neuron_config_file, ephys_sweeps_file):
    neuron_config = json_utilities.read(neuron_config_file)
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    return stimulus
Example #13
def test_3():
    neuron_config = json_utilities.read('neuron_config.json')
    ephys_sweeps = json_utilities.read('ephys_sweeps.json')
    ephys_file_name = 'stimulus.nwb'

    neuron = GlifNeuron.from_dict(neuron_config)

    # sweep_numbers = [ s['sweep_number'] for s in ephys_sweeps
    #                  if s['stimulus_units'] == 'Amps' ]
    sweep_numbers = [7]
    simulate_neuron(neuron, sweep_numbers,
                    ephys_file_name, ephys_file_name, 0.05)
Example #14
def stimulus():
    neuron_config = json_utilities.read('neuron_config.json')
    ephys_sweeps = json_utilities.read('ephys_sweeps.json')
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    return stimulus
Example #15
def test_3(configured_glif_api, neuron_config_file, ephys_sweeps_file):
    neuron_config = json_utilities.read(neuron_config_file)
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    neuron = GlifNeuron.from_dict(neuron_config)

    # sweep_numbers = [ s['sweep_number'] for s in ephys_sweeps
    #                  if s['stimulus_units'] == 'Amps' ]
    sweep_numbers = [7]
    simulate_neuron(neuron, sweep_numbers,
                    ephys_file_name, ephys_file_name, 0.05)
Example #16
def load_dicts(structures, runs, keys, outdir):
    d = {s: dict(full=dict(), ptp=dict()) for s in structures}
    for structure in structures:
        full = ju.read(os.path.join(outdir, "%s_scores_full.json" % structure))
        ptp = ju.read(os.path.join(outdir, "%s_scores_ptp.json" % structure))

        d[structure]['full'] = {
            k: -np.mean(full.get(k, [np.nan]))
            for k in keys
        }
        d[structure]['ptp'] = {k: -np.mean(ptp.get(k, [np.nan])) for k in keys}

    return d
Example #17
def main():
    module = ags.ArgSchemaParser(schema_type=ConsolidateParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    is_spiny = preprocess_results["is_spiny"]
    info = ju.read(module.args["paths"]["passive_info"])

    if info["should_run"]:
        fit_1_path = module.args["paths"]["passive_fit_1"]
        fit_1 = ju.read(fit_1_path)

        fit_2_path = module.args["paths"]["passive_fit_2"]
        fit_2 = ju.read(fit_2_path)

        fit_3_path = module.args["paths"]["passive_fit_elec"]
        fit_3 = ju.read(fit_3_path)

        ra, cm1, cm2 = cpf.compare_runs(preprocess_results, fit_1, fit_2,
                                        fit_3)
    else:
        ra = 100.
        cm1 = 1.
        if is_spiny:
            cm2 = 2.
        else:
            cm2 = 1.

    passive = {
        "ra": ra,
        "cm": {
            "soma": cm1,
            "axon": cm1,
            "dend": cm2
        },
        "e_pas": preprocess_results["v_baseline"]
    }

    passive["e_pas"] = preprocess_results["v_baseline"]
    if preprocess_results["has_apical"]:
        passive["cm"]["apic"] = cm2

    passive_results_path = module.args["paths"]["passive_results"]
    ju.write(passive_results_path, passive)

    output = {
        "paths": {
            "passive_results": passive_results_path,
        }
    }

    ju.write(module.args["output_json"], output)
Example #18
def main():
    input_data = ju.read(INPUT_JSON)
    structures = input_data.get('structures')
    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)
    eid_set = ju.read(EXPERIMENTS_PTP_JSON)
    hyperparameters = ju.read(HYPERPARAMETER_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    output_dir = os.path.join(OUTPUT_DIR, 'voxel-%s' % ERROR_OPTION)
    run_kwargs = dict(experiments_exclude=experiments_exclude,
                      error_option=ERROR_OPTION)

    print(structures)
    for structure in reversed(structures):
        # get structure id
        logging.debug("Running nested cross validation for structure: %s",
                      structure)
        structure_id = get_structure_id(cache, structure)

        run_kwargs.update(hyperparameters[structure])

        scores = run_structure(cache, structure_id, eid_set=None, **run_kwargs)
        logging.debug("voxel score    : %.2f", scores['test_voxel'].mean())
        logging.debug("regional score : %.2f", scores['test_regional'].mean())
        write_output(output_dir, structure, structure_id, scores,
                     'scores_full')

        logging.debug("Scoring only where power to predict")
        try:
            scores = run_structure(cache,
                                   structure_id,
                                   eid_set=eid_set,
                                   **run_kwargs)
            logging.debug("voxel score    : %.2f", scores['test_voxel'].mean())
            logging.debug("regional score : %.2f",
                          scores['test_regional'].mean())
        except Exception:
            logging.debug("Not enough experiments")
        else:
            write_output(output_dir, structure, structure_id, scores,
                         'scores_ptp')
Example #19
def main():
    args = parse_arguments()

    logging.getLogger().setLevel(args.log_level)

    glif_api = None
    if (args.neuron_config_file is None or 
        args.sweeps_file is None or
        args.ephys_file is None):

        assert args.neuronal_model_id is not None, Exception("A neuronal model id is required if no neuron config file, sweeps file, or ephys data file is provided.")

        glif_api = GlifApi()
        glif_api.get_neuronal_model(args.neuronal_model_id)

    if args.neuron_config_file:
        neuron_config = json_utilities.read(args.neuron_config_file)
    else:
        neuron_config = glif_api.get_neuron_config()

    if args.sweeps_file:
        sweeps = json_utilities.read(args.sweeps_file)
    else:
        sweeps = glif_api.get_ephys_sweeps()

    if args.ephys_file:
        ephys_file = args.ephys_file
    else:
        ephys_file = 'stimulus_%d.nwb' % args.neuronal_model_id

        if not os.path.exists(ephys_file):
            logging.info("Downloading stimulus to %s." % ephys_file)
            glif_api.cache_stimulus_file(ephys_file)
        else:
            logging.warning("Reusing %s because it already exists." % ephys_file)

    if args.output_ephys_file:
        output_ephys_file = args.output_ephys_file
    else:
        logging.warning("Overwriting input file data with simulated data in place.")
        output_ephys_file = ephys_file
        

    neuron = GlifNeuron.from_dict(neuron_config)

    # filter out test sweeps
    sweep_numbers = [ s['sweep_number'] for s in sweeps if s['stimulus_name'] != 'Test' ]

    simulate_neuron(neuron, sweep_numbers, ephys_file, output_ephys_file, args.spike_cut_value) 
Example #20
def test_mies_nwb_pipeline_output(input_json, output_json, tmpdir_factory):
    """
    Runs pipeline, saves to a json file and compares to the existing pipeline output.
    Raises assertion error if test output is different from the benchmark.

    Parameters
    ----------
    input_json: string json file name of input
    output_json: string json file name of benchmark output
    tmpdir_factory: pytest fixture

    Returns
    -------

    """
    pipeline_input = ju.read(input_json)
    test_dir = str(tmpdir_factory.mktemp("test_mies_nwb2_specimens"))

    pipeline_input["output_nwb_file"] = os.path.join(
        test_dir, "output.nwb")  # Modify path for the test output
    pipeline_input["qc_figs_dir"] = None

    stimulus_ontology_file = pipeline_input.get("stimulus_ontology_file", None)

    obtained = run_pipeline(pipeline_input["input_nwb_file"],
                            pipeline_input["output_nwb_file"],
                            stimulus_ontology_file,
                            pipeline_input["qc_figs_dir"],
                            pipeline_input["qc_criteria"],
                            pipeline_input["manual_sweep_states"])

    ju.write(os.path.join(test_dir, 'pipeline_output.json'), obtained)
    obtained = ju.read(os.path.join(test_dir, 'pipeline_output.json'))
    expected = ju.read(output_json)

    output_diff = list(diff(expected, obtained, tolerance=0.001))

    # There is a known issue with newer MIES-generated NWBs: They report
    # recording date in offsetless UTC, rather than local time +- an offset to
    # UTC as in the older generation.
    unacceptable = []
    for item in output_diff:
        if not "recording_date" in item[1]:
            unacceptable.append(item)

    if unacceptable:
        print(unacceptable)
    assert len(unacceptable) == 0
Example #21
def main(paths, passive_fit_type, output_json, **kwargs):
    info = ju.read(paths["passive_info"])
    if not info["should_run"]:
        ju.write(output_json, { "paths": {} })
        return

    swc_path = paths["swc"].encode('ascii', 'ignore')
    up_data = np.loadtxt(paths["up"])
    down_data = np.loadtxt(paths["down"])
    results_file = paths["passive_fit_results_file"]

    npf.initialize_neuron(swc_path, paths["fit"])

    if passive_fit_type == npf.PASSIVE_FIT_1:
        results = npf.passive_fit_1(up_data, down_data,
            info["fit_window_start"], info["fit_window_end"])
    elif passive_fit_type == npf.PASSIVE_FIT_2:
        results = npf.passive_fit_2(up_data, down_data,
            info["fit_window_start"], info["fit_window_end"])
    elif passive_fit_type == npf.PASSIVE_FIT_ELEC:
        results = npf.passive_fit_elec(up_data, down_data,
            info["fit_window_start"], info["fit_window_end"],
            info["bridge"], info["electrode_cap"])
    else:
        raise Exception("unknown passive fit type: %s" % passive_fit_type)

    ju.write(results_file, asdict(results))
    ju.write(output_json, { "paths": { passive_fit_type: results_file } })
Example #22
def run_feature_collection(ids=None,
                           project="T301",
                           include_failed_sweeps=True,
                           include_failed_cells=False,
                           output_file="",
                           run_parallel=True,
                           data_source="lims",
                           **kwargs):
    if ids is not None:
        specimen_ids = ids
    else:
        specimen_ids = lq.project_specimen_ids(
            project, passed_only=not include_failed_cells)

    logging.info("Number of specimens to process: {:d}".format(
        len(specimen_ids)))

    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    get_data_partial = partial(data_for_specimen_id,
                               passed_only=not include_failed_sweeps,
                               data_source=data_source,
                               ontology=ontology)

    if run_parallel:
        pool = Pool()
        results = pool.map(get_data_partial, specimen_ids)
    else:
        results = map(get_data_partial, specimen_ids)

    df = pd.DataFrame([r for r in results if len(r) > 0])
    logging.info("shape {}".format(df.shape))
    df.set_index("specimen_id").to_csv(output_file)
Example #23
    def get_cells(self, file_name=None, require_morphology=False, require_reconstruction=False):
        '''
        Download metadata for all cells in the database and optionally return a
        subset filtered by whether or not they have a morphology or reconstruction.

        Parameters
        ----------
        
        file_name: string
            File name to save/read the cell metadata as JSON.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        require_morphology: boolean
            Filter out cells that have no morphological images.

        require_reconstruction: boolean
            Filter out cells that have no morphological reconstructions.
        '''

        file_name = self.get_cache_path(file_name, self.CELLS_KEY)

        if os.path.exists(file_name):
            cells = json_utilities.read(file_name)
        else:
            cells = self.api.list_cells(False, False)

            if self.cache:
                json_utilities.write(file_name, cells)

        # filter the cells on the way out
        return self.api.filter_cells(cells, require_morphology, require_reconstruction)
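
A minimal usage sketch of the caching behavior described above, assuming the standard CellTypesCache entry point:

from allensdk.core.cell_types_cache import CellTypesCache

# the first call downloads and caches the metadata; later calls read the cached JSON
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
cells = ctc.get_cells(require_reconstruction=True)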
Example #24
    def test_spike_times(self):
        expected = [
            2.937305, 3.16453, 3.24271, 4.1622, 4.24182,
            10.0898, 10.132545, 10.176095, 10.2361, 10.660655,
            10.762125, 10.863465, 10.93833, 11.140815, 11.19246,
            11.24553, 11.696305, 11.812655, 11.90469, 12.056755,
            12.15794, 12.233905, 12.47577, 12.741295, 12.82861,
            12.923175, 18.05068, 18.139875, 18.17693, 18.221485,
            18.24337, 18.39981, 18.470705, 18.759675, 18.82183,
            18.877475, 18.91033, 18.941195, 19.050515, 19.12557,
            19.15963, 19.188655, 19.226205, 19.29813, 19.420665,
            19.47627, 19.763365, 19.824225, 19.897995, 19.93155,
            20.04916, 20.11832, 20.148755, 20.18004, 20.22173,
            20.2433, 20.40018, 20.470915, 20.759715, 20.82156,
            20.866465, 20.90807, 20.939175]

        bp = BiophysicalPerisomaticApi('http://api.brain-map.org')
        bp.cache_stimulus = True  # change to False to skip the large stimulus NWB download
        neuronal_model_id = 472451419  # taken from the web site, as above
        bp.cache_data(neuronal_model_id, working_directory='neuronal_model')
        cwd = os.path.realpath(os.curdir)
        print(cwd)
        os.chdir(os.path.join(cwd, 'neuronal_model'))
        manifest = ju.read('manifest.json')
        manifest['biophys'][0]['model_file'][0] = 'manifest_51.json'
        manifest['runs'][0]['sweeps'] = [51]
        ju.write('manifest_51.json', manifest)
        subprocess.call(['nrnivmodl', './modfiles'])
        run(Config().load('manifest_51.json'))
        # os.chdir(cwd)
        nwb_out = NwbDataSet('work/386049444.nwb')
        spikes = nwb_out.get_spike_times(51)

        numpy.testing.assert_array_almost_equal(spikes, expected)
Example #25
    def test_tilt_correction_end_to_end(self):
        input = {
            'swc_path': self.swc_path,
            'marker_path': self.marker_path,
            'ccf_soma_location': self.ccf_soma_location,
            'slice_transform_list': self.slice_transform_list,
            'slice_image_flip': self.slice_image_flip,
            'ccf_path': self.ccf_path,
            'output_json': self.output_json_path
        }
        cmd = [
            'python', '-m',
            'neuron_morphology.transforms.tilt_correction.compute_tilt_correction'
        ]
        for key, value in input.items():
            cmd.append(f'--{key}')
            cmd.append(f'{value}')

        sp.check_call(cmd)

        outputs = ju.read(self.output_json_path)
        self.assertAlmostEqual(outputs['tilt_correction'], -np.pi / 2)

        aff_t = AffineTransform.from_dict(outputs['tilt_transform_dict'])
        morph_t = aff_t.transform_morphology(self.morphology)

        axon = morph_t.node_by_id(1)
        self.assertAlmostEqual(axon['x'], 0)
        self.assertAlmostEqual(axon['y'], 3)
        self.assertAlmostEqual(axon['z'], -2)
Example #26
def grab_diff_v_from_folder(ew, folder):
    '''Get the voltage-difference statistics if the matching file exists.
    inputs:
        ew: string that is matched against the files in the folder in order to return a
            value, i.e. if GLIF2 is requested but there is no GLIF2 file in the
            folder, nans are returned regardless of whether there is an explained
            variance value in the database. For example, this would happen if the
            model was excluded from analysis because of an aberrant parameter.
        folder: path to the structured folder used in the rest of the analysis
    returns:
        (RSS_of_voltage_diff, var_of_voltage_data, num_data_points_wo_spike_shape)
        for the requested model, or nans if the file is missing or malformed
    '''
    try:
        file = get_file_path_endswith(folder, ew)
        contents = ju.read(file)
        RSS_of_voltage_diff = contents['noise2']['RSS_of_voltage_diff']
        var_of_voltage_data = contents['noise2']['var_of_voltage_data']
        num_data_points_wo_spike_shape = contents['noise2']['num_data_points_wo_spike_shape']
    except Exception:
        RSS_of_voltage_diff = np.nan
        var_of_voltage_data = np.nan
        num_data_points_wo_spike_shape = np.nan

    return RSS_of_voltage_diff, var_of_voltage_data, num_data_points_wo_spike_shape
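
A hedged usage sketch; combining the three returned quantities into an explained-variance ratio as below is an assumption for illustration, not taken from this code:

# assumption: explained variance ratio ~ 1 - RSS / (variance * n)
rss, var, n = grab_diff_v_from_folder('GLIF2', folder)
ev_ratio = np.nan if np.isnan(rss) else 1.0 - rss / (var * n)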
Example #27
def main(injection_region, filtered=False):
    input_data = ju.read(INPUT_JSON)

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # get voxel_model_cache
    cache = VoxelModelCache(manifest_file=manifest_file)

    # get region id
    region_id = get_region_id(cache, injection_region)

    logging.debug("performing virtual injection into %s (%s)" %
                  (injection_region, region_id))
    projection = get_projection(
        cache, region_id, full=FULL_INJECTION, filtered=filtered)

    # get projection (row)
    logging.debug("upscaling projection to 10 micron")
    projection = upscale_projection(projection, SCALE, **UPSCALE_KWARGS)

    # file name
    suffix = injection_region + "full" if FULL_INJECTION else injection_region
    vol_file = os.path.join(VOLUME_DIR, "projection_density_%s.nrrd" % suffix)
    logging.debug("saving projection volume : %s" % vol_file)
    nrrd.write(vol_file, projection, options=dict(encoding='raw'))

    return vol_file
Example #28
    def load_manifest(self, file_name):
        '''Read a keyed collection of path specifications.
        
        Parameters
        ----------
        file_name : string
            path to the manifest file
        
        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            self.manifest = Manifest(ju.read(file_name)['manifest'],
                                     os.path.dirname(file_name))
        else:
            self.manifest = None
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input_json")
    parser.add_argument("output_json")
    parser.add_argument("--log_level", default=logging.DEBUG)
    parser.add_argument("--input_dataset", default="FC")
    parser.add_argument("--roi_field", default="roi_names")
    parser.add_argument("--output_dataset", default="data")
    args = parser.parse_args()

    logging.getLogger().setLevel(args.log_level)

    input_data = ju.read(args.input_json)
    input_file, output_file = parse_input(input_data)

    # read from "data"
    input_h5 = h5py.File(input_file, "r")
    traces = input_h5[args.input_dataset][()]
    roi_names = input_h5[args.roi_field][:]
    input_h5.close()

    dff = calculate_dff(traces)

    # write to "data"
    output_h5 = h5py.File(output_file, "w")
    output_h5[args.output_dataset] = dff
    output_h5[args.roi_field] = roi_names
    output_h5.close()

    output_data = {}

    ju.write(args.output_json, output_data)
Example #30
def main():
    module = ags.ArgSchemaParser(schema_type=PassiveFittingParameters)

    info = ju.read(module.args["paths"]["passive_info"])
    if not info["should_run"]:
        ju.write(module.args["output_json"], { "paths": {} })
        return

    swc_path = module.args["paths"]["swc"].encode('ascii', 'ignore')
    up_data = np.loadtxt(module.args["paths"]["up"])
    down_data = np.loadtxt(module.args["paths"]["down"])
    passive_fit_type = module.args["passive_fit_type"]
    results_file = module.args["paths"]["passive_fit_results_file"]


    npf.initialize_neuron(swc_path, module.args["paths"]["fit"])

    if passive_fit_type == npf.PASSIVE_FIT_1:
        results = npf.passive_fit_1(info, up_data, down_data)
    elif passive_fit_type == npf.PASSIVE_FIT_2:
        results = npf.passive_fit_2(info, up_data, down_data)
    elif passive_fit_type == npf.PASSIVE_FIT_ELEC:
        results = npf.passive_fit_elec(info, up_data, down_data)
    else:
        raise Exception("unknown passive fit type: %s" % passive_fit_type)

    ju.write(results_file, results)

    ju.write(module.args["output_json"], { "paths": { passive_fit_type: results_file } })
Example #31
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('cells_csv', help='CSV containing cell metadata')
    parser.add_argument('connections_h5', help='HDF5 file containing cell connectivity')
    parser.add_argument('network_vtk_file', help='.vtk output file')
    parser.add_argument('--manifest')
    parser.add_argument('--morphology_vtk_file')

    args = parser.parse_args()

    # read in the cell CSV
    with open(args.cells_csv, 'r') as f:
        r = csv.DictReader(f)
        cells = list(r)

    # read in the connections from the H5 file
    h5u = Hdf5Util()
    connections = h5u.read(args.connections_h5)

    # write out the results
    write_network_vtk(args.network_vtk_file, cells, connections)

    if args.manifest:
        config = ju.read(args.manifest)
        manifest = Manifest(config['manifest'], relative_base_dir=os.path.dirname(args.manifest))
        write_morphology_vtk(args.morphology_vtk_file, cells, manifest)
Example #32
def main():
    input_data = ju.read(INPUT_JSON)

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # get cache, metric
    logging.debug("loading regional matrix")
    cache = VoxelModelCache(manifest_file=manifest_file)
    df_metric = cache.get_normalized_connection_density(dataframe=True)

    # plot
    fig = plot(df_metric,
               STRUCTURES,
               cache,
               GRID_KWS,
               CBAR_KWS,
               HEATMAP_KWS,
               figsize=FIGSIZE)

    fig.savefig(OUTPUT_FILE, **SAVEFIG_KWARGS)
    plt.close(fig)
Example #33
def run_qc(stimulus_ontology_file, cell_features, sweep_features, qc_criteria):
    """

    Parameters
    ----------
    stimulus_ontology_file : str
        ontology file name
    cell_features: dict
        cell features
    sweep_features : list of dicts
        sweep features
    qc_criteria: dict
        qc criteria

    Returns
    -------
    dict
        containing state of the cell and sweeps
    """

    lu.log_pretty_header("Perform QC checks", level=1)

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    cell_state, sweep_states = qcp.qc_experiment(ont, cell_features,
                                                 sweep_features, qc_criteria)

    qc_summary(sweep_features, sweep_states, cell_features, cell_state)

    return dict(cell_state=cell_state, sweep_states=sweep_states)
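
A hedged sketch of calling run_qc with features produced earlier in the pipeline; cell_features and sweep_features are assumed outputs of the feature-extraction step:

# passing None for the ontology falls back to the default, as documented above
qc_results = run_qc(None, cell_features, sweep_features,
                    ju.read(qcp.DEFAULT_QC_CRITERIA_FILE))
cell_state = qc_results["cell_state"]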
Example #34
def main():
    """
    # Usage:
    $ python plot_ephys_nwb_file.py NWB_FILE_NAME

    """

    nwb_file = sys.argv[1]
    print("plotting file: %s" % nwb_file)

    stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_data_set(nwb_file=nwb_file,
                               validate_stim=False,
                               ontology=ont)

    vclamp_sweep_table = data_set.sweep_table[
        data_set.sweep_table["clamp_mode"] == "VoltageClamp"]
    plot_data_set(data_set, vclamp_sweep_table, nwb_file)

    iclamp_sweep_table = data_set.sweep_table[
        data_set.sweep_table["clamp_mode"] == "CurrentClamp"]
    plot_data_set(data_set, iclamp_sweep_table, nwb_file)

    plt.show()
Example #35
def main():
    parser = argparse.ArgumentParser(
        description='analyze specimens for cell-wide features')
    parser.add_argument('nwb_file')
    parser.add_argument('feature_json')
    parser.add_argument('--output_directory', default='.')
    parser.add_argument('--no-sweep-page',
                        action='store_false',
                        dest='sweep_page')
    parser.add_argument('--no-cell-page',
                        action='store_false',
                        dest='cell_page')
    parser.add_argument('--log_level')

    args = parser.parse_args()

    if args.log_level:
        logging.getLogger().setLevel(args.log_level)

    ephys_roi_result = json_utilities.read(args.feature_json)

    if args.sweep_page:
        logging.debug("making sweep page")
        make_sweep_page(args.nwb_file, ephys_roi_result, args.output_directory)

    if args.cell_page:
        logging.debug("making cell page")
        make_cell_page(args.nwb_file, ephys_roi_result, args.output_directory,
                       True)
Example #36
    def load_manifest(self, file_name):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            self.manifest = Manifest(
                ju.read(file_name)['manifest'], os.path.dirname(file_name))
        else:
            self.manifest = None
Example #37
    def from_file_name(cls, file_name, cache=True, **kwargs):
        '''Alternative constructor using cache path file_name.

        Parameters
        ----------
        file_name : string
            Path where storage_directories will be saved.
        **kwargs
            Keyword arguments to be supplied to __init__

        Returns
        -------
        cls : instance of GridDataApiPrerelease
        '''
        if os.path.exists(file_name):
            storage_directories = json_utilities.read(file_name)
        else:
            storage_directories = _get_grid_storage_directories(
                cls.GRID_DATA_DIRECTORY)

            if cache:
                Manifest.safe_make_parent_dirs(file_name)
                json_utilities.write(file_name, storage_directories)

        return cls(storage_directories, **kwargs)
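
A minimal sketch of the alternative constructor above; the cache file path is a hypothetical placeholder:

# builds (and optionally caches) the storage-directory map on first use
api = GridDataApiPrerelease.from_file_name('grid_storage_directories.json')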
Example #38
def run_feature_extraction(input_nwb_file, stimulus_ontology_file,
                           output_nwb_file, qc_fig_dir, sweep_info, cell_info):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_data_set(sweep_info=sweep_info,
                               nwb_file=input_nwb_file,
                               ontology=ont,
                               api_sweeps=False)

    try:
        cell_features, sweep_features, cell_record, sweep_records = dsft.extract_data_set_features(
            data_set)

        if cell_info:
            cell_record.update(cell_info)

        cell_state = {"failed_fx": False, "fail_fx_message": None}

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state
        }

    except (er.FeatureError, IndexError) as e:
        cell_state = {"failed_fx": True, "fail_fx_message": str(e)}
        logging.warning(e)
        feature_data = {'cell_state': cell_state}

    if not cell_state["failed_fx"]:
        sweep_spike_times = collect_spike_times(sweep_features)
        embed_spike_times(input_nwb_file, output_nwb_file, sweep_spike_times)

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows int64 keys of sweep numbers cannot be converted to str by json.dump when serializing.
        # Thus, we are converting them here:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data
Example #39
    def get_experiments(self, dataframe=False, file_name=None, cre=None, injection_structure_ids=None):
        """
        Read a list of experiments that match certain criteria.  If caching is enabled,
        this will save the whole (unfiltered) list of experiments to a file.

        Parameters
        ----------
        
        dataframe: boolean
            Return the list of experiments as a Pandas DataFrame.  If False,
            return a list of dictionaries.  Default False. 

        file_name: string
            File name to save/read the structures table.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        cre: boolean or list
            If True, return only cre-positive experiments.  If False, return only
            cre-negative experiments.  If None, return all experiments.  If a list, return
            all experiments with cre line names in the supplied list.  Default None.

        injection_structure_ids: list
            Only return experiments that were injected in the structures provided here.
            If None, return all experiments.  Default None.

        """

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        if os.path.exists(file_name):
            experiments = json_utilities.read(file_name)
        else:
            experiments = self.api.experiment_source_search(injection_structures='root')
            
            # removing these elements because they are specific to a particular resolution
            for e in experiments:
                del e['num-voxels']
                del e['injection-volume']
                del e['sum']
                del e['name']

            if self.cache:
                self.safe_mkdir(os.path.dirname(file_name))

                json_utilities.write(file_name, experiments)

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre, injection_structure_ids)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
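
A hedged usage sketch, assuming this method lives on MouseConnectivityCache as in the AllenSDK:

from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache

mcc = MouseConnectivityCache(manifest_file='connectivity/manifest.json')
# cre-negative experiments injected into primary visual cortex (structure id 385)
experiments = mcc.get_experiments(dataframe=True, cre=False,
                                  injection_structure_ids=[385])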
Example #40
def output(neuron_config_file, ephys_sweeps_file):
    neuron_config = json_utilities.read(neuron_config_file)
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    # initialize the neuron
    # important! update the neuron's dt for your stimulus
    neuron = GlifNeuron.from_dict(neuron_config)
    neuron.dt = 1.0 / data['sampling_rate']

    # simulate the neuron
    truncate = 56041
    output = neuron.run(stimulus[0:truncate])

    return output
Example #41
def test_optimize_neuron():
    p = re.compile("(\d+)_(.*)_model_config.json")

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    for model_config_file in MODEL_CONFIG_FILES:
        logging.info("testing %s" % model_config_file)

        fname = os.path.basename(model_config_file)
        m = p.match(fname)

        sid, config = m.groups()
        data_config_file = DATA_CONFIG_PATTERN % int(sid)

        data_config = ju.read(data_config_file)
        nwb_file = data_config["filename"]
        sweep_list = data_config["sweeps"].values()

        model_config = ju.read(model_config_file)

        #DBG
        model_config['optimizer']['inner_iterations'] = 1
        model_config['optimizer']['outer_iterations'] = 1
        #DBG
        
        sweep_index = {s['sweep_number']: s for s in sweep_list}

        optimizer, best_param, begin_param = optimize_neuron(model_config, sweep_index)

        out_file = os.path.join(OUT_DIR, "%s_%s_neuron_config.json" % (sid, config))
        ju.write(out_file, optimizer.experiment.neuron.to_dict())

        out_config_file = os.path.join(OUT_DIR, "%s_%s_optimized_model_config.json" % (sid, config))
        ju.write(out_config_file, {
                'optimizer': optimizer.to_dict(),
                'neuron': optimizer.experiment.neuron.to_dict()
                })
Example #42
    def load_manifest(self, file_name, version=None):
        '''Read a keyed collection of path specifications.

        Parameters
        ----------
        file_name : string
            path to the manifest file

        Returns
        -------
        Manifest
        '''
        if file_name is not None:
            if not os.path.exists(file_name):

                # make the directory if it doesn't exist already
                dirname = os.path.dirname(file_name)
                if dirname:
                    Manifest.safe_mkdir(dirname)

                self.build_manifest(file_name)

            try:
                self.manifest = Manifest(
                    ju.read(file_name)['manifest'],
                    os.path.dirname(file_name),
                    version=version)
            except ManifestVersionError as e:
                if e.outdated is True:
                    intro = "is out of date"
                elif e.outdated is False:
                    intro = "was made with a newer version of the AllenSDK"
                elif e.outdated is None:
                    intro = "version did not match the expected version"

                raise ManifestVersionError(("Your manifest file (%s) %s" +
                                            " (its version is '%s', but version '%s' is expected).  Please remove this file" +
                                            " and it will be regenerated for you the next" +
                                            " time you instantiate this class." +
                                            " WARNING: There may be new data files available that replace the ones you already have downloaded." +
                                            " Read the notes for this release for more details on what has changed" +
                                            " (https://github.com/alleninstitute/allensdk/wiki).") % 
                                           (file_name, intro, e.found_version, e.version),
                                           e.version, e.found_version)

            self.manifest_path = file_name

        else:
            self.manifest = None
Example #43
def test_run_glifneuron(configured_glif_api, neuron_config_file):
    # initialize the neuron
    neuron_config = json_utilities.read(neuron_config_file)
    neuron = GlifNeuron.from_dict(neuron_config)

    # make a short square pulse. stimulus units should be in Amps.
    stimulus = [0.0] * 100 + [10e-9] * 100 + [0.0] * 100

    # important! set the neuron's dt value for your stimulus in seconds
    neuron.dt = 5e-6

    # simulate the neuron
    output = neuron.run(stimulus)

    voltage = output['voltage']
    threshold = output['threshold']
    spike_times = output['interpolated_spike_times']
Example #44
def test_6(configured_glif_api, neuron_config_file, stimulus):
    # define your own custom voltage reset rule
    # this one linearly scales the input voltage
    def custom_voltage_reset_rule(neuron, voltage_t0, custom_param_a, custom_param_b):
        return custom_param_a * voltage_t0 + custom_param_b

    # initialize a neuron from a neuron config file
    neuron_config = json_utilities.read(neuron_config_file)
    neuron = GlifNeuron.from_dict(neuron_config)

    # configure a new method and overwrite the neuron's old method
    method = neuron.configure_method('custom', custom_voltage_reset_rule,
                                     {'custom_param_a': 0.1, 'custom_param_b': 0.0})
    neuron.voltage_reset_method = method

    truncate = 56041
    output = neuron.run(stimulus[0:truncate])
Example #45
def test_run():
    # initialize the neuron
    neuron_config = json_utilities.read(os.path.join(
        OUTPUT_DIR, '%d_neuron_config.json' % NEURONAL_MODEL_ID))
    neuron = GlifNeuron.from_dict(neuron_config)

    # make a short square pulse. stimulus units should be in Amps.
    stimulus = [0.0] * 100 + [10e-9] * 100 + [0.0] * 100

    # important! set the neuron's dt value for your stimulus in seconds
    neuron.dt = 5e-6

    # simulate the neuron
    output = neuron.run(stimulus)

    voltage = output['voltage']
    threshold = output['threshold']

    plt.plot(voltage)
    plt.plot(threshold)
    plt.savefig(os.path.join(OUTPUT_DIR, 'plot.png'))
Example #46
    def get_ephys_sweeps(self, specimen_id, file_name=None):
        """
        Download sweep metadata for a single cell specimen.  

        Parameters
        ----------
        
        specimen_id: int
             ID of a cell.
        """

        file_name = self.get_cache_path(file_name, self.EPHYS_SWEEPS_KEY, specimen_id)
        
        if os.path.exists(file_name):
            sweeps = json_utilities.read(file_name)
        else:
            sweeps = self.api.get_ephys_sweeps(specimen_id)

            if self.cache:
                json_utilities.write(file_name, sweeps)

        return sweeps
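
A short usage sketch reached through CellTypesCache, mirroring Example #52 below; the specimen id is a hypothetical placeholder:

from allensdk.core.cell_types_cache import CellTypesCache

ctc = CellTypesCache()
# downloads once, then reads the cached JSON on later calls
sweeps = ctc.get_ephys_sweeps(464212183, file_name='ephys_sweeps.json')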
Example #47
import os

from allensdk.brain_observatory.natural_movie import NaturalMovie
from allensdk.brain_observatory.locally_sparse_noise import LocallySparseNoise
import allensdk.brain_observatory.stimulus_info as stiminfo
import allensdk.core.json_utilities as ju
from pkg_resources import resource_filename  # @UnresolvedImport


data_file = os.environ.get('TEST_OBSERVATORY_EXPERIMENT_PLOTS_DATA', 'skip')
if data_file == 'default':
    data_file = resource_filename(__name__, 'test_observatory_plots_data.json')

if data_file == 'skip':
    EXPERIMENT_CONTAINER = None
    TEST_DATA_DIR = None
else:
    EXPERIMENT_CONTAINER = ju.read(data_file)
    TEST_DATA_DIR = EXPERIMENT_CONTAINER['image_directory']

class AnalysisSingleton(object):
    def __init__(self, klass, session, *args):
        self.klass = klass
        self.session = session
        self.args = args

        self.obj = None

    @staticmethod
    def experiment_for_session(session):
        return next(exp for exp in EXPERIMENT_CONTAINER['experiments'] if exp['session'] == session)

    def __call__(self):
Example #48
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron
from allensdk.core.nwb_data_set import NwbDataSet

neuron_config = json_utilities.read('472423251_neuron_config.json')
ephys_sweeps = json_utilities.read('ephys_sweeps.json')
ephys_file_name = '472423251.nwb'

# pull out the stimulus for the first sweep
ephys_sweep = ephys_sweeps[0]
ds = NwbDataSet(ephys_file_name)
data = ds.get_sweep(ephys_sweep['sweep_number']) 
stimulus = data['stimulus']

# initialize the neuron
# important! update the neuron's dt for your stimulus
neuron = GlifNeuron.from_dict(neuron_config)
neuron.dt = 1.0 / data['sampling_rate']

# simulate the neuron
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
Example #49
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('neuron_config.json')
neuron = GlifNeuron.from_dict(neuron_config)

# make a short square pulse. stimulus units should be in Amps.
stimulus = [ 0.0 ] * 100 + [ 10e-9 ] * 100 + [ 0.0 ] * 100

# important! set the neuron's dt value for your stimulus in seconds
neuron.dt = 5e-6

# simulate the neuron
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
Example #50
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('472423251_neuron_config.json')
neuron = GlifNeuron.from_dict(neuron_config)

# make a short square pulse. stimulus units should be in Amps.
stimulus = [ 0.0 ] * 100 + [ 10e-9 ] * 100 + [ 0.0 ] * 100

# important! set the neuron's dt value for your stimulus in seconds
neuron.dt = 5e-6

# simulate the neuron
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
Example #51
def test_preprocess_neuron():
    logging.getLogger().setLevel(logging.DEBUG)
    p = re.compile("(\d+)_data_config.json")
    dt = 5e-05
    bessel = { 'N': 4, 'Wn': .1 }
    cut = 0

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)


    for data_config_file in DATA_CONFIG_FILES:
        logging.info("testing %s" % data_config_file)
        fname = os.path.basename(data_config_file)
        m = p.match(fname)

        sid = m.group(1)

        test_data_file = os.path.join(TEST_DIR, "%s_preprocessed_dict.json" % sid)

        if not os.path.exists(test_data_file):
            logging.warning("no test file %s" % test_data_file)
            continue

        out_file = os.path.join(OUT_DIR, "%s_preprocessed_dict.json" % sid)

        data_config = ju.read(data_config_file)
        nwb_file = data_config["filename"]
        sweep_list = data_config["sweeps"].values()

        d = preprocess_neuron(nwb_file, sweep_list, dt, cut, bessel)

        dictionary = ju.read(test_data_file)

        ju.write(out_file, d)

        errs = []
        assert_equal(d['El'], 0.0, 'El', errs)
        assert_equal(d['El_reference'], dictionary['El']['El_noise']['measured']['mean'], 'El_reference', errs)
        assert_equal(d['deltaV'], None, 'deltaV', errs)
        assert_equal(d['dt'], dictionary['dt_used_for_preprocessor_calculations'], 'dt', errs)
        assert_equal(d['R_input'], dictionary['resistance']['R_lssq_Wrest']['mean'], 'R_input', errs)
        assert_equal(d['C'], dictionary['capacitance']['C_lssq_Wrest']['mean'], 'C', errs)
        assert_equal(d['th_inf'], dictionary['th_inf']['via_Vmeasure']['from_zero'], 'th_inf', errs)
        assert_equal(d['th_adapt'], dictionary['th_adapt']['from_95percentile_noise']['deltaV'], 'th_adapt', errs)
        assert_equal(d['spike_cut_length'], dictionary['spike_cutting']['NOdeltaV']['cut_length'], 'spike_cut_length', errs)
        assert_equal(d['spike_cutting_intercept'], dictionary['spike_cutting']['NOdeltaV']['intercept'], 'spike_cutting_intercept', errs)
        assert_equal(d['spike_cutting_slope'], dictionary['spike_cutting']['NOdeltaV']['slope'], 'spike_cutting_slope', errs)
        assert_equal(d['asc_amp_array'], dictionary['asc']['amp'], 'asc_amp_array', errs)
        assert_equal(d['asc_tau_array'], 1./np.array(dictionary['asc']['k']), 'asc_tau_array', errs)

        nlp = d['nonlinearity_parameters']
        assert_equal(nlp['line_param_RV_all'], dictionary['nonlinearity_parameters']['line_param_RV_all'], 'line_param_RV_all', errs)
        assert_equal(nlp['line_param_ElV_all'], dictionary['nonlinearity_parameters']['line_param_ElV_all'], 'line_param_ElV_all', errs)

        ta = d['threshold_adaptation']
        assert_equal(ta['a_spike_component_of_threshold'], dictionary['threshold_adaptation']['a_spike_component_of_threshold'], 'a_spike', errs)
        assert_equal(ta['b_spike_component_of_threshold'], dictionary['threshold_adaptation']['b_spike_component_of_threshold'], 'b_spike', errs) 
        assert_equal(ta['a_voltage_component_of_threshold'], dictionary['threshold_adaptation']['a_voltage_component_of_threshold'], 'a_voltage', errs)
        assert_equal(ta['b_voltage_component_of_threshold'], dictionary['threshold_adaptation']['b_voltage_component_of_threshold'], 'b_voltage', errs) 

        mlin = d['MLIN']
        assert_equal(mlin['var_of_section'], dictionary['MLIN']['var_of_section'], 'var_of_section', errs)
        assert_equal(mlin['sv_for_expsymm'],  dictionary['MLIN']['sv_for_expsymm'], 'sv_for_expsymm', errs)
        assert_equal(mlin['tau_from_AC'], dictionary['MLIN']['tau_from_AC'], 'tau_from_AC', errs)

        if len(errs) > 0:
            for err in errs:
                logging.error(err)
            raise Exception("Preprocessor outputs did not match.")
Example #52
# (snippet truncated: neuron_config and nm are assumed to come from an earlier
# GlifApi query, as in the AllenSDK GLIF quick-start examples)
import allensdk.core.json_utilities as json_utilities
from allensdk.core.cell_types_cache import CellTypesCache

json_utilities.write('neuron_config.json', neuron_config)

# download information about the cell
ctc = CellTypesCache()
ctc.get_ephys_data(nm['specimen_id'], file_name='stimulus.nwb')
ctc.get_ephys_sweeps(nm['specimen_id'], file_name='ephys_sweeps.json')

#===============================================================================
# example 2
#===============================================================================

import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('neuron_config.json')['566302806']
neuron = GlifNeuron.from_dict(neuron_config)

# make a short square pulse. stimulus units should be in Amps.
stimulus = [ 0.0 ] * 100 + [ 10e-9 ] * 100 + [ 0.0 ] * 100

# important! set the neuron's dt value for your stimulus in seconds
neuron.dt = 5e-6

# simulate the neuron
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
Example #53
import allensdk.core.json_utilities as json_utilities

from allensdk.model.glif.glif_neuron import GlifNeuron
from allensdk.model.glif.simulate_neuron import simulate_neuron

neuron_config = json_utilities.read("neuron_config.json")
ephys_sweeps = json_utilities.read("ephys_sweeps.json")
ephys_file_name = "stimulus.nwb"

neuron = GlifNeuron.from_dict(neuron_config)

sweep_numbers = [s["sweep_number"] for s in ephys_sweeps if s["stimulus_units"] == "Amps"]
simulate_neuron(neuron, sweep_numbers, ephys_file_name, ephys_file_name, 0.05)
Example #54
 def wrap(self, fn, path, cache,
          save_as_json=True,
          return_dataframe=False,
          index=None,
          rename=None,
          **kwargs):
     '''make an rma query, save it and return the dataframe.
     
     Parameters
     ----------
     fn : function reference
         makes the actual query using kwargs.
     path : string
         where to save the data
     cache : boolean
         True will make the query, False just loads from disk
     save_as_json : boolean, optional
         True (default) will save data as json, False as csv
     return_dataframe : boolean, optional
         True will cast the return value to a pandas dataframe, False (default) will not 
     index : string, optional
         column to use as the pandas index
     rename : list of string tuples, optional
         (new, old) columns to rename
     kwargs : objects
         passed through to the query function
     
     Returns
     -------
     dict or DataFrame
         data type depends on return_dataframe option.
     
     Notes
     -----
     Column renaming happens after the file is reloaded for json
     '''
      if cache:
         json_data = fn(**kwargs)
         
          if save_as_json:
             ju.write(path, json_data)
         else:
             df = pd.DataFrame(json_data)
             self.rename_columns(df, rename)
             
             if index is not None:
                 df.set_index([index], inplace=True)
     
             df.to_csv(path)
 
     # read it back in
      if save_as_json:
          if return_dataframe:
             data = pj.read_json(path, orient='records')
             self.rename_columns(data, rename)
              if index is not None:
                 data.set_index([index], inplace=True)
         else:
             data = ju.read(path)
      elif return_dataframe:
          data = pd.read_csv(path, index_col=0)
     else:
         raise ValueError('save_as_json=False cannot be used with return_dataframe=False')
     
     return data
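
A hedged sketch of calling wrap; the Cache instance, the RmaApi query, and the model name are assumptions for illustration:

from allensdk.api.queries.rma_api import RmaApi

rma = RmaApi()
# cache the query result as JSON on disk, then reload it as a DataFrame
data = cache.wrap(rma.model_query,
                  path='organisms.json',
                  cache=True,
                  return_dataframe=True,
                  model='Organism')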