Example #1
    def write_output_data(self, data):
        try:
            ju.write(self.args.output_json, data)
        except Exception:
            logging.error("could not write output json: %s",
                          self.args.output_json)
            raise
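All of these examples funnel through allensdk.core.json_utilities (imported as ju); a minimal round-trip sketch, with a hypothetical file name:

import allensdk.core.json_utilities as ju

payload = {"values": [1, 2, 3]}
ju.write("payload.json", payload)         # hypothetical path
assert ju.read("payload.json") == payload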
Example #2
def mkjson():
    # only ASCII, as in these dictionary contents, can be serialised
    from allensdk.core.json_utilities import write

    for m in morphs:
        write(str(m) + '.json', m.root)
    return 0
Example #3
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')
    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    output_file = os.path.join(OUTPUT_DIR, 'hyperparameters-%s.json' % OPTION)

    results = dict()
    for structure in structures:
        logging.debug("Running cross validation for structure: %s", structure)
        structure_id = get_structure_id(cache, structure)

        results[structure] = fit_structure(cache,
                                           structure_id,
                                           experiments_exclude,
                                           kernel=KERNEL,
                                           model_option=OPTION)

    # write results
    ju.write(output_file, results)
Example #4
def make_stimulus_ontology_from_lims(file_name):

    if lq.able_to_connect_to_lims():
        stims = lq.get_stimuli_description()
        stim_ontology = make_stimulus_ontology(stims)
        ju.write(file_name, stim_ontology)
        logging.info("Updated stimulus ontology from LIMS")
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input_json")
    parser.add_argument("output_json")
    parser.add_argument("--log_level", default=logging.DEBUG)
    parser.add_argument("--input_dataset", default="FC")
    parser.add_argument("--roi_field", default="roi_names")
    parser.add_argument("--output_dataset", default="data")
    args = parser.parse_args()

    logging.getLogger().setLevel(args.log_level)

    input_data = ju.read(args.input_json)
    input_file, output_file = parse_input(input_data)

    # read traces and ROI names from the input dataset
    input_h5 = h5py.File(input_file, "r")
    traces = input_h5[args.input_dataset][()]  # .value was removed in h5py 3
    roi_names = input_h5[args.roi_field][:]
    input_h5.close()

    dff = calculate_dff(traces)

    # write to "data"
    output_h5 = h5py.File(output_file, "w")
    output_h5[args.output_dataset] = dff
    output_h5[args.roi_field] = roi_names
    output_h5.close()

    output_data = {}

    ju.write(args.output_json, output_data)
Example #6
def main():
    module = ags.ArgSchemaParser(schema_type=OptimizeParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    passive_results = ju.read(module.args["paths"]["passive_results"])
    fit_style_data = ju.read(module.args["paths"]["fit_style"])

    results = optimize(
        hoc_files=module.args["paths"]["hoc_files"],
        compiled_mod_library=module.args["paths"]["compiled_mod_library"],
        morphology_path=module.args["paths"]["swc"],
        features=preprocess_results["features"],
        targets=preprocess_results["target_features"],
        stim_params=StimParams(preprocess_results["stimulus"]),
        passive_results=passive_results,
        fit_type=module.args["fit_type"],
        fit_style_data=fit_style_data,
        seed=module.args["seed"],
        ngen=module.args["ngen"],
        mu=module.args["mu"],
        storage_directory=module.args["paths"]["storage_directory"],
        starting_population=module.args["paths"].get("starting_population",
                                                     None))

    logging.info("Writing optimization output")
    ju.write(module.args["output_json"], results)
Example #7
    def to_json(self, file_name=None):
        """JSON serialize object parameters to file or string.

        Parameters
        ----------
        file_name : string, optional (default None)
            Path to .json file containing VoxelModelCache parameters. If None,
            a string will be returned.

        Returns
        -------
        string
            If file_name is None, a string of the JSON serialization is returned.
        """
        params = dict(resolution=self.resolution,
                      cache=self.cache,
                      manifest_file=self.manifest_file,
                      ccf_version=self.ccf_version,
                      base_uri=self.base_uri,
                      version=self.version)

        if file_name is None:
            return json_utilities.write_string(params)

        json_utilities.write(file_name, params)
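A hedged round-trip sketch for this method, assuming the parameters it writes are accepted by VoxelModelCache.from_json (exercised in the test_from_json example further down); the file name is hypothetical:

cache = VoxelModelCache(manifest_file='voxel_model_manifest.json')
cache.to_json('voxel_model_cache.json')                  # hypothetical path
restored = VoxelModelCache.from_json('voxel_model_cache.json')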
Example #8
    def from_file_name(cls, file_name, cache=True, **kwargs):
        '''Alternative constructor using cache path file_name.

        Parameters
        ----------
        file_name : string
            Path where storage_directories will be saved.
        **kwargs
            Keyword arguments to be supplied to __init__

        Returns
        -------
        cls : instance of GridDataApiPrerelease
        '''
        if os.path.exists(file_name):
            storage_directories = json_utilities.read(file_name)
        else:
            storage_directories = _get_grid_storage_directories(
                cls.GRID_DATA_DIRECTORY)

            if cache:
                Manifest.safe_make_parent_dirs(file_name)
                json_utilities.write(file_name, storage_directories)

        return cls(storage_directories, **kwargs)
Example #9
 def test_spike_times(self):
     expected = [
         2.937305,   3.16453 ,   3.24271 ,   4.1622  ,   4.24182 ,
         10.0898  ,  10.132545,  10.176095,  10.2361  ,  10.660655,
         10.762125,  10.863465,  10.93833 ,  11.140815,  11.19246 ,
         11.24553 ,  11.696305,  11.812655,  11.90469 ,  12.056755,
         12.15794 ,  12.233905,  12.47577 ,  12.741295,  12.82861 ,
         12.923175,  18.05068 ,  18.139875,  18.17693 ,  18.221485,
         18.24337 ,  18.39981 ,  18.470705,  18.759675,  18.82183 ,
         18.877475,  18.91033 ,  18.941195,  19.050515,  19.12557 ,
         19.15963 ,  19.188655,  19.226205,  19.29813 ,  19.420665,
         19.47627 ,  19.763365,  19.824225,  19.897995,  19.93155 ,
         20.04916 ,  20.11832 ,  20.148755,  20.18004 ,  20.22173 ,
         20.2433  ,  20.40018 ,  20.470915,  20.759715,  20.82156 ,
         20.866465,  20.90807 ,  20.939175]
     
     bp = BiophysicalPerisomaticApi('http://api.brain-map.org')
     bp.cache_stimulus = True # change to False to not download the large stimulus NWB file
     neuronal_model_id = 472451419    # get this from the web site as above
     bp.cache_data(neuronal_model_id, working_directory='neuronal_model')
     cwd = os.path.realpath(os.curdir)
     print(cwd)
     os.chdir(os.path.join(cwd, 'neuronal_model'))
     manifest = ju.read('manifest.json')
     manifest['biophys'][0]['model_file'][0] = 'manifest_51.json'
     manifest['runs'][0]['sweeps'] = [51]
     ju.write('manifest_51.json', manifest)
     subprocess.call(['nrnivmodl', './modfiles'])
     run(Config().load('manifest_51.json'))
     #os.chdir(cwd)
     nwb_out = NwbDataSet('work/386049444.nwb')
     spikes = nwb_out.get_spike_times(51)
     
     numpy.testing.assert_array_almost_equal(spikes, expected)
Example #10
def test_write_nan():
    with patch(builtins.__name__ + ".open",
               mock_open(),
               create=True) as mo:
        ju.write('/some/file/test.json', { "thing": float('nan')})
    
    assert 'null' in str(mo().write.call_args_list[0])
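The assertion above pins down the behavior the other examples rely on: json_utilities.write serializes NaN as JSON null. A small sketch of the same behavior outside the mock, with a hypothetical path:

import allensdk.core.json_utilities as ju

ju.write("nan_check.json", {"thing": float("nan")})  # hypothetical path
assert ju.read("nan_check.json")["thing"] is None    # null reads back as None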
Example #11
def test_configure_model():
    p = re.compile(r"(\d+)(.*?)_(.*)_mlin_model_config.json")

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    for mcf in MODEL_CONFIG_FILES:
        fname = os.path.basename(mcf)
        m = p.match(fname)
        
        sid, cre, config = m.groups()

        method_config_file = os.path.join(METHOD_CONFIG_DIR, "%s.json" % config)
        prep_file = os.path.join(PREP_DIR, "%s_preprocessed_dict.json" % sid)
        out_file = os.path.join(OUT_DIR, "%s_%s_model_config.json" % (sid, config))

        if os.path.exists(prep_file):
            print "testing", prep_file
            out_config = configure_model(ju.read(method_config_file), 
                                         ju.read(prep_file))

            ju.write(out_file, out_config)

            test_config = ju.read(mcf)

            cmpdict(test_config['neuron'], out_config['neuron'])
            try:
                cmpdict(test_config['optimizer'], out_config['optimizer'])
            except ComparisonException as e:
                if e.key != "param_fit_names":
                    raise

        else:
            logging.error("preprocessor file %s does not exist" % prep_file)
Example #12
    def get_cells(self, file_name=None, require_morphology=False, require_reconstruction=False):
        '''
        Download metadata for all cells in the database and optionally return a
        subset filtered by whether or not they have a morphology or reconstruction.

        Parameters
        ----------
        
        file_name: string
            File name to save/read the cell metadata as JSON.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        require_morphology: boolean
            Filter out cells that have no morphological images.

        require_reconstruction: boolean
            Filter out cells that have no morphological reconstructions.
        '''

        file_name = self.get_cache_path(file_name, self.CELLS_KEY)

        if os.path.exists(file_name):
            cells = json_utilities.read(file_name)
        else:
            cells = self.api.list_cells(False, False)

            if self.cache:
                json_utilities.write(file_name, cells)

        # filter the cells on the way out
        return self.api.filter_cells(cells, require_morphology, require_reconstruction)
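get_cells follows the read-or-fetch-then-cache shape shared by most cache methods in these examples; a hypothetical helper distilling the pattern:

import os
import allensdk.core.json_utilities as json_utilities

def cached_json(file_name, fetch, cache=True):
    # hypothetical helper: reuse the file if present, otherwise call fetch()
    # and optionally persist the result as JSON
    if os.path.exists(file_name):
        return json_utilities.read(file_name)
    data = fetch()
    if cache:
        json_utilities.write(file_name, data)
    return data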
Example #13
def main():
    module = ags.ArgSchemaParser(schema_type=ModelSelectionParameters)

    swc_path = module.args["paths"]["swc"]
    fit_style_paths = module.args["paths"]["fit_styles"]
    best_fit_json_path = module.args["paths"]["best_fit_json_path"]
    passive = ju.read(module.args["paths"]["passive_results"])
    preprocess = ju.read(module.args["paths"]["preprocess_results"])

    fits = module.args["paths"]["fits"]
    fit_results = ms.fit_info(fits)
    best_fit = ms.select_model(fit_results, module.args["paths"], passive,
                               preprocess["v_baseline"],
                               module.args["noise_1_sweeps"],
                               module.args["noise_2_sweeps"])
    if best_fit is None:
        raise Exception("Failed to find acceptable optimized model")

    logging.info("building fit data")
    fit_style_data = ju.read(
        module.args["paths"]["fit_styles"][best_fit["fit_type"]])
    fit_data = ms.build_fit_data(best_fit["params"], passive, preprocess,
                                 fit_style_data)

    logging.info("writing fit data: %s", best_fit_json_path)
    ju.write(best_fit_json_path, fit_data)

    output = {
        "paths": {
            "fit_json": best_fit_json_path,
        }
    }

    logging.info("writing output json: %s", module.args["output_json"])
    ju.write(module.args["output_json"], output)
Example #14
def debug(container_id, local=False, plots=None):
    SCRIPT = "/data/informatics/CAM/analysis/allensdk/allensdk/internal/pipeline_modules/run_observatory_container_thumbnails.py"
    SDK_PATH = "/data/informatics/CAM/analysis/allensdk/"
    OUTPUT_DIR = "/data/informatics/CAM/analysis/containers"

    container_dir = os.path.join(OUTPUT_DIR, str(container_id))

    input_data = []
    for exp in get_container_info(container_id):
        exp_data = robsth.get_input_data(exp['id'])
        exp_input_json = os.path.join(exp_data["output_directory"],
                                      "input.json")
        input_data.append(
            dict(input_json=exp_input_json,
                 output_json=os.path.join(exp_data["output_directory"],
                                          "output.json")))

        Manifest.safe_make_parent_dirs(exp_input_json)
        ju.write(exp_input_json, exp_data)

    run_module(SCRIPT,
               input_data,
               container_dir,
               sdk_path=SDK_PATH,
               pbs=dict(vmem=32,
                        job_name="cthumbs_%d" % container_id,
                        walltime="10:00:00"),
               local=local,
               optional_args=['--types=' + ','.join(plots)] if plots else None)
Example #15
def test_1(glif_api, neuronal_model_id):
    glif_api.get_neuronal_model(neuronal_model_id)
    glif_api.cache_stimulus_file('stimulus.nwb')

    neuron_config = glif_api.get_neuron_config()
    json_utilities.write('neuron_config.json', neuron_config)

    ephys_sweeps = glif_api.get_ephys_sweeps()
    json_utilities.write('ephys_sweeps.json', ephys_sweeps)
Example #16
def test_1(glif_api, neuronal_model_id):
    glif_api.get_neuronal_model(neuronal_model_id)
    glif_api.cache_stimulus_file('stimulus.nwb')

    neuron_config = glif_api.get_neuron_config()
    json_utilities.write('neuron_config.json', neuron_config)

    ephys_sweeps = glif_api.get_ephys_sweeps()
    json_utilities.write('ephys_sweeps.json', ephys_sweeps)
Example #17
    def get_cell_specimens(self,
                           file_name=None,
                           ids=None,
                           experiment_container_ids=None,
                           simple=True):
        """ Return cell specimens that have certain properies.
        
        Parameters
        ----------
        file_name: string
            File name to save/read the cell specimens.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        ids: list
            List of cell specimen ids.  

        experiment_container_ids: list
            List of experiment container ids.
            
        simple: boolean
            Whether or not to simplify the dictionary properties returned by this method
            to a more concise subset.
            
        Returns
        -------
        list of dictionaries
        """

        file_name = self.get_cache_path(file_name, self.CELL_SPECIMENS_KEY)

        if os.path.exists(file_name):
            cell_specimens = ju.read(file_name)
        else:
            cell_specimens = self.api.get_cell_metrics()

            if self.cache:
                ju.write(file_name, cell_specimens)

        cell_specimens = self.api.filter_cell_specimens(
            cell_specimens,
            ids=ids,
            experiment_container_ids=experiment_container_ids)

        # drop the thumbnail columns
        if simple:
            mappings = self._get_stimulus_mappings()
            thumbnails = [
                m['item'] for m in mappings
                if m['item_type'] == 'T' and m['level'] == 'R'
            ]
            for cs in cell_specimens:
                for t in thumbnails:
                    del cs[t]

        return cell_specimens
Example #18
def write_output(output_dir, acronym, sid, data_dict, suffix=''):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    output_json = os.path.join(output_dir, "%s_%s.json" % (acronym, suffix))

    output_data = dict(structure_acronym=acronym, structure_id=sid)
    output_data.update(data_dict)

    ju.write(output_json, output_data)
Example #19
def mcc(fn_temp_dir):
    storage_dirs = {"111" : os.path.join(fn_temp_dir, "111"),
                    "222" : os.path.join(fn_temp_dir, "222")}

    file_name = os.path.join(fn_temp_dir, 'storage_directories.json')
    json_utilities.write(file_name, storage_dirs)

    manifest_path = os.path.join(fn_temp_dir, 'manifest.json')
    return MouseConnectivityCachePrerelease(
            manifest_file=manifest_path, storage_directories_file_name=file_name)
Example #20
    def get_experiments(self, dataframe=False, file_name=None, cre=None, injection_structure_ids=None):
        """
        Read a list of experiments that match certain criteria.  If caching is enabled,
        this will save the whole (unfiltered) list of experiments to a file.

        Parameters
        ----------
        
        dataframe: boolean
            Return the list of experiments as a Pandas DataFrame.  If False,
            return a list of dictionaries.  Default False. 

        file_name: string
            File name to save/read the structures table.  If file_name is None, 
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.

        cre: boolean or list
            If True, return only cre-positive experiments.  If False, return only
            cre-negative experiments.  If None, return all experiments. If list, return
            all experiments with cre line names in the supplied list. Default None.

        injection_structure_ids: list
            Only return experiments that were injected in the structures provided here.
            If None, return all experiments.  Default None.

        """

        file_name = self.get_cache_path(file_name, self.EXPERIMENTS_KEY)

        if os.path.exists(file_name):
            experiments = json_utilities.read(file_name)
        else:
            experiments = self.api.experiment_source_search(injection_structures='root')
            
            # removing these elements because they are specific to a particular resolution
            for e in experiments:
                del e['num-voxels']
                del e['injection-volume']
                del e['sum']
                del e['name']

            if self.cache:
                self.safe_mkdir(os.path.dirname(file_name))

                json_utilities.write(file_name, experiments)

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre, injection_structure_ids)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
Example #21
def configured_glif_api(glif_api, neuronal_model_id, neuron_config_file,
                        ephys_sweeps_file):
    glif_api.get_neuronal_model(neuronal_model_id)

    neuron_config = glif_api.get_neuron_config()
    json_utilities.write(neuron_config_file, neuron_config)

    ephys_sweeps = glif_api.get_ephys_sweeps()
    json_utilities.write(ephys_sweeps_file, ephys_sweeps)

    return glif_api
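configured_glif_api reads as a pytest fixture; a hedged sketch of how a test might consume it (the test name and assertion are hypothetical):

import os

def test_sweeps_cached(configured_glif_api, ephys_sweeps_file):
    # hypothetical test: the fixture above has already written this file
    assert os.path.exists(ephys_sweeps_file)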
Example #22
def configured_glif_api(glif_api, neuronal_model_id, neuron_config_file,
                        ephys_sweeps_file):
    glif_api.get_neuronal_model(neuronal_model_id)

    neuron_config = glif_api.get_neuron_config()
    json_utilities.write(neuron_config_file, neuron_config)

    ephys_sweeps = glif_api.get_ephys_sweeps()
    json_utilities.write(ephys_sweeps_file, ephys_sweeps)

    return glif_api
Example #23
def main():
    module = ags.ArgSchemaParser(schema_type=MorphologySummaryParameters)
    run_morphology_summary(module.args["pia_transform"],
                           module.args["relative_soma_depth"],
                           module.args["soma_depth"], module.args["swc_file"],
                           module.args["thumbnail_file"],
                           module.args["cortex_thumbnail_file"],
                           module.args["normal_depth_thumbnail_file"],
                           module.args["high_resolution_thumbnail_file"])

    ju.write(module.args["output_json"], {})
Example #24
def main():
    """
    Usage:
    python run_sweep_extraction.py
        --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=SweepExtractionParameters)
    output = run_sweep_extraction(**module.args)

    json_utilities.write(module.args["output_json"], output)
Example #25
def main():
    args = parse_arguments()

    sweep_list = ju.read(args.sweep_list_file)

    data = find_sweeps(sweep_list)

    ju.write(args.output_file, data)

    if len(errs) > 0:
        for err in errs:
            logging.error(err)
        sys.exit(1)
Example #26
def main():
    """
    Usage:
    python run_sweep_extraction.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=SweepExtractionParameters)
    output = run_sweep_extraction(
        module.args["input_nwb_file"], module.args.get("input_h5_file", None),
        module.args.get("stimulus_ontology_file", None))

    ju.write(module.args["output_json"], output)
Example #27
def main():
    module = ags.ArgSchemaParser(schema_type=ConsolidateParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    is_spiny = preprocess_results["is_spiny"]
    info = ju.read(module.args["paths"]["passive_info"])

    if info["should_run"]:
        fit_1_path = module.args["paths"]["passive_fit_1"]
        fit_1 = ju.read(fit_1_path)

        fit_2_path = module.args["paths"]["passive_fit_2"]
        fit_2 = ju.read(fit_2_path)

        fit_3_path = module.args["paths"]["passive_fit_elec"]
        fit_3 = ju.read(fit_3_path)

        ra, cm1, cm2 = cpf.compare_runs(preprocess_results, fit_1, fit_2,
                                        fit_3)
    else:
        ra = 100.
        cm1 = 1.
        if is_spiny:
            cm2 = 2.
        else:
            cm2 = 1.

    passive = {
        "ra": ra,
        "cm": {
            "soma": cm1,
            "axon": cm1,
            "dend": cm2
        },
        "e_pas": preprocess_results["v_baseline"]
    }

    passive["e_pas"] = preprocess_results["v_baseline"]
    if preprocess_results["has_apical"]:
        passive["cm"]["apic"] = cm2

    passive_results_path = module.args["paths"]["passive_results"]
    ju.write(passive_results_path, passive)

    output = {
        "paths": {
            "passive_results": passive_results_path,
        }
    }

    ju.write(module.args["output_json"], output)
Example #28
def test_from_json(fn_temp_dir):
    # ------------------------------------------------------------------------
    # tests alternative constructor
    manifest_file = 'manifest.json'
    resolution = 100
    path = os.path.join(fn_temp_dir, 'input.json')

    input_data = dict(manifest_file=manifest_file, resolution=resolution)
    json_utilities.write(path, input_data)

    cache = VoxelModelCache.from_json(path)

    assert cache.manifest_file == manifest_file
    assert cache.resolution == resolution
Example #29
    def _get_stimulus_mappings(self, file_name=None):
        """ Returns a mapping of which metrics are related to which stimuli. Internal use only. """

        file_name = self.get_cache_path(file_name, self.STIMULUS_MAPPINGS_KEY)

        if os.path.exists(file_name):
            mappings = ju.read(file_name)
        else:
            mappings = self.api.get_stimulus_mappings()

            if self.cache:
                ju.write(file_name, mappings)

        return mappings
Example #30
def main():
    """
    Usage:
    > python generate_se_input.py --specimen_id SPECIMEN_ID --cell_dir CELL_DIR
    > python generate_se_input.py --input_nwb_file input_nwb_file --cell_dir CELL_DIR

    """

    kwargs = parse_args()
    se_input = generate_se_input(**kwargs)

    input_json = os.path.join(kwargs["cell_dir"], 'se_input.json')

    ju.write(input_json, se_input)
Example #31
File: run_qc.py  Project: smestern/ipfx
def main():
    """
    Usage:
    python run_qc.py --input_json INPUT_JSON --output_json OUTPUT_JSON


    """
    module = ags.ArgSchemaParser(schema_type=QcParameters)

    output = run_qc(module.args.get("stimulus_ontology_file",
                                    None), module.args["cell_features"],
                    module.args["sweep_features"], module.args["qc_criteria"])

    ju.write(module.args["output_json"], output)
Example #32
def main():
    """
    Usage:
    > python generate_fx_input.py --specimen_id SPECIMEN_ID --cell_dir CELL_DIR
    > python generate_fx_input.py --input_nwb_file INPUT_NWB_FILE --cell_dir CELL_DIR

    """

    kwargs = parse_args()
    se_input = generate_se_input(**kwargs)
    cell_dir = kwargs["cell_dir"]
    lu.configure_logger(cell_dir)

    if not os.path.exists(cell_dir):
        os.makedirs(cell_dir)

    ju.write(os.path.join(cell_dir, 'se_input.json'), se_input)

    se_output = run_sweep_extraction(
        se_input["input_nwb_file"], se_input.get("input_h5_file", None),
        se_input.get("stimulus_ontology_file", None))

    ju.write(os.path.join(cell_dir, 'se_output.json'), se_output)

    sp.drop_tagged_sweeps(se_output["sweep_features"])

    qc_input = generate_qc_input(se_input, se_output)
    ju.write(os.path.join(cell_dir, 'qc_input.json'), qc_input)

    qc_output = run_qc(qc_input.get("stimulus_ontology_file",
                                    None), qc_input["cell_features"],
                       qc_input["sweep_features"], qc_input["qc_criteria"])
    ju.write(os.path.join(cell_dir, 'qc_output.json'), qc_output)

    if kwargs["specimen_id"]:
        manual_sweep_states = lq.get_sweep_states(kwargs["specimen_id"])
    elif kwargs["input_nwb_file"]:
        manual_sweep_states = []

    sp.override_auto_sweep_states(manual_sweep_states,
                                  qc_output["sweep_states"])
    sp.assign_sweep_states(qc_output["sweep_states"],
                           se_output["sweep_features"])

    fx_input = generate_fx_input(se_input,
                                 se_output,
                                 cell_dir,
                                 plot_figures=True)

    ju.write(os.path.join(cell_dir, 'fx_input.json'), fx_input)
Example #33
    def get_experiments(self,
                        dataframe=False,
                        file_name=None,
                        cre=None,
                        injection_structure_ids=None,
                        age=None,
                        gender=None,
                        workflow_state=None,
                        workflows=None,
                        project_code=None):
        """Read a list of experiments.

        If caching is enabled, this will save the whole (unfiltered) list of
        experiments to a file.

        Parameters
        ----------
        dataframe: boolean
            Return the list of experiments as a Pandas DataFrame.  If False,
            return a list of dictionaries.  Default False.

        file_name: string
            File name to save/read the structures table.  If file_name is None,
            the file_name will be pulled out of the manifest.  If caching
            is disabled, no file will be saved. Default is None.
        """
        file_name = self.get_cache_path(file_name,
                                        self.EXPERIMENTS_PRERELEASE_KEY)

        if os.path.exists(file_name):
            experiments = json_utilities.read(file_name)
        else:
            experiments = self.api.get_experiments()

            if self.cache:
                Manifest.safe_make_parent_dirs(file_name)
                json_utilities.write(file_name, experiments)

        # filter the read/downloaded list of experiments
        experiments = self.filter_experiments(experiments, cre,
                                              injection_structure_ids, age,
                                              gender, workflow_state,
                                              workflows, project_code)

        if dataframe:
            experiments = pd.DataFrame(experiments)
            experiments.set_index(['id'], inplace=True, drop=False)

        return experiments
Example #34
def QueryAPI(model,
             criteriaString,
             includeString=None,
             optionsString=None,
             writeOut=False):
    # Send a query to the Allen API, and assemble results

    # Initiate RMA API for Allen data retrieval
    api = RmaApi()

    # Settings for retrieval
    rows = []
    blockSize = 2000
    done = False
    startRow = 0
    # for i in range(0, total_rows, blockSize):

    while not done:
        print("Row %d, attempting to retrieve %d rows..." %
              (startRow, blockSize))

        tot_rows = len(rows)

        # apiQueryPartial = partial(api.model_query,model=model,criteria=criteriaString,
        # startRow=startRow,num_rows=blockSize)

        rows += api.model_query(model=model,
                                criteria=criteriaString,
                                include=includeString,
                                options=optionsString,
                                start_row=startRow,
                                num_rows=blockSize)

        numRows = len(rows) - tot_rows  # additional rows retrieved by this query
        startRow += numRows

        print("%d rows retrieved." % numRows)

        # Check if we're at the end of the road
        if numRows == 0 or numRows < blockSize:
            done = True

        # Write out the results as they come in, if requested
        if writeOut:
            json_utilities.write(json_file_name, rows)
            print("Wrote to %s" % json_file_name)

    return rows
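A hedged usage sketch; 'Structure' and the criteria string are assumptions following standard RMA query syntax, and writeOut is left False because json_file_name is a module-level name not shown in the snippet:

structures = QueryAPI(model='Structure',
                      criteriaString='[graph_id$eq1]')
print("%d structures retrieved" % len(structures))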
Example #35
def QueryAPI(model,
             criteriaString,
             includeString="",
             optionsString="",
             writeOut=[]):
    # Initiate RMA API for Allen data retrieval
    api = RmaApi()
    # Settings for retrieval
    rows = []
    blockSize = 2000
    done = False
    startRow = 0
    # for i in range(0, total_rows, blockSize):

    while not done:
        print "Row %d, attempting to retrieve %d rows..." % (startRow,
                                                             blockSize)

        tot_rows = len(rows)
        if len(includeString) == 0:
            rows += api.model_query(model=model,
                                    criteria=criteriaString,
                                    options=optionsString,
                                    start_row=startRow,
                                    num_rows=blockSize)
        else:
            rows += api.model_query(model=model,
                                    criteria=criteriaString,
                                    include=includeString,
                                    options=optionsString,
                                    start_row=startRow,
                                    num_rows=blockSize)

        numRows = len(rows) - tot_rows  # additional rows retrieved by this query
        startRow += numRows

        print "%d rows retrieved." % numRows

        # Check if we're at the end of the road
        if numRows == 0 or numRows < blockSize:
            done = True

        # Write out the results as they come in, if requested
        if isinstance(writeOut, str):
            json_utilities.write(json_file_name, rows)
            print("Wrote to %s" % json_file_name)

    return rows
Example #36
def main():
    """
    Usage:
    > python generate_pipeline_input.py --specimen_id SPECIMEN_ID --cell_dir CELL_DIR
    > python generate_pipeline_input.py --input_nwb_file INPUT_NWB_FILE --cell_dir CELL_DIR

    """

    kwargs = parse_args()
    pipe_input = generate_pipeline_input(**kwargs)
    cell_dir = kwargs["cell_dir"]

    input_json = os.path.join(cell_dir, 'pipeline_input.json')

    ju.write(input_json, pipe_input)
Example #37
def main():
    module = ags.ArgSchemaParser(schema_type=PopulationSelectionParameters)
    print(module.args)

    fits = module.args["paths"]["fits"]
    populations = ps.population_info(fits)
    starting_populations = ps.select_starting_population(populations)

    output = {
        "paths": {
            "starting_populations": starting_populations,
        }
    }

    ju.write(module.args["output_json"], output)
Example #38
def test_download():
    if os.path.exists(OUTPUT_DIR):
        shutil.rmtree(OUTPUT_DIR)
        
    os.makedirs(OUTPUT_DIR)

    glif_api = GlifApi()
    glif_api.get_neuronal_model(NEURONAL_MODEL_ID)
    glif_api.cache_stimulus_file(os.path.join(OUTPUT_DIR, '%d.nwb' % NEURONAL_MODEL_ID))

    neuron_config = glif_api.get_neuron_config()
    json_utilities.write(os.path.join(OUTPUT_DIR, '%d_neuron_config.json' % NEURONAL_MODEL_ID), neuron_config)

    ephys_sweeps = glif_api.get_ephys_sweeps()
    json_utilities.write(os.path.join(OUTPUT_DIR, 'ephys_sweeps.json'), ephys_sweeps)
Example #39
    def get_ephys_sweeps(self, specimen_id, file_name=None):
        """
        Download sweep metadata for a single cell specimen.  

        Parameters
        ----------
        
        specimen_id: int
             ID of a cell.
        """

        file_name = self.get_cache_path(file_name, self.EPHYS_SWEEPS_KEY, specimen_id)
        
        if os.path.exists(file_name):
            sweeps = json_utilities.read(file_name)
        else:
            sweeps = self.api.get_ephys_sweeps(specimen_id)

            if self.cache:
                json_utilities.write(file_name, sweeps)

        return sweeps
Example #40
def test_optimize_neuron():
    p = re.compile(r"(\d+)_(.*)_model_config.json")

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)

    for model_config_file in MODEL_CONFIG_FILES:
        logging.info("testing %s" % model_config_file)

        fname = os.path.basename(model_config_file)
        m = p.match(fname)

        sid, config = m.groups()
        data_config_file = DATA_CONFIG_PATTERN % int(sid)

        data_config = ju.read(data_config_file)
        nwb_file = data_config["filename"]
        sweep_list = data_config["sweeps"].values()

        model_config = ju.read(model_config_file)

        #DBG
        model_config['optimizer']['inner_iterations'] = 1
        model_config['optimizer']['outer_iterations'] = 1
        #DBG
        
        sweep_index = { s['sweep_number']:s for s in sweep_list }    

        optimizer, best_param, begin_param = optimize_neuron(model_config, sweep_index)

        out_file = os.path.join(OUT_DIR, "%s_%s_neuron_config.json" % (sid, config))
        ju.write(out_file, optimizer.experiment.neuron.to_dict())

        out_config_file = os.path.join(OUT_DIR, "%s_%s_optimized_model_config.json" % (sid, config))
        ju.write(out_config_file, {
                'optimizer': optimizer.to_dict(),
                'neuron': optimizer.experiment.neuron.to_dict()
                })
Example #41
def test_preprocess_neuron():
    logging.getLogger().setLevel(logging.DEBUG)
    p = re.compile(r"(\d+)_data_config.json")
    dt = 5e-05
    bessel = { 'N': 4, 'Wn': .1 }
    cut = 0

    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)


    for data_config_file in DATA_CONFIG_FILES:
        logging.info("testing %s" % data_config_file)
        fname = os.path.basename(data_config_file)
        m = p.match(fname)

        sid = m.group(1)

        test_data_file = os.path.join(TEST_DIR, "%s_preprocessed_dict.json" % sid)

        if not os.path.exists(test_data_file):
            logging.warning("no test file %s" % test_data_file)
            continue

        out_file = os.path.join(OUT_DIR, "%s_preprocessed_dict.json" % sid)

        data_config = ju.read(data_config_file)
        nwb_file = data_config["filename"]
        sweep_list = data_config["sweeps"].values()

        d = preprocess_neuron(nwb_file, sweep_list, dt, cut, bessel)

        dictionary = ju.read(test_data_file)

        ju.write(out_file, d)

        errs = []
        assert_equal(d['El'], 0.0, 'El', errs)
        assert_equal(d['El_reference'], dictionary['El']['El_noise']['measured']['mean'], 'El_reference', errs)
        assert_equal(d['deltaV'], None, 'deltaV', errs)
        assert_equal(d['dt'], dictionary['dt_used_for_preprocessor_calculations'], 'dt', errs)
        assert_equal(d['R_input'], dictionary['resistance']['R_lssq_Wrest']['mean'], 'R_input', errs)
        assert_equal(d['C'], dictionary['capacitance']['C_lssq_Wrest']['mean'], 'C', errs)
        assert_equal(d['th_inf'], dictionary['th_inf']['via_Vmeasure']['from_zero'], 'th_inf', errs)
        assert_equal(d['th_adapt'], dictionary['th_adapt']['from_95percentile_noise']['deltaV'], 'th_adapt', errs)
        assert_equal(d['spike_cut_length'], dictionary['spike_cutting']['NOdeltaV']['cut_length'], 'spike_cut_length', errs)
        assert_equal(d['spike_cutting_intercept'], dictionary['spike_cutting']['NOdeltaV']['intercept'], 'spike_cutting_intercept', errs)
        assert_equal(d['spike_cutting_slope'], dictionary['spike_cutting']['NOdeltaV']['slope'], 'spike_cutting_slope', errs)
        assert_equal(d['asc_amp_array'], dictionary['asc']['amp'], 'asc_amp_array', errs)
        assert_equal(d['asc_tau_array'], 1./np.array(dictionary['asc']['k']), 'asc_tau_array', errs)

        nlp = d['nonlinearity_parameters']
        assert_equal(nlp['line_param_RV_all'], dictionary['nonlinearity_parameters']['line_param_RV_all'], 'line_param_RV_all', errs)
        assert_equal(nlp['line_param_ElV_all'], dictionary['nonlinearity_parameters']['line_param_ElV_all'], 'line_param_ElV_all', errs)

        ta = d['threshold_adaptation']
        assert_equal(ta['a_spike_component_of_threshold'], dictionary['threshold_adaptation']['a_spike_component_of_threshold'], 'a_spike', errs)
        assert_equal(ta['b_spike_component_of_threshold'], dictionary['threshold_adaptation']['b_spike_component_of_threshold'], 'b_spike', errs) 
        assert_equal(ta['a_voltage_component_of_threshold'], dictionary['threshold_adaptation']['a_voltage_component_of_threshold'], 'a_voltage', errs)
        assert_equal(ta['b_voltage_component_of_threshold'], dictionary['threshold_adaptation']['b_voltage_component_of_threshold'], 'b_voltage', errs) 

        mlin = d['MLIN']
        assert_equal(mlin['var_of_section'], dictionary['MLIN']['var_of_section'], 'var_of_section', errs)
        assert_equal(mlin['sv_for_expsymm'],  dictionary['MLIN']['sv_for_expsymm'], 'sv_for_expsymm', errs)
        assert_equal(mlin['tau_from_AC'], dictionary['MLIN']['tau_from_AC'], 'tau_from_AC', errs)

        if len(errs) > 0:
            for err in errs:
                logging.error(err)
            raise Exception("Preprocessor outputs did not match.")
Example #42
#===============================================================================

from allensdk.api.queries.glif_api import GlifApi
from allensdk.core.cell_types_cache import CellTypesCache
import allensdk.core.json_utilities as json_utilities

neuronal_model_id = 566302806

# download model metadata
glif_api = GlifApi()
nm = glif_api.get_neuronal_models_by_id([neuronal_model_id])[0]

# download the model configuration file
neuron_config = glif_api.get_neuron_configs([neuronal_model_id])
json_utilities.write('neuron_config.json', neuron_config)

# download information about the cell
ctc = CellTypesCache()
ctc.get_ephys_data(nm['specimen_id'], file_name='stimulus.nwb')
ctc.get_ephys_sweeps(nm['specimen_id'], file_name='ephys_sweeps.json')

#===============================================================================
# example 2
#===============================================================================

import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('neuron_config.json')['566302806']
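The snippet stops after loading the config; building the neuron would continue along the lines of the AllenSDK GLIF examples:

neuron = GlifNeuron.from_dict(neuron_config)  # instantiate from the saved configuration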
Example #43
File: cache.py  Project: FloFra/AllenSDK
 def wrap(self, fn, path, cache,
          save_as_json=True,
          return_dataframe=False,
          index=None,
          rename=None,
          **kwargs):
     '''make an rma query, save it and return the dataframe.
     
     Parameters
     ----------
     fn : function reference
         makes the actual query using kwargs.
     path : string
         where to save the data
     cache : boolean
         True will make the query, False just loads from disk
     save_as_json : boolean, optional
         True (default) will save data as json, False as csv
     return_dataframe : boolean, optional
         True will cast the return value to a pandas dataframe, False (default) will not 
     index : string, optional
         column to use as the pandas index
     rename : list of string tuples, optional
         (new, old) columns to rename
     kwargs : objects
         passed through to the query function
     
     Returns
     -------
     dict or DataFrame
         data type depends on return_dataframe option.
     
     Notes
     -----
     Column renaming happens after the file is reloaded for json
     '''
     if cache:
         json_data = fn(**kwargs)

         if save_as_json:
             ju.write(path, json_data)
         else:
             df = pd.DataFrame(json_data)
             self.rename_columns(df, rename)

             if index is not None:
                 df.set_index([index], inplace=True)

             df.to_csv(path)

     # read it back in
     if save_as_json:
         if return_dataframe:
             data = pj.read_json(path, orient='records')
             self.rename_columns(data, rename)
             if index is not None:
                 data.set_index([index], inplace=True)
         else:
             data = ju.read(path)
     elif return_dataframe:
         # pandas removed DataFrame.from_csv; read_csv is the equivalent here
         data = pd.read_csv(path, index_col=0)
     else:
         raise ValueError('save_as_json=False cannot be used with return_dataframe=False')

     return data
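A hedged usage sketch for wrap, reusing the get_stimulus_mappings query from an earlier example; the cache object and path are hypothetical:

mappings = cache.wrap(cache.api.get_stimulus_mappings,
                      path='stimulus_mappings.json',
                      cache=True,
                      save_as_json=True)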
Example #44
from allensdk.api.queries.glif_api import GlifApi
import allensdk.core.json_utilities as json_utilities

neuronal_model_id = 472423251

glif_api = GlifApi()
glif_api.get_neuronal_model(neuronal_model_id)
glif_api.cache_stimulus_file('stimulus.nwb')

neuron_config = glif_api.get_neuron_config()
json_utilities.write('neuron_config.json', neuron_config)

ephys_sweeps = glif_api.get_ephys_sweeps()
json_utilities.write('ephys_sweeps.json', ephys_sweeps)