Example #1
def test_my_default_nested_parser():
    input_data = {
        'a': 5
    }
    argschema.ArgSchemaParser(input_data=input_data,
                              schema_type=MySchema2,
                              args=[])
Example #2
def main():

    logging.basicConfig(
        format='%(asctime)s - %(process)s - %(levelname)s - %(message)s')

    args = sys.argv[1:]
    try:
        parser = argschema.ArgSchemaParser(
            args=args,
            schema_type=InputSchema,
            output_schema_type=OutputSchema,
        )
        logging.info('Input successfully parsed')
    except marshmallow.exceptions.ValidationError as err:
        logging.error('Parsing failure')
        print(err)
        raise err

    try:
        output = write_behavior_nwb(parser.args['session_data'],
                                    parser.args['output_path'])
        logging.info('File successfully created')
    except Exception as err:
        logging.error('NWB write failure')
        print(err)
        raise err

    write_or_print_outputs(output, parser)
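write_or_print_outputs is an ipfx helper that is not shown here. A minimal sketch of what such a helper typically does, assuming it simply delegates to the parser (an assumption, not the library's exact code):

# Hypothetical sketch, not the actual ipfx implementation.
def write_or_print_outputs(data, parser):
    data.update({"input_parameters": parser.args})
    if "output_json" in parser.args:
        # Validates against output_schema_type and writes to args["output_json"].
        parser.output(data, indent=2)
    else:
        print(parser.get_output_json(data))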
Example #3
def main():
    module = ags.ArgSchemaParser(schema_type=OptimizeParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    passive_results = ju.read(module.args["paths"]["passive_results"])
    fit_style_data = ju.read(module.args["paths"]["fit_style"])

    results = optimize(
        hoc_files=module.args["paths"]["hoc_files"],
        compiled_mod_library=module.args["paths"]["compiled_mod_library"],
        morphology_path=module.args["paths"]["swc"],
        features=preprocess_results["features"],
        targets=preprocess_results["target_features"],
        stim_params=StimParams(preprocess_results["stimulus"]),
        passive_results=passive_results,
        fit_type=module.args["fit_type"],
        fit_style_data=fit_style_data,
        seed=module.args["seed"],
        ngen=module.args["ngen"],
        mu=module.args["mu"],
        storage_directory=module.args["paths"]["storage_directory"],
        starting_population=module.args["paths"].get("starting_population",
                                                     None))

    logging.info("Writing optimization output")
    ju.write(module.args["output_json"], results)
Example #4
def test_simple_description():
    d = {
        'a': "hello",
        'b': 1,
        'd': [1, 5, 4]
    }
    mod = argschema.ArgSchemaParser(input_data=d, schema_type=MyShorterExtension)
Example #5
def test_SessionUploadSchema(tmpdir):
    src_file = Path(tmpdir) / 'src.csv'
    src_file.touch()

    dst_file = Path(tmpdir) / 'dst.csv'

    output_json = Path(tmpdir) / 'output.json'

    test_data = {
        'files': [{
            'source': str(src_file),
            'destination': str(dst_file),
            'key': ''
        }],
        'output_json':
        str(output_json)
    }

    parser = argschema.ArgSchemaParser(
        test_data,
        schema_type=SessionUploadInputSchema,
        output_schema_type=SessionUploadOutputSchema,
        args=[])

    # Mocking the functionality of the main method
    shutil.copy(src_file, dst_file)
    parser.output({'files': test_data['files']})
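As a follow-up check (not part of the original test), parser.output validates the dictionary against SessionUploadOutputSchema and writes it to the path given under 'output_json', so the written file could be read back, assuming the output schema echoes the 'files' entries:

# Hypothetical extra assertions; assumes SessionUploadOutputSchema round-trips 'files'.
import json
assert output_json.exists()
with open(output_json) as f:
    written = json.load(f)
assert 'files' in written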
Example #6
def main():
    module = ags.ArgSchemaParser(schema_type=MergeParameters)
    project = module.args["project"]
    gmm_types = module.args["gmm_types"]

    sub_dirs = [
        s.format(project) for s in ["all_{:s}", "exc_{:s}", "inh_{:s}"]
    ]
    piecewise_components = [2, 2, 3]
    for sub_dir, gmm_type, pw_comp in zip(sub_dirs, gmm_types,
                                          piecewise_components):
        print("merging for ", sub_dir, "with", gmm_type)
        merge_info, new_labels, tau_merged, _ = entropy_merges(
            sub_dir, project, gmm_type=gmm_type, piecewise_components=pw_comp)
        print(merge_info)
        data = pd.read_csv(
            os.path.join(sub_dir, "sparse_pca_components_{:s}.csv".format(project)),
            index_col=0)
        new_labels, tau_merged, _, _ = order_new_labels(
            new_labels, tau_merged, data)

        np.savetxt(os.path.join(sub_dir, "post_merge_proba.txt"), tau_merged)
        np.save(os.path.join(sub_dir, "post_merge_cluster_labels.npy"),
                new_labels)
        df = pd.read_csv(
            os.path.join(sub_dir, "all_tsne_coords_{:s}.csv".format(project)))
        df["clustering_3"] = new_labels
        df.to_csv(
            os.path.join(sub_dir,
                         "all_tsne_coords_{:s}_plus.csv".format(project)))
Example #7
def main():

    logging.basicConfig(
        format='%(asctime)s - %(process)s - %(levelname)s - %(message)s')

    # TODO replace with argschema implementation of multisource parser
    remaining_args = sys.argv[1:]
    input_data = {}
    if '--get_inputs_from_lims' in sys.argv:
        lims_parser = argparse.ArgumentParser(add_help=False)
        lims_parser.add_argument('--host', type=str, default='http://lims2')
        lims_parser.add_argument('--job_queue', type=str, default=None)
        lims_parser.add_argument('--strategy', type=str, default=None)
        lims_parser.add_argument('--image_series_id', type=int, default=None)
        lims_parser.add_argument('--output_root', type=str, default=None)

        lims_args, remaining_args = lims_parser.parse_known_args(
            remaining_args)
        remaining_args = [
            item for item in remaining_args if item != '--get_inputs_from_lims'
        ]
        input_data = get_inputs_from_lims(**lims_args.__dict__)

    parser = argschema.ArgSchemaParser(
        args=remaining_args,
        input_data=input_data,
        schema_type=InputParameters,
        output_schema_type=OutputParameters,
    )

    output = run_grid(parser.args)
    write_or_print_outputs(output, parser)
Example #8
def main():
    module = ags.ArgSchemaParser(schema_type=PassiveFittingParameters)

    info = ju.read(module.args["paths"]["passive_info"])
    if not info["should_run"]:
        ju.write(module.args["output_json"], { "paths": {} })
        return

    swc_path = module.args["paths"]["swc"].encode('ascii', 'ignore')
    up_data = np.loadtxt(module.args["paths"]["up"])
    down_data = np.loadtxt(module.args["paths"]["down"])
    passive_fit_type = module.args["passive_fit_type"]
    results_file = module.args["paths"]["passive_fit_results_file"]


    npf.initialize_neuron(swc_path, module.args["paths"]["fit"])

    if passive_fit_type == npf.PASSIVE_FIT_1:
        results = npf.passive_fit_1(info, up_data, down_data)
    elif passive_fit_type == npf.PASSIVE_FIT_2:
        results = npf.passive_fit_2(info, up_data, down_data)
    elif passive_fit_type == npf.PASSIVE_FIT_ELEC:
        results = npf.passive_fit_elec(info, up_data, down_data)
    else:
        raise Exception("unknown passive fit type: %s" % passive_fit_type)

    ju.write(results_file, results)

    ju.write(module.args["output_json"], { "paths": { passive_fit_type: results_file } })
Example #9
def main():
    """
    Convert nwb version 1 to nwb version 2

    Usage:
    -----
    python run_nwb1_to_nwb2_conversion.py --input_nwb_file /path/to/version1/file.nwb
    or
    python -m ipfx.bin.run_nwb1_to_nwb2_conversion --input_nwb_file /path/to/version1/file.nwb

    Will produce nwb version 2 file in the same folder
    """

    module = args.ArgSchemaParser(schema_type=ConvertNWBParameters)

    nwb1_file_name = module.args["input_nwb_file"]
    dir_name = os.path.dirname(nwb1_file_name)
    base_name = os.path.basename(nwb1_file_name)

    nwb2_file_name = make_nwb2_file_name(dir_name, base_name)

    if not os.path.exists(nwb1_file_name):
        raise ValueError(f"The file {nwb1_file_name} does not exist.")

    NWBConverter(
        nwb1_file_name,
        nwb2_file_name,
    )
Example #10
def main():
    module = ags.ArgSchemaParser(schema_type=ModelSelectionParameters)

    swc_path = module.args["paths"]["swc"]
    fit_style_paths = module.args["paths"]["fit_styles"]
    best_fit_json_path = module.args["paths"]["best_fit_json_path"]
    passive = ju.read(module.args["paths"]["passive_results"])
    preprocess = ju.read(module.args["paths"]["preprocess_results"])

    fits = module.args["paths"]["fits"]
    fit_results = ms.fit_info(fits)
    best_fit = ms.select_model(fit_results, module.args["paths"], passive,
                               preprocess["v_baseline"],
                               module.args["noise_1_sweeps"],
                               module.args["noise_2_sweeps"])
    if best_fit is None:
        raise Exception("Failed to find acceptable optimized model")

    logging.info("building fit data")
    fit_style_data = ju.read(
        module.args["paths"]["fit_styles"][best_fit["fit_type"]])
    fit_data = ms.build_fit_data(best_fit["params"], passive, preprocess,
                                 fit_style_data)

    logging.info("writing fit data: %s", best_fit_json_path)
    ju.write(best_fit_json_path, fit_data)

    output = {
        "paths": {
            "fit_json": best_fit_json_path,
        }
    }

    logging.info("writing output json: %s", module.args["output_json"])
    ju.write(module.args["output_json"], output)
Example #11
def test_david_example(tmpdir_factory):
    file_ = tmpdir_factory.mktemp('test').join('testinput.json')
    file_.write(json.dumps(david_data))
    args = ['--input_json', str(file_)]
    mod = argschema.ArgSchemaParser(schema_type=PopulationSelectionParameters, args=args)
    print(mod.args)
    assert len(mod.args['paths']['fits']) == 2
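For reference, the same parse can be driven without the temporary file by handing the dictionary straight to the parser, as several other examples on this page do (equivalent usage, assuming no extra command-line overrides are needed):

# Equivalent direct construction, bypassing the --input_json round trip.
mod = argschema.ArgSchemaParser(input_data=david_data,
                                schema_type=PopulationSelectionParameters,
                                args=[])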
Example #12
def main():
    module = ags.ArgSchemaParser(schema_type=CollectFeatureVectorParameters)

    if module.args["input"]:  # input file should be list of IDs on each line
        with open(module.args["input"], "r") as f:
            ids = [int(line.strip("\n")) for line in f]
        run_feature_vector_extraction(ids=ids, **module.args)
    else:
        run_feature_vector_extraction(**module.args)
Example #13
def test_training_schema_fails_nonexistent_data(self, s3_file):
    args = {
        "training_data": "s3://myschematest/does/not/exist.txt",
        "test_data": s3_file
    }
    with pytest.raises(mm.ValidationError, match=r".*does not exist"):
        argschema.ArgSchemaParser(input_data=args,
                                  schema_type=train.TrainingSchema,
                                  args=[])
Example #14
def test_training_schema_fails_bad_optimizer(self, s3_file):
    args = {
        "training_data": s3_file,
        "test_data": s3_file,
        "optimizer": "CoolOptimizer"
    }
    with pytest.raises(mm.ValidationError, match=r"Must be one of"):
        argschema.ArgSchemaParser(input_data=args,
                                  schema_type=train.TrainingSchema,
                                  args=[])
Example #15
def test_training_schema_fails_bad_model(self, s3_file):
    args = {
        "training_data": s3_file,
        "test_data": s3_file,
        "model": "SomeRandomAlgorithm"
    }
    with pytest.raises(mm.ValidationError, match=r"Must be one of"):
        argschema.ArgSchemaParser(input_data=args,
                                  schema_type=train.TrainingSchema,
                                  args=[])
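The "Must be one of" message in the two tests above is marshmallow's standard OneOf validation error. A hypothetical sketch of how train.TrainingSchema might declare such fields (the real schema may differ):

# Hypothetical sketch of the validated fields, not the actual train.TrainingSchema.
import argschema
import marshmallow as mm

class TrainingSchema(argschema.ArgSchema):
    model = argschema.fields.Str(
        default="RandomForest",
        validate=mm.validate.OneOf(["RandomForest", "LogisticRegression"]),
        description="model name; anything else fails with 'Must be one of ...'")
    optimizer = argschema.fields.Str(
        default="Adam",
        validate=mm.validate.OneOf(["Adam", "SGD"]),
        description="optimizer name, restricted to a known set")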
Example #16
def main():

    module = args.ArgSchemaParser(schema_type=ConvertNWBParameters)

    nwb1_file_name = module.args["input_nwb_file"]
    nwb2_file_name = make_nwb2_file_name(nwb1_file_name)

    if not os.path.exists(nwb1_file_name):
        raise ValueError(f"The file {nwb1_file_name} does not exist.")

    convert(nwb1_file=nwb1_file_name, nwb2_file=nwb2_file_name)
Example #17
def main():

    module = ags.ArgSchemaParser(schema_type=MorphologySummaryParameters)
    run_morphology_summary(module.args["pia_transform"],
                           module.args["relative_soma_depth"],
                           module.args["soma_depth"], module.args["swc_file"],
                           module.args["thumbnail_file"],
                           module.args["cortex_thumbnail_file"],
                           module.args["normal_depth_thumbnail_file"],
                           module.args["high_resolution_thumbnail_file"])

    ju.write(module.args["output_json"], {})
Example #18
def main():
    """
    Usage:
    python run_sweep_extraction.py
        --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=SweepExtractionParameters)
    output = run_sweep_extraction(**module.args)

    json_utilities.write(module.args["output_json"], output)
Example #19
def main():
    """
    Usage:
    python run_sweep_extraction.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=SweepExtractionParameters)
    output = run_sweep_extraction(
        module.args["input_nwb_file"], module.args.get("input_h5_file", None),
        module.args.get("stimulus_ontology_file", None))

    ju.write(module.args["output_json"], output)
Example #20
def main():
    module = ags.ArgSchemaParser(schema_type=ConsolidateParameters)

    preprocess_results = ju.read(module.args["paths"]["preprocess_results"])
    is_spiny = preprocess_results["is_spiny"]
    info = ju.read(module.args["paths"]["passive_info"])

    if info["should_run"]:
        fit_1_path = module.args["paths"]["passive_fit_1"]
        fit_1 = ju.read(fit_1_path)

        fit_2_path = module.args["paths"]["passive_fit_2"]
        fit_2 = ju.read(fit_2_path)

        fit_3_path = module.args["paths"]["passive_fit_elec"]
        fit_3 = ju.read(fit_3_path)

        ra, cm1, cm2 = cpf.compare_runs(preprocess_results, fit_1, fit_2,
                                        fit_3)
    else:
        ra = 100.
        cm1 = 1.
        if is_spiny:
            cm2 = 2.
        else:
            cm2 = 1.

    passive = {
        "ra": ra,
        "cm": {
            "soma": cm1,
            "axon": cm1,
            "dend": cm2
        },
        "e_pas": preprocess_results["v_baseline"]
    }

    passive["e_pas"] = preprocess_results["v_baseline"]
    if preprocess_results["has_apical"]:
        passive["cm"]["apic"] = cm2

    passive_results_path = module.args["paths"]["passive_results"]
    ju.write(passive_results_path, passive)

    output = {
        "paths": {
            "passive_results": passive_results_path,
        }
    }

    ju.write(module.args["output_json"], output)
Example #21
File: run_qc.py  Project: smestern/ipfx
def main():
    """
    Usage:
    python run_qc.py --input_json INPUT_JSON --output_json OUTPUT_JSON


    """
    module = ags.ArgSchemaParser(schema_type=QcParameters)

    output = run_qc(module.args.get("stimulus_ontology_file", None),
                    module.args["cell_features"],
                    module.args["sweep_features"],
                    module.args["qc_criteria"])

    ju.write(module.args["output_json"], output)
Example #22
def main():
    module = ags.ArgSchemaParser(schema_type=PopulationSelectionParameters)
    print(module.args)

    fits = module.args["paths"]["fits"]
    populations = ps.population_info(fits)
    starting_populations = ps.select_starting_population(populations)

    output = {
        "paths": {
            "starting_populations": starting_populations,
        }
    }

    ju.write(module.args["output_json"], output)
Example #23
def main():
    global files
    global ids
    module = ags.ArgSchemaParser(schema_type=CollectFeatureVectorParameters)

    path = module.args["input"]

    no = 0
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            if '.nwb' in file:
                files.append(os.path.join(r, file))
                ids.append(no)
                no += 1

    run_feature_vector_extraction(ids=ids, files=files, **module.args)
Example #24
def main():
    """Main sequence of pre-processing and passive fitting"""

    # This argschema package reads arguments from a JSON file
    module = ags.ArgSchemaParser(schema_type=PreprocessorParameters,
                                 logger_name=None)

    nwb_path = module.args["paths"][
        "nwb"]  # nwb - neurodata without borders (ephys data)
    swc_path = module.args["paths"]["swc"]  # swc - morphology data
    storage_directory = module.args["paths"]["storage_directory"]

    try:
        paths, results, passive_info, s1_tasks, s2_tasks = \
            preprocess(data_set=NwbDataSet(nwb_path),
                       swc_data=pd.read_table(swc_path, sep=r'\s+', comment='#', header=None),
                       dendrite_type_tag=module.args["dendrite_type_tag"],
                       sweeps=module.args["sweeps"],
                       bridge_avg=module.args["bridge_avg"],
                       storage_directory=storage_directory)
    except NoUsableSweepsException as e:
        ju.write(module.args["output_json"], {'error': e.message})
        return

    preprocess_results_path = os.path.join(storage_directory,
                                           "preprocess_results.json")
    ju.write(preprocess_results_path, results)

    passive_info_path = os.path.join(storage_directory, "passive_info.json")
    ju.write(passive_info_path, passive_info)

    paths.update({
        "swc": swc_path,
        "nwb": nwb_path,
        "storage_directory": storage_directory,
        "preprocess_results": preprocess_results_path,
        "passive_info": passive_info_path,
    })

    output = {
        "paths": paths,
        "stage_1_task_list": s1_tasks,
        "stage_2_task_list": s2_tasks,
    }

    ju.write(module.args["output_json"], output)
Example #25
def main():
    global files
    global ids
    module = ags.ArgSchemaParser(schema_type=CollectFeatureVectorParameters)

    path = module.args["input"]

    no = 0
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            if '.nwb' in file:
                files.append(os.path.join(r, file))
                ids.append(no)
                no += 1
    np.savetxt('test.csv', np.array([files, ids]), delimiter=',', fmt="%s")
    run_feature_vector_extraction(ids=ids, nfiles=files, **module.args)
Example #26
def test_annotation_job_schema_make_objects(upload_manifest, context,
                                            tmp_path):
    """Test that the objects are properly loaded and turned into
    appropriate objects."""
    input_dict = {
        "slapp_upload_manifest_path": upload_manifest,
        "annotation_output_manifest": str(tmp_path),
        "labeling_project_key": "astley",
        "annotation_id_key": "id",
        "output_location": str(tmp_path)
    }
    with context:
        result = argschema.ArgSchemaParser(
            schema_type=AnnotationIngestJobInput,
            input_data=input_dict,
            args=[])
        assert "manifest_data" in result.args
Example #27
def main():
    """
    Usage:
    python run_feature_extraction.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=FeatureExtractionParameters)

    feature_data = run_feature_extraction(
        module.args["input_nwb_file"],
        module.args.get("stimulus_ontology_file", None),
        module.args["output_nwb_file"],
        module.args.get("qc_fig_dir", None),
        module.args["sweep_features"],
        module.args["cell_features"])

    ju.write(module.args["output_json"], feature_data)
Example #28
def main():
    """
    Usage:
    python run_pipeline.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=PipelineParameters)

    output = run_pipeline(
        module.args["input_nwb_file"],
        module.args["output_nwb_file"],
        module.args.get("stimulus_ontology_file", None),
        module.args.get("qc_fig_dir", None),
        module.args.get("qc_criteria", None),
        module.args.get("manual_sweep_states", None),
        module.args.get("write_spikes", None),
    )

    json_utilities.write(module.args["output_json"], output)
Example #29
def main():
    """
    Usage:
    python run_pipeline.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=PipelineParameters)

    output = run_pipeline(module.args["input_nwb_file"],
                          module.args.get("input_h5_file", None),
                          module.args["output_nwb_file"],
                          module.args.get("stimulus_ontology_file", None),
                          module.args.get("qc_fig_dir", None),
                          module.args.get("qc_criteria", None),
                          module.args.get("manual_sweep_states", None))

    ju.write(module.args["output_json"], output)

    lu.log_pretty_header("Analysis completed!", level=1)
Example #30
    def load_hdf5(self):
        """
        Opens hdf5 files and loads its datasets
        """
        #Get input data from the json file or command line
        data = argschema.ArgSchemaParser(schema_type=TopSchema)
        input_args = data.args

        #Open 2P video hdf5 file and load its content
        video_file = h5py.File(input_args['video']['uri'], 'r')
        self.buffer_video_frame = video_file.get(
            input_args['video']['hdf5']['dataset'][0])

        # Open segmentation file and load its datasets
        mask_file = h5py.File(input_args['segmentation']['uri'], 'r')

        self.masks = mask_file.get(
            input_args['segmentation']['hdf5']['dataset'][0])
        self.masks = self.masks[:, :, :]
        self.masks = self.masks.transpose(0, 2, 1)

        offset = mask_file.get(
            input_args['segmentation']['hdf5']['dataset'][1])
        self.offset_x = offset['x']
        self.offset_y = offset['y']

        size = mask_file.get(input_args['segmentation']['hdf5']['dataset'][2])
        self.size_x = size['x']
        self.size_y = size['y']

        mask_file.close()

        #Open ROI Trace hdf5 file and load its content
        trace_file = h5py.File(input_args['traces']['uri'], 'r')
        self.traces = trace_file.get(
            input_args['traces']['hdf5']['dataset'][0])
        self.traces = self.traces[:, :]
        self.traces = np.asarray(self.traces, dtype=np.float16)
        self.trace_min = np.min(self.traces)
        self.trace_max = np.max(self.traces)
        trace_file.close()
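The nested lookups above ('uri' plus an 'hdf5' block with a 'dataset' list) imply a nested TopSchema. A hypothetical input dictionary matching those lookups (dataset names and paths are placeholders, not the real schema's values):

# Hypothetical input shape inferred from the lookups in load_hdf5, not the actual TopSchema.
input_data = {
    "video":        {"uri": "/path/to/video.h5",  "hdf5": {"dataset": ["data"]}},
    "segmentation": {"uri": "/path/to/masks.h5",  "hdf5": {"dataset": ["masks", "offset", "size"]}},
    "traces":       {"uri": "/path/to/traces.h5", "hdf5": {"dataset": ["traces"]}},
}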