Example #1
def main():
    """Main entry point for running sg-prototype."""
    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = {}
    # YOUR STUFF GOES HERE
    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output)
    else:
        print(mod.get_output_json(output))
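The InputParameters / OutputParameters pair used here (and in most of the main() entry points below) is defined elsewhere in each package. A minimal sketch of what such a schema pair might look like, built on argschema's ArgSchema and DefaultSchema; the field names are placeholders, not taken from any of the original modules:

from argschema import ArgSchema
from argschema.fields import Int, Nested
from argschema.schemas import DefaultSchema


class InputParameters(ArgSchema):
    # ArgSchema already provides the input_json, output_json and log_level fields
    n_iterations = Int(required=True, description="placeholder example field")


class OutputParameters(DefaultSchema):
    # the entry points on this page echo the validated inputs back into the output
    input_parameters = Nested(InputParameters,
                              description="parsed input parameters, echoed back")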
Example #2
def test_bad_output(tmpdir):
    file_out = tmpdir.join('test_output_bad.json')
    input_parameters = {'output_json': str(file_out)}
    mod = ArgSchemaParser(input_data=input_parameters,
                          output_schema_type=MyOutputSchema,
                          args=[])
    M = [[5, 5], [7, 2]]
    Mnp = np.array(M)
    output = {"a": "example", "b": "not a number", "M": Mnp}

    with pytest.raises(mm.ValidationError):
        mod.output(output)
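Examples #2 and #9 validate against a MyOutputSchema that is not shown on this page. Below is a sketch consistent with both tests; the exact field options are inferred, not copied from the original test module. An integer field b explains why the string "not a number" fails validation here, its default of 5 explains the "b": 5 in Example #9's expected output, and NumpyArray dumps M back to nested lists.

from argschema.fields import Int, NumpyArray, Str
from argschema.schemas import DefaultSchema


class MyOutputSchema(DefaultSchema):
    a = Str(required=True, description="a simple string")
    b = Int(default=5, description="an integer with a default")
    M = NumpyArray(description="a numpy array, serialized as nested lists")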
Example #3
def test_output_unvalidated(tmpdir):
    file_out = tmpdir.join('test_output_unvalidated.json')
    input_parameters = {'output_json': str(file_out)}
    mod = ArgSchemaParser(input_data=input_parameters, args=[])

    output = {
        "a": "example",
    }
    mod.output(output)
    with open(str(file_out), 'r') as fp:
        actual_output = json.load(fp)
    assert actual_output == output
Example #4
def main():
    """Main entry point for running AllenSDK Eye Tracking."""
    try:
        mod = ArgSchemaParser(schema_type=InputParameters,
                              output_schema_type=OutputParameters)

        istream = CvInputStream(mod.args["input_source"])

        ostream = setup_annotation(
            istream.frame_shape,
            **mod.args.get("annotation", DEFAULT_ANNOTATION))

        qc_params = mod.args.get("qc", {})
        generate_plots = qc_params.get("generate_plots",
                                       EyeTracker.DEFAULT_GENERATE_QC_OUTPUT)

        tracker = EyeTracker(istream, ostream,
                             mod.args.get("starburst", {}),
                             mod.args.get("ransac", {}),
                             mod.args["pupil_bounding_box"],
                             mod.args["cr_bounding_box"],
                             generate_plots,
                             **mod.args.get("eye_params", {}))
        cr_params, pupil_params, cr_err, pupil_err = tracker.process_stream(
            start=mod.args.get("start_frame", 0),
            stop=mod.args.get("stop_frame", None),
            step=mod.args.get("frame_step", 1))

        output = write_output(mod.args["output_dir"], cr_params, pupil_params,
                              tracker.mean_frame)

        pupil_intensity = None
        if tracker.adaptive_pupil:
            pupil_intensity = tracker.pupil_colors
        if generate_plots:
            write_QC_output(tracker.annotator,
                            cr_params,
                            pupil_params,
                            cr_err,
                            pupil_err,
                            tracker.mean_frame,
                            pupil_intensity=pupil_intensity,
                            **mod.args)

        output["input_parameters"] = mod.args
        if "output_json" in mod.args:
            mod.output(output, indent=1)
        else:
            print(json.dumps(mod.get_output_json(output), indent=1))
    except marshmallow.ValidationError as e:
        print(e)
        argparser = schema_argparser(InputParameters())
        argparser.print_usage()
Example #5
def main():

    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    output = calculate_quality_metrics(mod.args)

    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
Example #6
def main():
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    # output = calculate_stimulus_metrics_ondisk(mod.args)
    output = calculate_stimulus_metrics_gather(mod.args)
    if MPI_rank == 0:
        output.update({"input_parameters": mod.args})
        if "output_json" in mod.args:
            mod.output(output, indent=2)
        else:
            log_info(mod.get_output_json(output))
    barrier()
Example #7
def main():

    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    output = classify_noise_templates(mod.args)

    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
Example #8
def main():
    """Main entry point."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    output = get_psth_events(mod.args)

    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
Example #9
def test_output(tmpdir):
    file_out = tmpdir.join('test_output.json')
    input_parameters = {'output_json': str(file_out)}
    mod = ArgSchemaParser(input_data=input_parameters,
                          output_schema_type=MyOutputSchema,
                          args=[])
    M = [[5, 5], [7, 2]]
    Mnp = np.array(M)
    output = {"a": "example", "M": Mnp}
    expected_output = {"a": "example", "b": 5, "M": M}
    mod.output(output)
    with open(str(file_out), 'r') as fp:
        actual_output = json.load(fp)
    assert actual_output == expected_output
Example #10
def main():

    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    output = run_automerging(mod.args)

    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
Example #11
def main():
    """Main entry point."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    if mod.args['tPrime_helper_params']['tPrime_3A']:
        output = call_TPrime_3A(mod.args)
    else:
        output = call_TPrime(mod.args)

    output.update({"input_parameters": mod.args})
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
Example #12
def main():
    parser = ArgSchemaParser(
        schema_type=InputParameters,
        output_schema_type=OutputParameters
    )

    inputs_record = cp.deepcopy(parser.args)
    logging.getLogger().setLevel(inputs_record.pop("log_level"))
    inputs_record.pop("input_json", None)
    inputs_record.pop("output_json", None)
    output_table_path = inputs_record.pop("output_table_path", None)
    
    output = {}
    output.update({"inputs": parser.args})
    output.update({"results": extract_multiple(**inputs_record)})

    parser.output(output)
Example #13
def main():
    """Main entry point for running AllenSDK Eye Tracking."""
    try:
        mod = ArgSchemaParser(schema_type=InputParameters,
                              output_schema_type=OutputParameters)

        starburst_args = get_starburst_args(mod.args["starburst"])

        istream = CvInputStream(mod.args["input_source"])

        im_shape = istream.frame_shape

        ostream = setup_annotation(im_shape, **mod.args["annotation"])

        tracker = EyeTracker(im_shape, istream, ostream, starburst_args,
                             mod.args["ransac"],
                             mod.args["pupil_bounding_box"],
                             mod.args["cr_bounding_box"],
                             mod.args["qc"]["generate_plots"],
                             **mod.args["eye_params"])
        pupil_parameters, cr_parameters = tracker.process_stream(
            start=mod.args.get("start_frame", 0),
            stop=mod.args.get("stop_frame", None),
            step=mod.args.get("frame_step", 1))

        output = write_output(mod.args["output_dir"], cr_parameters,
                              pupil_parameters, tracker.mean_frame)

        if mod.args["qc"]["generate_plots"]:
            write_QC_output(tracker.annotator, cr_parameters, pupil_parameters,
                            tracker.mean_frame, **mod.args)

        output["input_parameters"] = mod.args
        if "output_json" in mod.args:
            mod.output(output, indent=1)
        else:
            print(json.dumps(mod.get_output_json(output), indent=1))
    except marshmallow.ValidationError as e:
        print(e)
        argparser = schema_argparser(InputParameters())
        argparser.print_usage()
Example #14
def main():
    parser = ArgSchemaParser(schema_type=PiaWmStreamlineSchema,
                             output_schema_type=OutputParameters)

    args = cp.deepcopy(parser.args)
    logging.getLogger().setLevel(args.pop("log_level"))
    output_dir = args.pop('output_dir')

    depth_field, gradient_field, translation = run_streamlines(**args)

    # save results to file
    depth_field_file = os.path.join(output_dir, 'depth_field.nc')
    gradient_field_file = os.path.join(output_dir, 'gradient_field.nc')
    depth_field.to_netcdf(depth_field_file)
    gradient_field.to_netcdf(gradient_field_file)

    output = {
        'inputs': parser.args,
        'translation': translation,
        'depth_field_file': depth_field_file,
        'gradient_field_file': gradient_field_file,
    }

    parser.output(output)
Example #15
def main():
    mod = ArgSchemaParser(schema_type=ApplyAffineSchema,
                          output_schema_type=OutputParameters)
    args = mod.args

    if 'affine_dict' in args:
        affine_transform = AffineTransform.from_dict(args['affine_dict'])
    elif 'affine_list' in args:
        affine_transform = AffineTransform.from_list(args['affine_list'])
    else:
        raise ValueError('must provide either an affine_dict or affine_list')

    morph_in = morphology_from_swc(args['input_swc'])

    morph_out = affine_transform.transform_morphology(morph_in)

    morphology_to_swc(morph_out, args['output_swc'])

    output = {
        'inputs': args,
        'transformed_swc': args['output_swc'],
    }

    mod.output(output)
Example #16
def main():
    mod = ArgSchemaParser(schema_type=InputSchema,
                          output_schema_type=OutputSchema)

    if mod.args['test_mode']:
        global volume_to_h5, volume_to_tif
        volume_to_h5 = mock_h5
        volume_to_tif = mock_tif

    h5_opts = {}
    if mod.args['compression_level']:
        h5_opts = {"compression": "gzip",
                   "compression_opts": mod.args['compression_level']}

    stack_tifs = set()
    ready_to_archive = set()
    session_storage = mod.args["storage_directory"]

    output = {"column_stacks": [],
              "file_metadata": []}

    experiments = []
    z_outs = {}

    for plane_group in mod.args["plane_groups"]:
        column_stack = plane_group.get("column_z_stack_tif", None)
        if column_stack:
            ready_to_archive.add(column_stack)
            if column_stack not in stack_tifs:
                try:
                    out, meta = convert_column(
                        column_stack,
                        session_storage,
                        plane_group["ophys_experiments"][0],
                        **h5_opts
                    )
                    output["column_stacks"].append(out)
                    output["file_metadata"].append(meta)
                except ValueError as e:
                    # don't break on failed column stack conversion
                    logging.error(e)
                stack_tifs.add(column_stack)
        for exp in plane_group["ophys_experiments"]:
            localz = plane_group["local_z_stack_tif"]
            ready_to_archive.add(localz)
            out, meta = split_z(localz, exp, **h5_opts)
            if localz not in stack_tifs:
                output["file_metadata"].append(meta)
                stack_tifs.add(localz)
            experiments.append(exp)
            z_outs[exp["experiment_id"]] = out

    surf_outs, surf_meta = split_image(mod.args["surface_tif"],
                                       experiments,
                                       "surface")
    depth_outs, depth_meta = split_image(mod.args["depths_tif"],
                                         experiments,
                                         "depth")
    ts_outs, ts_meta = split_timeseries(mod.args["timeseries_tif"],
                                        experiments,
                                        **h5_opts)

    output["file_metadata"].extend([surf_meta, depth_meta, ts_meta])

    exp_out = []
    for exp in experiments:
        eid = exp["experiment_id"]
        sync_stride = ts_outs[eid].pop("sync_stride")
        sync_offset = ts_outs[eid].pop("sync_offset")
        exp_data = {"experiment_id": eid,
                    "local_z_stack": z_outs[eid],
                    "surface_2p": surf_outs[eid],
                    "depth_2p": depth_outs[eid],
                    "timeseries": ts_outs[eid],
                    "sync_offset": sync_offset,
                    "sync_stride": sync_stride}
        exp_out.append(exp_data)

    output["experiment_output"] = exp_out

    ready_to_archive.add(mod.args["surface_tif"])
    ready_to_archive.add(mod.args["depths_tif"])
    ready_to_archive.add(mod.args["timeseries_tif"])

    output["ready_to_archive"] = list(ready_to_archive)

    mod.output(output, indent=1)
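The plane_groups loop in Example #16 reads a handful of keys from each group. A hypothetical input fragment, inferred from those keys rather than from the module's actual InputSchema:

# hypothetical plane_groups fragment, inferred from the keys the loop reads
example_plane_groups = [
    {
        "column_z_stack_tif": "/data/column_0.tif",  # optional; checked with .get()
        "local_z_stack_tif": "/data/local_0.tif",
        "ophys_experiments": [
            {"experiment_id": 12345},
            {"experiment_id": 12346},
        ],
    },
]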
Example #17
if __name__ == '__main__':
    # this defines a default dictionary that will be used if input_json is not specified
    example_input = {
        "inc": {
            "name": "from_dictionary",
            "increment": 5,
            "array": [0, 2, 5],
            "write_output": True
        },
        "output_json": "output_dictionary.json"
    }

    # here is my ArgSchemaParser that processes my inputs
    mod = ArgSchemaParser(input_data=example_input,
                          schema_type=MyParameters,
                          output_schema_type=MyOutputParams)

    # pull out the inc section of the parameters
    inc_params = mod.args['inc']

    # do my simple addition of the parameters
    inc_array = inc_params['array'] + inc_params['increment']

    # define the output dictionary
    output = {'name': inc_params['name'], 'inc_array': inc_array}

    # write the output if the parameters request it
    if inc_params['write_output']:
        mod.output(output)

    pp.pprint(mod.args)
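Example #17 is a script body in the style of argschema's increment quickstart, and pp is presumably a pprint.PrettyPrinter instance. Below is a sketch of the schemas it relies on, close to the documented example but with descriptions and exact field options assumed; array must be a NumpyArray field for inc_params['array'] + inc_params['increment'] to broadcast:

from argschema import ArgSchema
from argschema.fields import Boolean, Int, Nested, NumpyArray, Str
from argschema.schemas import DefaultSchema


class MyNestedParameters(DefaultSchema):
    name = Str(required=True, description="name of the run")
    increment = Int(required=True, description="value to add to the array")
    array = NumpyArray(dtype="float", required=True,
                       description="array to increment elementwise")
    write_output = Boolean(required=False, default=True)


class MyParameters(ArgSchema):
    inc = Nested(MyNestedParameters, required=True,
                 description="parameters for the increment")


class MyOutputParams(DefaultSchema):
    name = Str(required=True)
    inc_array = NumpyArray(dtype="float", required=True)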