Example 1
def setup_data(
    reconstruction: Dict[str, Any], 
    global_parameters: Dict[str, Any]
) -> Tuple[str, Data]:
    """ Construct a Data for extracting features from a single reconstruction.

    Parameters
    ----------
    reconstruction : The reconstruction to be setup. Must specify an swc_path
    global_parameters : any cross-reconstruction feature parameters

    Returns
    -------
    identifier : a label for this reconstruction
    data : a Data object suitable for feature extraction

    """

    parameters: Dict[str, Any] = {}
    identifier = reconstruction.get("identifier", reconstruction.get("swc_path"))
    swc_path = reconstruction.pop("swc_path")
    morphology = morphology_from_swc(swc_path)

    parameters.update(hydrate_parameters(global_parameters))
    parameters.update(hydrate_parameters(reconstruction))

    return identifier, Data(morphology, **parameters)
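A minimal usage sketch for the function above; the dict contents are hypothetical, but the keys ("identifier", "swc_path") match what setup_data reads:

identifier, data = setup_data(
    reconstruction={
        "identifier": "cell_001",        # hypothetical label
        "swc_path": "cell_001.swc",      # hypothetical path; required key
    },
    global_parameters={},                # no cross-reconstruction parameters
)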
Example 2
def collect_inputs(args: Dict[str, Any]) -> Dict[str, Any]:
    """

    Parameters
    ----------
    args: dict of InputParameters

    Returns
    -------
    dict with string keys:
        morphology: Morphology object
        soma_marker_z: z value from the marker file
        soma_depth: soma depth
        cut_thickness: slice thickness
    """
    morphology = morphology_from_swc(args["swc_path"])
    soma_marker = get_soma_marker_from_marker_file(args["marker_path"])
    soma_depth = args["soma_depth"]
    cut_thickness = args["cut_thickness"]

    return {
        "morphology": morphology,
        "soma_marker_z": soma_marker["z"],
        "soma_depth": soma_depth,
        "cut_thickness": cut_thickness,
    }
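A hedged usage sketch; the paths and measurements are hypothetical, but the keys match the ones collect_inputs reads:

inputs = collect_inputs({
    "swc_path": "cell.swc",          # hypothetical path
    "marker_path": "cell.marker",    # hypothetical path
    "soma_depth": 60.0,
    "cut_thickness": 350.0,
})
morphology = inputs["morphology"]    # Morphology object, per the docstring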
Example 3
def collect_inputs(working_bucket: str, run_prefix: str,
                   morphology_scaled_key: str,
                   gradient_field_key: str) -> Tuple[Morphology, xr.DataArray]:
    """
    Gather from AWS the inputs required to run the depth field module.

    Parameters
    ----------
    working_bucket : The name of this pipeline's working bucket
    run_prefix : This run's resources (within the working bucket) have keys
        beginning with this prefix.
    morphology_scaled_key : identifier for the scale-corrected morphology
    gradient_field_key : identifier for the gradient field

    Returns
    -------
    morphology : Morphology object of scale corrected reconstruction
    gradient_field : xarray object of the gradient field
    """

    # boto3 get bytes from s3 working buckets
    gradient_field_response = s3.get_object(Bucket=working_bucket,
                                            Key=gradient_field_key)
    gradient_field = xr.open_dataarray(gradient_field_response["Body"].read())

    swc_file_response = s3.get_object(Bucket=working_bucket,
                                      Key=morphology_scaled_key)
    morphology = morphology_from_swc(swc_file_response["Body"])

    return morphology, gradient_field
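The S3-backed examples in this listing all reference a module-level s3 client that the snippets omit. A minimal sketch of that assumption using boto3:

import boto3

# Assumed module-level client; region and credentials come from the environment.
s3 = boto3.client("s3")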
Example 4
def run_layered_point_depths(
    swc_path: str,
    depth: Dict,
    layers: List[Dict],
    step_size: float,
    max_iter: int,
    output_path: str
):

    morpho = morphology_from_swc(swc_path)
    gradient_field: xr.DataArray = xr.open_dataarray(depth["gradient_field_path"])
    depth_field: xr.DataArray = xr.open_dataarray(depth["depth_field_path"])
    setup_layers(layers)
    step_size = depth["pia_sign"] * step_size

    if depth["soma_origin"]:
        soma = morpho.get_soma()
        translate_field(
            gradient_field,
            soma["x"],
            soma["y"],
            inplace=True
        )
        translate_field(
            depth_field,
            soma["x"],
            soma["y"],
            inplace=True
        )

    # linear interpolators over the depth field and each gradient component
    depth_interp = setup_interpolator(
        depth_field, None, method="linear", 
        bounds_error=False, fill_value=None)
    dx_interp = setup_interpolator(
        gradient_field, "dx", method="linear", 
        bounds_error=False, fill_value=None)
    dy_interp = setup_interpolator(
        gradient_field, "dy", method="linear", 
        bounds_error=False, fill_value=None)

    # step each node through the gradient field, recording layer intersections
    outputs = []
    for node in morpho.nodes():
        outputs.append(get_node_intersections(
            node, 
            depth_interp, 
            dx_interp, 
            dy_interp, 
            layers, 
            step_size, 
            max_iter
        ))

    depths = LayeredPointDepths.from_dataframe(pd.DataFrame(outputs))
    depths.to_csv(output_path)

    gradient_field.close()
    depth_field.close()

    return {"output_path": output_path}
Example 5
def collect_inputs(working_bucket: str, run_prefix: str,
                   reconstruction_id: int) -> Dict[str, Any]:
    """
    Gather from AWS the inputs required to run the scale correction module.

    Parameters
    ----------
    working_bucket : str
        name of this pipeline's working bucket
    run_prefix : str
        identifier for the run
    reconstruction_id: int
        identifier for reconstruction being processed

    Returns
    -------
    dict with string keys:
        morphology: Morphology object
        soma_marker_z: z value from the marker file
        soma_depth: soma depth
        cut_thickness: slice thickness

    """

    input_json_key = f"{run_prefix}/{reconstruction_id}.json"
    input_json_response = s3.get_object(Bucket=working_bucket,
                                        Key=input_json_key)

    input_data = json.load(input_json_response["Body"])

    swc_key = f"{run_prefix}/{input_data['swc_file']}"
    soma_marker_key = f"{run_prefix}/{input_data['marker_file']}"

    swc_response = s3.get_object(
        Bucket=working_bucket,
        Key=swc_key,
    )

    soma_marker_response = s3.get_object(
        Bucket=working_bucket,
        Key=soma_marker_key,
    )

    morphology = morphology_from_swc(swc_response["Body"])

    soma_marker = compute_scale_correction.get_soma_marker_from_marker_file(
        soma_marker_response["Body"])

    return {
        "morphology": morphology,
        "soma_marker_z": soma_marker["z"],
        "soma_depth": input_data["cell_depth"],
        "cut_thickness": input_data["cut_thickness"],
    }
Example 6
def test_morphology_from_data_file_by_node_type(node_types=None):

    morphology = swc.morphology_from_swc(test_file)
    nodes = morphology.get_node_by_types(node_types)
    for node in nodes:
        # unfortunately, pandas automatically promotes numeric types to float in to_dict
        node['parent'] = int(node['parent'])
        node['id'] = int(node['id'])
        node['type'] = int(node['type'])

    node_id_cb = lambda node: node['id']
    parent_id_cb = lambda node: node['parent']

    filtered_morphology = Morphology(nodes, node_id_cb, parent_id_cb)
    return filtered_morphology
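The int() casts above work around a real pandas behavior: pulling a row out of a mixed-dtype frame coerces it to a single dtype, promoting integers to floats. A small self-contained demonstration:

import pandas as pd

df = pd.DataFrame({"id": [1], "parent": [-1], "x": [0.5]})
# Transposing (or any row-wise extraction) forces one dtype per row,
# so the integer columns come back as floats:
print(df.T.to_dict())  # {0: {'id': 1.0, 'parent': -1.0, 'x': 0.5}}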
Example 7
    def test_end_to_end(self):
        cmd = [
            'python', '-m', 'neuron_morphology.transforms.affine_transformer.'
            'apply_affine_transform'
        ]
        for key, value in self.input.items():
            cmd.append(f'--{key}')
            cmd.append(f'{value}')

        sp.check_call(cmd)

        transformed_swc = os.path.join(self.test_dir, self.output_swc_name)
        tf_morph = morphology_from_swc(transformed_swc)

        for node in tf_morph.nodes():
            assert np.allclose([node['x'], node['y'], node['z']],
                               self.transformed_vector[node['id']])
Example 8
def run_upright_angle(
    gradient_path: str,
    swc_path: str,
    node: Optional[List[float]] = None,
):

    try:
        gradient_field = xr.open_dataarray(gradient_path)
    except IOError as err:
        raise IOError(
            f"Cannot find gradient field file at {gradient_path}") from err

    morph = morphology_from_swc(swc_path)

    output = calculate_transform(gradient_field, morph, node)
    output["upright_transform_dict"] = output.pop(
        "upright_transform").to_dict()

    return output
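A hedged usage sketch; the file names are hypothetical, and only the "upright_transform_dict" key is guaranteed by the code above (any other keys in the output depend on calculate_transform):

result = run_upright_angle("gradient.nc", "cell.swc")
transform_dict = result["upright_transform_dict"]  # affine as a plain dict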
Example 9
def collect_inputs(working_bucket: str, run_prefix: str,
                   reconstruction_id: int, upright_swc_key: str):

    md_json_key = f"{run_prefix}/{reconstruction_id}.json"
    md_json_response = s3.get_object(Bucket=working_bucket, Key=md_json_key)
    metadata = json.load(md_json_response["Body"])

    marker_key = f"{run_prefix}/{metadata['marker_file']}"
    ccf_key = 'top_view_paths_10.h5'

    swc_response = s3.get_object(
        Bucket=working_bucket,
        Key=upright_swc_key,
    )

    marker_response = s3.get_object(
        Bucket=working_bucket,
        Key=marker_key,
    )
    ccf_response = s3.get_object(
        Bucket=working_bucket,
        Key=ccf_key,
    )

    morphology = morphology_from_swc(swc_response["Body"])
    soma_marker = read_soma_marker(marker_response["Body"])
    ccf_data = BytesIO(ccf_response["Body"].read())

    ccf_soma_location = dict(zip(['x', 'y', 'z'], metadata["ccf_soma_xyz"]))
    slice_transform = AffineTransform.from_list(metadata['slice_transform'])

    return {
        'morphology': morphology,
        'soma_marker': soma_marker,
        'ccf_soma_location': ccf_soma_location,
        'slice_transform': slice_transform,
        'slice_image_flip': metadata['slice_image_flip'],
        'ccf_path': ccf_data,
    }
Example 10
def main():
    parser = ArgSchemaParser(schema_type=InputParameters,
                             output_schema_type=OutputParameters)

    args = cp.deepcopy(parser.args)
    logging.getLogger().setLevel(args.pop("log_level"))

    if 'slice_transform_dict' in args:
        slice_transform = aff.AffineTransform.from_dict(
            args['slice_transform_dict'])
    elif 'slice_transform_list' in args:
        slice_transform = aff.AffineTransform.from_list(
            args['slice_transform_list'])
    else:
        raise ValueError('must provide either a slice_transform_dict '
                         'or a slice_transform_list')

    morphology = morphology_from_swc(args['swc_path'])
    soma_marker = read_soma_marker(args['marker_path'])
    ccf_soma_location = dict(zip(['x', 'y', 'z'], args["ccf_soma_location"]))

    (tilt_correction, tilt_transform) = run_tilt_correction(
        morphology,
        soma_marker,
        ccf_soma_location,
        slice_transform,
        args["slice_image_flip"],
        args['ccf_path'],
    )
    output = {
        'tilt_transform_dict': tilt_transform.to_dict(),
        'tilt_correction': tilt_correction,
        'inputs': parser.args
    }

    parser.output(output)
Example 11
def main():
    mod = ArgSchemaParser(schema_type=ApplyAffineSchema,
                          output_schema_type=OutputParameters)
    args = mod.args

    if 'affine_dict' in args:
        affine_transform = AffineTransform.from_dict(args['affine_dict'])
    elif 'affine_list' in args:
        affine_transform = AffineTransform.from_list(args['affine_list'])
    else:
        raise ValueError('must provide either an affine_dict or an affine_list')

    morph_in = morphology_from_swc(args['input_swc'])

    morph_out = affine_transform.transform_morphology(morph_in)

    morphology_to_swc(morph_out, args['output_swc'])

    output = {
        'inputs': args,
        'transformed_swc': args['output_swc'],
    }

    mod.output(output)
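A hedged sketch of the 'affine_list' input consumed above. An assumption to verify against your version of AffineTransform.from_list: a flat list of nine matrix entries followed by three translation terms (an identity transform here); the paths are hypothetical:

identity_args = {
    "input_swc": "in.swc",       # hypothetical path
    "output_swc": "out.swc",     # hypothetical path
    "affine_list": [1, 0, 0,
                    0, 1, 0,
                    0, 0, 1,
                    0, 0, 0],
}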
Example 12
def main():

    args = vars(parse_arguments(sys.argv[1:]))

    if args['dir'] is None and args['swc'] is None and args['marker'] is None:
        print(
            "You need to provide one of the following arguments: directory, swc file, marker file"
        )
        sys.exit(1)

    reconstruction_files = []
    swc_file = None
    marker_file = None
    if args['dir']:
        for file_name in args['dir']:
            if glob.has_magic(file_name):
                reconstruction_files += glob.glob(file_name)
            else:
                reconstruction_files.append(file_name)

        swc_files = [f for f in reconstruction_files if f.endswith('.swc')]
        marker_files = [
            f for f in reconstruction_files if f.endswith('.marker')
        ]

        if len(swc_files) > 1 or len(marker_files) > 1:
            print(
                "You cannot choose a directory with more than one swc or marker file"
            )
            sys.exit(1)
        if len(swc_files) == 0:
            print(
                "No swc file in the directory. No swc validation was done."
            )
            sys.exit(1)
        if marker_files:
            matching_morphology_name = marker_files[0].replace(
                '.marker', '.swc')
            if matching_morphology_name != swc_files[0]:
                print(
                    "No matching .swc file found. No marker validation was "
                    "done for:\n %s \n\n" % marker_files[0])
                sys.exit(1)
        swc_file = swc_files[0]
        marker_file = marker_files[0] if marker_files else None
    else:
        swc_file = args['swc']
        marker_file = args['marker']

    report = Report()
    try:
        morpho = swc.morphology_from_swc(swc_file)
        morpho.validate(strict=True)
        report.add_swc_results(swc_file, [])
    except InvalidMorphology as im:
        report.add_swc_results(swc_file, im.validation_errors)

    morphology = None
    try:
        # parse again with lenient validation so marker checks can still run
        # even when strict validation failed above
        morphology = swc.morphology_from_swc(swc_file)
        morphology.validate(strict=False)
    except InvalidMorphology as im:
        report.add_marker_results(marker_file, [
            MarkerValidationError(
                "Unable to parse matching SWC file "
                "to validate the marker file.", {}, "Fatal")
        ])
    if morphology:
        stats = statistics.morphology_statistics(morphology)
        report.add_swc_stats(swc_file, stats)

        if marker_file:
            try:
                results = validation.validate_marker(
                    marker.read_marker_file(marker_file), morphology)
                report.add_marker_results(marker_file, results)
            except InvalidMarkerFile as imf:
                report.add_marker_results(marker_file, imf.validation_errors)

    print(report.to_json())
    if report.has_results():
        sys.exit(1)
Example 13
def collect_morphology(bucket: str, swc_key: str):
    swc_response = s3.get_object(
        Bucket=bucket,
        Key=swc_key,
    )
    return morphology_from_swc(swc_response["Body"])