Code example #1
File: schemas.py Project: RussTorres/em_stitch
class MontagePlotsSchema(ArgSchema):
    collection_path = InputFile(required=True,
                                description="point matches from here")
    resolved_path = InputFile(required=True,
                              description="resolved tiles from here")
    save_json_path = OutputFile(
        required=True,
        missing=None,
        default=None,
        description=("save residuals to this path if not None"))
    save_plot_path = OutputFile(
        required=True,
        missing=None,
        default=None,
        description=("save plot to this path if not None"))
    make_plot = Boolean(required=True,
                        missing=True,
                        default=True,
                        description=("make a plot?"))
    show = Boolean(required=True,
                   missing=True,
                   default=True,
                   description=("show on screen?"))
    pdf_out = OutputFile(required=True,
                         missing=None,
                         default=None,
                         description="where to write the pdf output")
Code example #2
class LensCorrectionParameters(ArgSchema):
    manifest_path = InputFile(required=True,
                              description='path to manifest file')
    project_path = Str(required=True, description='path to project directory')
    fiji_path = InputFile(required=True, description='path to FIJI')
    grid_size = Int(
        required=True,
        description=('maximum row and column to form square '
                     'subset of tiles starting from zero (based on filenames '
                     'which end in "\{row\}_\{column\}.tif")'))
    heap_size = Int(required=False,
                    default=20,
                    description="memory in GB to allocate to Java heap")
    outfile = Str(required=False,
                  description=("File to which json output of lens correction "
                               "(leaf TransformSpec) is written"))
    processing_directory = Str(
        required=False,
        allow_none=True,
        description=("directory to which trakem2 processing "
                     "directory will be written "
                     "(will place in project_path directory if "
                     "unspecified or create temporary directory if None)"))
    SIFT_params = Nested(SIFTParameters)
    align_params = Nested(AlignmentParameters)
    max_threads_SIFT = Int(required=False,
                           default=3,
                           description=("Threads specified for SIFT"))
Code example #3
class InputParameters(ArgSchema):
    swc_path = InputFile(description='path to swc file for soma location',
                         required=True)
    marker_path = InputFile(description='path to reconstruction marker file',
                            required=True)
    slice_image_flip = Boolean(description=(
        'indicates whether the image was flipped relative '
        'to the slice (avg_group_label.name = \'Flip Slice Indicator\')'),
                               required=True)
    ccf_soma_location = List(
        Float,
        description='Soma location (x,y,z) coordinates in CCF',
        required=True)
    slice_transform_list = List(
        Float,
        required=False,
        cli_as_single_argument=True,
        description='List defining the transform defining slice cut angle')
    slice_transform_dict = Nested(
        AffineDictSchema,
        description='Dict defining the transform defining the slice cut angle',
        required=False)
    ccf_path = InputFile(
        description='path to common cortical framework streamline file',
        required=True)

    @mm.validates_schema
    def validate_schema_input(self, data):
        validate_input_affine(data)
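For reference, a self-contained sketch (hypothetical schema, not the project's validate_input_affine) of the @mm.validates_schema pattern used above: the decorated method runs after field-level validation, so it can enforce cross-field rules such as rejecting input that supplies both transform variants.

# Standalone illustration of a schema-level validator; marshmallow 3 style.
import marshmallow as mm

class ExampleTransformSchema(mm.Schema):
    slice_transform_list = mm.fields.List(mm.fields.Float, required=False)
    slice_transform_dict = mm.fields.Dict(required=False)

    @mm.validates_schema
    def one_transform_only(self, data, **kwargs):
        if "slice_transform_list" in data and "slice_transform_dict" in data:
            raise mm.ValidationError(
                "supply slice_transform_list or slice_transform_dict, not both")

ExampleTransformSchema().load({"slice_transform_list": [1.0, 0.0, 0.0]})  # ok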
Code example #4
class PairwiseExperimentSchema(OnPremExperimentSchema):
    nice_mask_path = InputFile(
        required=True,
        description="path to mask tiff with unique labels")
    nice_dict_path = InputFile(
        required=True,
        description="path to dict for mask labels to LIMS ids")
Code example #5
class PlaneGroup(DefaultSchema):
    local_z_stack_tif = InputFile(
        required=True,
        description="Full path to local z stack tiff file for this group.")
    column_z_stack_tif = InputFile(
        description="Full path to column z stack tiff file.")
    ophys_experiments = Nested(ExperimentPlane, many=True)
Code example #6
File: schemas.py Project: RussTorres/em_stitch
class GenerateEMTileSpecsParameters(ArgSchema):
    metafile = InputFile(
        required=True,
        description="metadata file containing TEMCA acquisition data")
    maskUrl = InputFile(required=False,
                        default=None,
                        missing=None,
                        description="absolute path to image mask to apply")
    image_directory = InputDir(
        required=False,
        description=("directory used in determining absolute paths to images. "
                     "Defaults to parent directory containing metafile "
                     "if omitted."))
    maximum_intensity = Int(
        required=False,
        default=255,
        description=("intensity value to interpret as white"))
    minimum_intensity = Int(
        required=False,
        default=0,
        description=("intensity value to interpret as black"))
    z = Float(required=False, default=0, description=("z value"))
    sectionId = Str(
        required=False,
        description=("sectionId to apply to tiles during ingest.  "
                     "If unspecified will default to a string "
                     "representation of the float value of z_index."))
    output_path = OutputFile(required=False,
                             description="directory for output files")
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        escription=("tilespecs will be .json or .json.gz"))
Code example #7
class SweepExtractionParameters(ArgSchema):
    input_nwb_file = InputFile(description="input nwb file", required=True)
    input_h5_file = InputFile(description="input h5 file", required=False)
    stimulus_ontology_file = OutputFile(description="stimulus ontology JSON",
                                        required=False)
    manual_seal_gohm = Float(description="blah")
    manual_initial_access_resistance_mohm = Float(description="blah")
    manual_initial_input_mohm = Float(description="blah")
Code example #8
class PipelineParameters(ArgSchema):
    input_nwb_file = InputFile(description="input nwb file", required=True)
    stimulus_ontology_file = OutputFile(description="blash", required=False)
    input_h5_file = InputFile(description="input h5 file", required=False)
    output_nwb_file = OutputFile(description="output nwb file", required=True)
    qc_fig_dir = OutputFile(description="output qc figure directory",
                            required=False)
    qc_criteria = Nested(QcCriteria, required=True)
    manual_sweep_states = Nested(ManualSweepState, required=False, many=True)
Code example #9
class FeatureExtractionParameters(ArgSchema):
    input_nwb_file = InputFile(description="input nwb file", required=True)
    stimulus_ontology_file = InputFile(description="stimulus ontology JSON",
                                       required=False)
    output_nwb_file = OutputFile(description="output nwb file", required=True)
    qc_fig_dir = OutputFile(description="output qc figure directory",
                            required=False)
    sweep_features = Nested(FxSweepFeatures, many=True)
    cell_features = Nested(CellFeatures, required=True)
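Nested fields such as sweep_features above consume structured sub-documents: a plain Nested field takes a dict validated against the nested schema, while many=True takes a list of such dicts. A sketch of the expected input shape (values are hypothetical; the real keys come from FxSweepFeatures and CellFeatures):

# Hypothetical input fragment illustrating Nested vs Nested(..., many=True).
input_fragment = {
    "input_nwb_file": "/data/cell.nwb",    # must exist (InputFile)
    "output_nwb_file": "/data/cell_out.nwb",
    "cell_features": {},                   # single dict for a plain Nested
    "sweep_features": [                    # list of dicts when many=True
        {},                                # each validated by FxSweepFeatures
        {},
    ],
}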
Code example #10
class MontageSolverSchema(ArgSchema):
    data_dir = InputDir(
        required=False,
        description="directory containing metafile, images, and matches")
    metafile = InputFile(
        required=False,
        description=("fullpath to metafile. Helps in the case of multiple"
                     " metafiles in one directory. data_dir will take "
                     " os.path.dirname(metafile)"))
    output_dir = OutputDir(required=False,
                           missing=None,
                           default=None,
                           description="directory for output files")
    read_transform_from = Str(
        required=False,
        missing='metafile',
        default='metafile',
        validator=mm.validate.OneOf(['metafile', 'reffile', 'dict']),
        description="3 possible ways to read in the reference transform")
    ref_transform = InputFile(required=False,
                              missing=None,
                              default=None,
                              description="transform json")
    ref_transform_dict = Dict(required=False,
                              missing=None,
                              description="transform passed in from memory")
    ransacReprojThreshold = Float(
        required=False,
        missing=10.0,
        default=10.0,
        description=("passed into cv2.estimateAffinePartial2D()"
                     "for RANSAC filtering of montage template matches"))
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        description=("tilespecs will be .json or .json.gz"))
    solver_templates = List(
        Str,
        required=True,
        description="input json basenames for the solver args")
    solver_template_dir = InputDir(
        required=True, description="location of the templates for the solver")

    @mm.post_load
    def check_solver_inputs(self, data):
        for args in data['solver_templates']:
            argpath = os.path.join(data['solver_template_dir'], args)
            if not os.path.isfile(argpath):
                raise mm.ValidationError("solver arg file doesn't exist: %s" %
                                         argpath)

    @mm.post_load
    def check_metafile(self, data):
        if ('data_dir' not in data) & ('metafile' not in data):
            raise mm.ValidationError(" must specify either data_dir"
                                     " or metafile")
Code example #11
class GenerateEMTileSpecsParameters(OutputStackParameters):
    metafile = InputFile(
        required=False,
        description="metadata file containing TEMCA acquisition data")
    metafile_uri = Str(
        required=True,
        description=("uri of metadata containing TEMCA acquisition data"))
    # FIXME maskUrl and image_directory are not required -- posix_to_uri should support this
    maskUrl = InputFile(required=False,
                        default=None,
                        missing=None,
                        description="absolute path to image mask to apply")
    maskUrl_uri = Str(required=False,
                      default=None,
                      missing=None,
                      description=("uri of image mask to apply"))
    image_directory = InputDir(
        required=False,
        description=("directory used in determining absolute paths to images. "
                     "Defaults to parent directory containing metafile "
                     "if omitted."))
    image_prefix = Str(
        required=False,
        description=(
            "prefix used in determining full uris of images in metadata. "
            "Defaults to using the / delimited prefix to "
            "the metadata_uri if omitted"))
    maximum_intensity = Int(
        required=False,
        default=255,
        description=("intensity value to interpret as white"))
    minimum_intensity = Int(
        required=False,
        default=0,
        description=("intensity value to interpret as black"))
    sectionId = Str(
        required=False,
        description=("sectionId to apply to tiles during ingest.  "
                     "If unspecified will default to a string "
                     "representation of the float value of z_index."))

    @pre_load
    def metafile_to_uri(self, data):
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "metafile", "metafile_uri")

    # FIXME not required -- does this work
    @pre_load
    def maskUrl_to_uri(self, data):
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "maskUrl", "maskUrl_uri")

    @pre_load
    def image_directory_to_prefix(self, data):
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "image_directory", "image_prefix")
Code example #12
class InputParameters(ArgSchema):
    swc_path = InputFile(description='path to swc file', required=True)
    marker_path = InputFile(description='path to marker file', required=True)
    soma_depth = Float(
        description='Recorded Depth of soma in slice',
        required=True,
    )
    cut_thickness = Float(
        description='Thickness of the slice when cut',
        required=False,
        default=350.,
    )
Code example #13
File: _schemas.py Project: tmchartrand/ipfx
class QcParameters(ArgSchema):
    stimulus_ontology_file = InputFile(description="blash", required=False)
    qc_criteria = Nested(QcCriteria, required=True)
    sweep_features = Nested(QcSweepFeatures, many=True, required=True)
    cell_features = Nested(CellFeatures)
    output_json = OutputFile(description="output feature json file",
                             required=True)
Code example #14
class PairwiseRigidSchema(StackTransitionParameters):
    match_collection = Str(required=True,
                           description="Point match collection name")
    gap_file = InputFile(
        required=False,
        default=None,
        missing=None,
        description="json file {k: v} where int(k) is a z value to skip"
        "entries in here that are not already missing will"
        "be omitted from the output stack"
        "i.e. this is a place one can skip sections")
    translate_to_positive = Bool(
        required=False,
        default=True,
        missing=True,
        description="translate output stack to positive space")
    translation_buffer = List(Float,
                              required=False,
                              default=[0, 0],
                              missing=[0, 0],
                              description=("minimum (x, y) of output stack if "
                                           "translate_to_positive=True"))
    anchor_stack = Str(
        required=False,
        default=None,
        missing=None,
        description=("fix transforms using tiles in this stack"))
Code example #15
class InputSchema(ArgSchema):
    log_level = Str(required=False, default="INFO")
    depths_tif = InputFile(required=True,
                           description="Full path to depth 2p tiff file.")
    surface_tif = InputFile(required=True,
                            description="Full path to surface 2p tiff file.")
    timeseries_tif = InputFile(
        required=True, description="Full path to timeseries tiff file.")
    storage_directory = OutputDir(
        required=True, description="Folder for column stack outputs.")
    plane_groups = Nested(PlaneGroup, many=True)
    test_mode = Int(
        default=0,
        description=("Flag to run without actually splitting data. For testing"
                     " runner mechanism and metadata. Testing of splitting "
                     "is handled in testing for the mesoscope_2p package."))
Code example #16
File: _schemas.py Project: rgerkin/AllenSDK
class CopiedFile(DefaultSchema):
    source = InputFile(required=True, description='copied from here')
    destination = FileExists(required=True, description='copied to here')
    key = String(required=False, description='passed from inputs')
    source_hash = List(Int,
                       required=False)  # int array vs bytes for JSONability
    destination_hash = List(Int, required=False)
Code example #17
class ApplyLensCorrectionParameters(StackTransitionParameters):
    transform = Nested(TransformParameters)
    refId = Str(allow_none=True,
                required=True,
                description=('Reference ID to use when uploading transform to '
                             'render database (Not Implemented)'))
    labels = List(Str,
                  required=False,
                  missing=['lens'],
                  default=['lens'],
                  description="labels for the lens correction transform")
    maskUrl = InputFile(
        required=False,
        default=None,
        missing=None,
        description='path to level 0 maskUrl to apply to stack')
    maskUrl_uri = Str(required=False,
                      default=None,
                      missing=None,
                      description="uri for level 0 mask image to apply")

    @marshmallow.pre_load
    def maskUrl_to_uri(self, data):
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "maskUrl", "maskUrl_uri")
Code example #18
class TransformLocalAnnotationParameters(RenderParameters):
    stack = Str(required=True,
                description='stack to look for transform annotations into')
    input_annotation_file = InputFile(required=True,
                                      description='path to annotation file')
    output_annotation_file = Str(
        required=True, description='path to save transformed annotation')
Code example #19
class DataLoaderSchema(ArgSchema):
    landmark_file = InputFile(required=True,
                              description=("csv file, one line per landmark"))
    actions = List(Str,
                   required=False,
                   missing=[],
                   default=[],
                   cli_as_single_argument=True,
                   description=("actions to perform on data"))
    header = List(Str,
                  required=False,
                  default=None,
                  missing=None,
                  cli_as_single_argument=True,
                  description=("passed as names=header to pandas.read_csv()"))
    sd_set = Nested(src_dst)
    all_flags = Bool(required=False,
                     missing=False,
                     default=False,
                     description="if False, returns only flag=True data")
    exclude_labels = List(Int,
                          required=True,
                          missing=[100000, 200000],
                          default=[100000, 200000],
                          description="ignore Pt labels in this range")
Code example #20
class PointMatchClientParametersQsub(
        RenderParameters, SIFTPointMatchParameters, SparkOptions):
    sparkhome = InputDir(
        required=False,
        default="/allen/aibs/pipeline/image_processing/"
        "volume_assembly/utils/spark",
        missing="/allen/aibs/pipeline/image_processing/"
        "volume_assembly/utils/spark",
        description="Path to the spark home directory")
    pbs_template = InputFile(
        required=True,
        description="pbs template to wrap spark job")
    no_nodes = Int(
        required=False,
        default=30,
        missing=10,
        description='Number of nodes to run the pbs job')
    ppn = Int(
        required=False,
        default=30,
        missing=30,
        description='Number of processors per node (default = 30)')
    queue_name = Str(
        required=False,
        default='connectome',
        missing='connectome',
        description='Name of the queue to submit the job')
    logdir = OutputDir(
        required=True,
        description="location to set logging for qsub command"
    )
Code example #21
class QCPointMatchResultsParameters(RenderParameters):
    matchcollections = List(
        Str,
        required=True,
        metadata={'description': 'list of match collections to analyze'})
    input_tilepairfile = InputFile(
        required=True,
        metadata={'description': 'file path of tile pair file to qc'})
    output_tilepairfile = Str(
        required=True,
        metadata={
            'description':
            'file path of where to save the tile pair file to qc'
        })
    figdir = Str(required=True,
                 metadata={'description': 'directory to save images'})
    min_matches = Int(
        required=False,
        default=5,
        metadata={
            'description':
            'number of matches between tiles to be considered a valid match'
        })
    pool_size = Int(
        required=False,
        default=20,
        metadata={'description': 'number of parallel threads to use'})
Code example #22
class PostProcessROIsInputSchema(ArgSchema):
    suite2p_stat_path = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to s2p output stat file containing ROIs generated "
                     "during source extraction"))
    motion_corrected_video = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to motion corrected video file *.h5"))
    motion_correction_values = InputFile(
        required=True,
        description=("Path to motion correction values for each frame "
                     "stored in .csv format. This .csv file is expected to"
                     "have a header row of either:\n"
                     "['framenumber','x','y','correlation','kalman_x',"
                     "'kalman_y']\n['framenumber','x','y','correlation',"
                     "'input_x','input_y','kalman_x',"
                     "'kalman_y','algorithm','type']"))
    output_json = OutputFile(
        required=True, description=("Path to a file to write output data."))
    maximum_motion_shift = Float(
        missing=30.0,
        required=False,
        allow_none=False,
        description=("The maximum allowable motion shift for a frame in pixels"
                     " before it is considered an anomaly and thrown out of "
                     "processing"))
    abs_threshold = Float(
        missing=None,
        required=False,
        allow_none=True,
        description=("The absolute threshold to binarize ROI masks against. "
                     "If not provided will use quantile to generate "
                     "threshold."))
    binary_quantile = Float(
        missing=0.1,
        validate=Range(min=0, max=1),
        description=("The quantile against which an ROI is binarized. If not "
                     "provided will use default function value of 0.1."))
    npixel_threshold = Int(
        default=50,
        required=False,
        description=("ROIs with fewer pixels than this will be labeled as "
                     "invalid and small size."))
    aspect_ratio_threshold = Float(
        default=0.2,
        required=False,
        description=("ROIs whose aspect ratio is <= this value are "
                     "not recorded. This captures a large majority of "
                     "Suite2P-created artifacts from motion border"))
    morphological_ops = Bool(
        default=True,
        required=False,
        description=("whether to perform morphological operations after "
                     "binarization. ROIs that are washed away to empty "
                     "after this operation are eliminated from the record. "
                     "This can apply to ROIs that were previously labeled "
                     "as small size, for example."))
Code example #23
class SIFTPointMatchParameters(
        argschema.ArgSchema,
        FeatureExtractionParameters, FeatureRenderParameters,
        FeatureRenderClipParameters,
        FeatureStorageParameters, MatchDerivationParameters,
        RenderParametersMatchWebServiceParameters):
    pairJson = InputFile(required=True, description=(
        "JSON file where tile pairs are stored (.json, .gz, .zip)"))
Code example #24
File: _schemas.py Project: rgerkin/AllenSDK
class FileToCopy(DefaultSchema):
    source = InputFile(required=True, description='copy from here')
    destination = String(
        required=True,
        description='copy to here (full path, not just directory!)')
    key = String(required=True,
                 description='will be passed through to outputs, allowing a '
                 'name or kind to be associated with this file')
Code example #25
class ImportTrakEM2AnnotationParameters(RenderTrakEM2Parameters):
    EMstack = Str(required=True,
                  description='stack to look for trakem2 patches in')
    trakem2project = InputFile(required=True,
                               description='trakem2 file to read in')
    output_annotation_file = OutputFile(
        required=True, description="place to save annotation output")
    output_bounding_box_file = OutputFile(
        required=True, description="place to save bounding box output")
Code example #26
class GenerateSerialEMTileSpecsParameters(OutputStackParameters):

    image_file = InputFile(
        required=True,
        description="metadata file containing SerialEM acquisition data (idoc)"
    )
    z_spacing = Float(required=False,
                      default=100.0,
                      description="spacing between slices/ section thickness")
Code example #27
File: _schemas.py Project: ww2470/ipfx
class Nwb2SinkConfig(DefaultSchema):
    """Configure an Nwb2Sink (by arguing the input nwb path)
    """
    nwb_path = InputFile(
        description=(
            "Path to input NWB. This will serve as the basis for the output "
            "file."
        ),
        required=True
    )
Code example #28
File: schemas.py Project: RussTorres/em_stitch
class ViewMatchesSchema(ArgSchema):
    collection_path = InputFile(
        required=False,
        description="if specified, will read collection from here")
    collection_basename = Str(
        required=True,
        missing="collection.json",
        default="collection.json",
        description=("basename for collection file if collection_path"
                     " not specified. will also check for .json.gz"))
    data_dir = InputDir(
        required=True,
        description=("directory containing image files. Will also be dir"
                     " dir for collection path, if not otherwise specified"))
    resolved_tiles = List(
        Str,
        required=True,
        missing=["resolvedtiles.json.gz", "resolvedtiles_input.json.gz"],
        description=("will take the transform from the first file"
                     " matching this list, if possible"))
    transform_file = InputFile(
        required=False,
        description=("if provided, will get lens correction transform "
                     " from here"))
    view_all = Boolean(
        required=True,
        missing=False,
        default=False,
        description=("will plot all the pair matches. can be useful "
                     "for lens correction to file. probably not desirable "
                     "for montage"))
    show = Boolean(required=True,
                   missing=True,
                   default=True,
                   description=("show on screen?"))
    match_index = Int(required=True,
                      missing=0,
                      default=0,
                      description=("which index of self.matches to plot"))
    pdf_out = OutputFile(required=True,
                         missing='./view_matches_output.pdf',
                         default='./view_matches_output.pdf',
                         description="where to write the pdf output")
Code example #29
class InputParameters(ArgSchema):
    gradient_path = InputFile(
        description=(
            "File at this location is a netcdf-formatted 2D gradient field."
            "Dimensions are x, y, dim, where dim defines the component (dx "
            "or dy) of the gradient"
        ),
        required=True
    )
    node = List(Float,
                description='[x,y,z] location in gradient field to get angle',
                cli_as_single_argument=True,
                default=[0, 0, 0],
                required=False)

    step = Int(
        description=(
            "The input gradient field will be decimated isometrically by "
            "this factor"
        ),
        default=1,
        required=False,
    )
    neighbors = Int(
        description=('number of x and y neighbor idxs to use for '
                     'interpolation, must be even and greater than 4'),
        required=False,
        default=12,
        # min data points must be >= (k+1)**2, k=1 for linear
    )

    swc_path = InputFile(
        description='path to swc file for soma location',
        required=True
    )
    
    @mm.validates_schema
    def validate_schema_input(self, data):
        validate_neighbors(data.get('neighbors'))
Code example #30
class DepthField(DefaultSchema):
    gradient_field_path = InputFile(description=(
        "The path to an xarray file describing the gradient of cortical "
        "depth on some domain. This file should contain one dataarray "
        "called 'gradient' which has dimensions drawn from "
        "{'x', 'y', 'z', 'component'}. The coords of x, y, z define the "
        "domain over which the gradient was computed. The component "
        "dimension describes the dimension associated with each component "
        "of the gradient and should have coords drawn from {'x', 'y', 'z'}."),
                                    required=True)
    depth_field_path = InputFile(
        description=("As gradient field, but gives depth values"),
        required=True)
    soma_origin = Bool(
        description="If true, the field is centered at the soma",
        required=True,
        default=True)
    pia_sign = Int(description="which direction is the pia",
                   required=True,
                   default=1,
                   validate=lambda val: val in {1, -1})
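The pia_sign field above rolls its own set-membership check with a lambda. Marshmallow ships an equivalent built-in, mm.validate.OneOf (used for read_transform_from in code example #10); a one-line alternative sketch:

# Equivalent declaration using the built-in OneOf validator (sketch):
# pia_sign = Int(description="which direction is the pia",
#                required=True, default=1,
#                validate=mm.validate.OneOf([1, -1]))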