# Example #1
class Directories(DefaultSchema):
    """Filesystem locations consumed by the ecephys spike-sorting pipeline."""

    # Checkout root of ecephys_spike_sorting (must contain the modules dir).
    ecephys_directory = InputDir(
        help='Location of the ecephys_spike_sorting directory containing modules directory')
    # Raw Neuropixels recordings.
    npx_directory = InputDir(
        help='Location of raw neuropixels binary files')
    # Where Kilosort writes its results.
    kilosort_output_directory = OutputDir(
        help='Location of Kilosort output files')
    # Destination for NPX/CatGT processed data.
    extracted_data_directory = OutputDir(
        help='Location for NPX/CatGT processed files')
    # Scratch space for intermediate Kilosort output.
    kilosort_output_tmp = OutputDir(
        help='Location for temporary KS output')
# Example #2
class ExperimentPlane(DefaultSchema):
    """Metadata describing one imaging plane of an ophys experiment."""

    experiment_id = Int(
        required=True, description="Ophys experiment id.")
    storage_directory = OutputDir(
        required=True, description="Folder for output files.")
    roi_index = Int(
        required=True, description="Index of ROI in the scanimage metadata.")
    scanfield_z = Float(
        required=True,
        description="Z value of the scanfield for this experiment plane.")
    resolution = Float(
        required=True, description="Pixel size in microns.")
    # Offsets are relative to the gold reticle reference image.
    offset_x = Int(
        required=True, description="X offset of image from gold reticle.")
    offset_y = Int(
        required=True, description="Y offset of image from gold reticle.")
    rotation = Float(
        required=True, description="Rotation of image relative to gold reticle.")
class PiaWmStreamlineSchema(ArgSchema):
    """Arg Schema for run_pia_wm_streamlines"""

    # Boundary paths are flat strings of alternating x, y coordinates.
    pia_path_str = String(
        required=True,
        description='string alternating x, y coordinates outlining the pia')
    wm_path_str = String(
        required=True,
        description='string alternating x, y coordinates outlining the wm')
    soma_path_str = String(
        required=False,
        description=('string alternating x, y coordinates outlining the soma. '
                     'If provided, streamlines will be translated so that '
                     'the origin is at the soma'))
    resolution = Float(
        required=False, default=1,
        description='Resolution of pixels in microns')
    # Dirichlet boundary values for the Laplace problem.
    pia_fixed_value = Float(
        required=False, default=1,
        description='Fixed value pia boundary condition')
    wm_fixed_value = Float(
        required=False, default=0,
        description='Fixed value wm boundary condition')
    mesh_res = Int(
        required=False, default=20,
        description='Resolution for mesh for laplace solver')
    output_dir = OutputDir(
        required=True,
        description='Directory to write xarray results')
# Example #4
class PointMatchOptimizationParameters(RenderParameters):
    """Parameters for optimizing SIFT point-matching on a single tile pair."""

    stack = Str(
        required=True,
        description='Name of the stack containing the tile pair')
    tile_stack = Str(
        required=False, default=None, missing=None,
        description='Name of the stack that will hold these two tiles')
    tileId1 = Str(
        required=True,
        description='tileId of the first tile in the tile pair')
    tileId2 = Str(
        required=True,
        description='tileId of the second tile in the tile pair')
    pool_size = Int(
        required=False, default=10, missing=10,
        description='Pool size for parallel processing')
    # Nested option groups defined elsewhere in the module.
    SIFT_options = Nested(SIFT_options, required=True)
    outputDirectory = OutputDir(
        required=True,
        description='Parent directory in which subdirectories will be created to store images and point-match results from SIFT')
    url_options = Nested(url_options, required=True)
# Example #5
class PointMatchClientParametersQsub(
        RenderParameters, SIFTPointMatchParameters, SparkOptions):
    """Parameters for submitting a SIFT point-match Spark job through qsub."""

    sparkhome = InputDir(
        required=False,
        default="/allen/aibs/pipeline/image_processing/"
        "volume_assembly/utils/spark",
        missing="/allen/aibs/pipeline/image_processing/"
        "volume_assembly/utils/spark",
        description="Path to the spark home directory")
    pbs_template = InputFile(
        required=True,
        description="pbs template to wrap spark job")
    # FIX: default (30) and missing (10) disagreed; every other optional
    # field in this schema uses the same value for both, so align missing
    # with the documented default of 30 nodes.
    no_nodes = Int(
        required=False,
        default=30,
        missing=30,
        description='Number of nodes to run the pbs job')
    ppn = Int(
        required=False,
        default=30,
        missing=30,
        description='Number of processors per node (default = 30)')
    queue_name = Str(
        required=False,
        default='connectome',
        missing='connectome',
        description='Name of the queue to submit the job')
    logdir = OutputDir(
        required=True,
        description="location to set logging for qsub command"
    )
class MultIntensityCorrParams(StackTransitionParameters):
    """Parameters for multiplicative intensity correction of a stack."""

    correction_stack = Str(
        required=True,
        description='Correction stack (usually median stack for AT data)')
    output_directory = OutputDir(
        required=True,
        description='Directory for storing Images')
    # TODO add create_stack metadata
    cycle_number = Int(
        required=False,
        default=2,
        description="what cycleNumber to upload for output_stack on render")
    cycle_step_number = Int(
        required=False,
        default=1,
        description=("what cycleStepNumber to upload "
                     "for output_stack on render"))
    clip = Bool(
        required=False,
        default=True,
        description="whether to clip values")
    scale_factor = Float(
        required=False,
        default=1.0,
        description="scaling value")
    # Clip range defaults span the full uint16 range.
    clip_min = Int(
        required=False,
        default=0,
        description='Min Clip value')
    clip_max = Int(
        required=False,
        default=65535,
        description='Max Clip value')
# Example #7
class MontageSolverSchema(ArgSchema):
    """Inputs for the montage solver.

    Either ``data_dir`` or ``metafile`` must be supplied; the reference
    transform may come from the metafile, a reference file, or a dict.
    """

    data_dir = InputDir(
        required=False,
        description="directory containing metafile, images, and matches")
    metafile = InputFile(
        required=False,
        description=("fullpath to metafile. Helps in the case of multiple"
                     " metafiles in one directory. data_dir will take "
                     " os.path.dirname(metafile)"))
    output_dir = OutputDir(required=False,
                           missing=None,
                           default=None,
                           description="directory for output files")
    read_transform_from = Str(
        required=False,
        missing='metafile',
        default='metafile',
        validator=mm.validate.OneOf(['metafile', 'reffile', 'dict']),
        description="3 possible ways to read in the reference transform")
    ref_transform = InputFile(required=False,
                              missing=None,
                              default=None,
                              description="transform json")
    # FIX: keyword was misspelled `require=False`, which marshmallow silently
    # ignored, leaving the field required-by-default semantics ambiguous.
    ref_transform_dict = Dict(required=False,
                              missing=None,
                              description="transform in from memory")
    ransacReprojThreshold = Float(
        required=False,
        missing=10.0,
        default=10.0,
        description=("passed into cv2.estimateAffinePartial2D()"
                     "for RANSAC filtering of montage template matches"))
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        description=("tilespecs will be .json or .json.gz"))
    solver_templates = List(
        Str,
        required=True,
        description="input json basenames for the solver args")
    solver_template_dir = InputDir(
        required=True, description="location of the templates for the solver")

    @mm.post_load
    def check_solver_inputs(self, data):
        # Every solver template basename must resolve to an existing file
        # inside solver_template_dir.
        for args in data['solver_templates']:
            argpath = os.path.join(data['solver_template_dir'], args)
            if not os.path.isfile(argpath):
                raise mm.ValidationError("solver arg file doesn't exist: %s" %
                                         argpath)

    @mm.post_load
    def check_metafile(self, data):
        # At least one source of the metafile location must be provided.
        # (Use logical `and` rather than bitwise `&` for clarity.)
        if ('data_dir' not in data) and ('metafile' not in data):
            raise mm.ValidationError(" must specify either data_dir"
                                     " or metafile")
# Example #8
class PairwiseMatchingSchema(ArgSchema, CommonMatchingParameters):
    """Inputs for registering a moving experiment against a fixed one."""

    output_directory = OutputDir(
        required=True,
        description="destination for output files.")
    save_registered_image = fields.Bool(
        default=False,
        required=False,
        description='Whether to save registered image.')
    # The two experiments to register against each other.
    fixed = fields.Nested(PairwiseExperimentSchema)
    moving = fields.Nested(PairwiseExperimentSchema)
class MaterializedBoxParameters(argschema.schemas.DefaultSchema):
    """Parameters controlling materialization of flat tiles from a stack.

    Help-text typos fixed: "fromw which" -> "from which",
    "whther" -> "whether".
    """

    stack = Str(required=True,
                description=("stack from which boxes will be materialized"))
    rootDirectory = OutputDir(
        required=True,
        description=
        ("directory in which materialization directory structure will be "
         "created (structure is "
         "<rootDirectory>/<project>/<stack>/<width>x<height>/<mipMapLevel>/<z>/<row>/<col>.<fmt>)"
         ))
    width = Int(required=True,
                description=("width of flat rectangular tiles to generate"))
    height = Int(required=True,
                 description=("height of flat rectangular tiles to generate"))
    maxLevel = Int(required=False,
                   default=0,
                   description=("maximum mipMapLevel to generate."))
    fmt = Str(required=False,
              validator=validate.OneOf(['PNG', 'TIF', 'JPG']),
              description=("image format to generate mipmaps -- "
                           "PNG if not specified"))
    maxOverviewWidthAndHeight = Int(
        required=False,
        description=(
            "maximum pixel size for width or height of overview image.  "
            "If excluded or 0, no overview generated."))
    skipInterpolation = Boolean(
        required=False,
        description=("whether to skip interpolation (e.g. DMG data)"))
    binaryMask = Boolean(
        required=False,
        description=("whether to use binary mask (e.g. DMG data)"))
    label = Boolean(
        required=False,
        description=("whether to generate single color tile labels rather "
                     "than actual images"))
    createIGrid = Boolean(required=False,
                          description=("whether to create an IGrid file"))
    forceGeneration = Boolean(
        required=False, description=("whether to regenerate existing tiles"))
    renderGroup = Int(
        required=False,
        description=(
            "index (1-n) identifying coarse portion of layer to render"))
    numberOfRenderGroups = Int(
        required=False,
        description=(
            "used in conjunction with renderGroup, total number of groups "
            "being used"))
    filterListName = Str(
        required=False,
        description=("Apply specified filter list to all renderings"))
# Example #10
class RenderSectionAtScaleParameters(RenderParameters):
    """Parameters for rendering downsampled section images from a stack."""

    input_stack = Str(
        required=True,
        description='Input stack to make the downsample version of')
    image_directory = OutputDir(
        required=True,
        description='Directory to save the downsampled sections')
    imgformat = Str(
        required=False, default="png", missing="png",
        description='Image format (default -  png)')
    doFilter = Boolean(
        required=False, default=True, missing=True,
        description='Apply filtering before rendering')
    fillWithNoise = Boolean(
        required=False, default=False, missing=False,
        description='Fill image with noise (default - False)')
    scale = Float(
        required=True,
        description='scale of the downsampled sections')
    # -1 means "use the stack's own z range".
    minZ = Int(
        required=False, default=-1, missing=-1,
        description='min Z to create the downsample section from')
    maxZ = Int(
        required=False, default=-1, missing=-1,
        description='max Z to create the downsample section from')
    filterListName = Str(
        required=False,
        description=("Apply specified filter list to all renderings"))
    bounds = Nested(Bounds, required=False, default=None, missing=None)
    use_stack_bounds = Boolean(
        required=False, default=False, missing=False,
        description=
        'Do you want to use stack bounds while downsampling?. Default=False')
    pool_size = Int(
        required=False, default=20, missing=20,
        description='number of parallel threads to use')

    @post_load
    def validate_data(self, data):
        # FIXME will be able to remove with render-python tweak
        if data.get('filterListName') is None:
            return
        warnings.warn(
            "filterListName not implemented -- will use default behavior",
            UserWarning)
# Example #11
class SolverSchema(ArgSchema):
    """Inputs for a single transform solve over loaded data."""

    data = Nested(DataLoaderSchema)
    transform = Dict(
        required=True,
        description="dict containing transform specification")
    # Optional leave-one-out index for cross-validation style solves.
    leave_out_index = Int(
        required=False, missing=None, default=None,
        description="index to leave out of data")
    output_dir = OutputDir(
        required=False, missing=None, default=None,
        description="path for writing output json of transform")
class PtMatchOptimizationParameters(RenderParameters):
    """Parameters for optimizing SIFT settings over sampled tile pairs."""

    stack = Str(
        required=True,
        description=(
            'Name of the stack containing the tile pair (not the base stack)'))
    tile_stack = Str(
        required=False, default=None, missing=None,
        description='Name of the stack that will hold these two tiles')
    tilepair_file = InputFile(required=True, description='Tile pair file')
    no_tilepairs_to_test = Int(
        required=False, default=10, missing=10,
        description=('Number of tilepairs to be tested for '
                     'optimization - default = 10'))
    filter_tilepairs = Bool(
        required=False, default=False, missing=False,
        description=("Do you want filter the tilpair file for pairs "
                     "that overlap? - default = False"))
    max_tilepairs_with_matches = Int(
        required=False, default=0, missing=0,
        description=('How many tilepairs with matches required for '
                     'selection of optimized parameter set'))
    numberOfThreads = Int(
        required=False, default=5, missing=5,
        description='Number of threads to run point matching job')
    SIFT_options = Nested(SIFT_options, required=True)
    outputDirectory = OutputDir(
        required=True,
        description=(
            'Parent directory in which subdirectories will be '
            'created to store images and point-match results from SIFT'))
    url_options = Nested(url_options, required=True)
    pool_size = Int(
        required=False, default=10, missing=10,
        description='Pool size for parallel processing')

    @post_load
    def validate_data(self, data):
        # A value of 0 means "require matches for all tested tilepairs".
        if data['max_tilepairs_with_matches'] == 0:
            data['max_tilepairs_with_matches'] = data['no_tilepairs_to_test']
# Example #13
class StagedSolveSchema(ArgSchema):
    """Inputs for a staged solve applying a sequence of transforms."""

    data = Nested(DataLoaderSchema)
    transforms = List(
        Dict,
        required=True,
        description="list of transform arg dicts")
    # Optional leave-one-out index for cross-validation style solves.
    leave_out_index = Int(
        required=False, missing=None, default=None,
        description="index to leave out of data")
    output_dir = OutputDir(
        required=False, missing=None, default=None,
        description="path for writing output json of transform")
class MakeDownsampleSectionStackParameters(RenderParameters):
    """Parameters for building a downsampled copy of a stack."""

    input_stack = Str(
        required=True,
        metadata={'description': 'stack to make a downsample version of'})
    scale = Float(required=False,
                  default=.01,
                  metadata={'description': 'scale to make images'})
    # FIX: metadata was written as a set literal with a typo'd key
    # ({'decription', '...'}), so the description was silently lost.
    image_directory = OutputDir(
        required=True,
        metadata={'description': 'path to save section images'})
    output_stack = Str(required=True,
                       metadata={'description': 'output stack to name'})
    pool_size = Int(
        required=False,
        default=20,
        metadata={'description': 'number of parallel threads to use'})
class InputSchema(ArgSchema):
    """Inputs for splitting mesoscope 2p tiffs into per-plane outputs."""

    log_level = Str(required=False, default="INFO")
    # The three tiff inputs produced by a mesoscope session.
    depths_tif = InputFile(
        required=True,
        description="Full path to depth 2p tiff file.")
    surface_tif = InputFile(
        required=True,
        description="Full path to surface 2p tiff file.")
    timeseries_tif = InputFile(
        required=True,
        description="Full path to timeseries tiff file.")
    storage_directory = OutputDir(
        required=True,
        description="Folder for column stack outputs.")
    plane_groups = Nested(PlaneGroup, many=True)
    test_mode = Int(
        default=0,
        description=("Flag to run without actually splitting data. For testing"
                     " runner mechanism and metadata. Testing of splitting "
                     "is handled in testing for the mesoscope_2p package."))
class InputParameters(ArgSchema):
    """Top-level inputs for the eye-tracking pipeline."""

    output_dir = OutputDir(
        default="./",
        description="Directory in which to store data output files")
    input_source = InputFile(
        required=True,
        description="Path to input movie")
    # Bounding boxes default to empty arrays (auto-detect downstream).
    pupil_bounding_box = NumpyArray(dtype="int", default=[])
    cr_bounding_box = NumpyArray(dtype="int", default=[])
    start_frame = Int(description="Frame of movie to start processing at")
    stop_frame = Int(description="Frame of movie to end processing at")
    frame_step = Int(description=(
        "Interval of frames to process. Used for skipping frames,"
        "if 1 it will process every frame between start and stop"))
    # Nested parameter groups for each pipeline stage.
    ransac = Nested(RansacParameters)
    annotation = Nested(AnnotationParameters)
    starburst = Nested(StarburstParameters)
    eye_params = Nested(EyeParameters)
    qc = Nested(QCParameters)
# Example #17
class TraceExtractionInputSchema(ArgSchema):
    """Inputs for extracting fluorescence traces from a corrected stack."""

    log_level = LogLevel(
        default='INFO',
        description="set the logging level of the module")
    motion_border = Nested(
        MotionBorder,
        required=True,
        description=("border widths - pixels outside the border are "
                     "considered invalid"))
    storage_directory = OutputDir(
        required=True,
        description="used to set output directory")
    motion_corrected_stack = H5InputFile(
        required=True,
        description="path to h5 file containing motion corrected image stack")
    rois = Nested(
        ExtractROISchema,
        many=True,
        description="specifications of individual regions of interest")
    # Present only for LIMS-schema compatibility; unused by this module.
    log_0 = InputFile(
        required=False,
        description=("path to motion correction output csv. "
                     "NOTE: not used, but provided by LIMS schema."))
# Example #18
class SolverSchema(ArgSchema):
    """Inputs for fitting a LIN/POLY/TPS transform to loaded data."""

    data = Nested(DataLoaderSchema)
    regularization = Nested(regularization)
    # Optional leave-one-out index for cross-validation style solves.
    leave_out_index = Int(
        required=False, missing=None, default=None,
        description="index to leave out of data")
    model = Str(
        required=False, default='TPS', missing='TPS',
        description=("LIN, POLY, or TPS for linear, polynomial, "
                     "thin plate spline"))
    npts = Int(
        required=False, missing=None, default=None,
        description="number of pts per axis for TPS controls")
    output_dir = OutputDir(
        required=False, missing=None, default=None,
        description="path for writing output json of transform")
# Example #19
class RoughQCSchema(RenderParameters):
    """Inputs for generating QC plots of a rough alignment."""

    input_downsampled_stack = Str(
        required=True,
        description="Pre rough aligned downsampled stack")
    output_downsampled_stack = Str(
        required=True,
        description="Rough aligned stack name")
    minZ = Int(required=True, description="min z")
    maxZ = Int(required=True, description="max z")
    pool_size = Int(
        required=False, default=10, missing=10,
        description="Pool size")
    output_dir = OutputDir(
        required=False, default=None, missing=None,
        description="temp filename to save fig")
    out_file_format = Str(
        required=False,
        default="pdf",
        validator=mm.validate.OneOf(['html', 'pdf']),
        description=
        "Do you want the output to be bokeh plots in html (option = 'html') or pdf files for plots (option = 'pdf', default)"
    )
# Example #20
class NwayMatchSummarySchema(ArgSchema):
    """Inputs for producing a summary plot of an nway matching run.

    The matching input/output can be given either directly (``nway_input`` /
    ``nway_output``) or as file paths (``input_file`` / ``output_file``);
    the pre_load hook reconciles the two.
    """

    input_file = InputFile(
        required=False,
        desc="Input *.json file path to nway matching."
    )
    output_file = InputFile(
        required=False,
        desc="Output *.json file from nway matching."
    )
    # TODO: eliminate this non-specific Dict
    nway_input = fields.Dict(
        required=True,
        desc="Input to nway matching in Python dictionary form."
    )
    nway_output = fields.Nested(NwayMatchingOutputNoPlotsSchema)
    output_directory = OutputDir(
        required=True,
        description="Destination for summary plot output file(s).")

    @mm.pre_load
    def fill_dict_inputs(self, data: dict, **kwargs) -> dict:
        """Load nway_input/nway_output from their file counterparts.

        FIX: pre_load runs before required-field validation, so the keys may
        legitimately be absent; use dict.get() instead of indexing, which
        raised KeyError for missing keys.
        """
        if not data.get('nway_input'):
            with open(data['input_file'], 'r') as f:
                input_dict = json.load(f)
            data['nway_input'] = input_dict
        elif data.get('input_file'):
            logger.warning("Both --nway_input and --input_file were provided "
                           "so --input_file will be ignored.")

        if not data.get('nway_output'):
            with open(data['output_file'], 'r') as f:
                output_dict = json.load(f)
            data['nway_output'] = output_dict
        elif data.get('output_file'):
            logger.warning("Both --nway_output and --output_file were "
                           "provided so --output_file will be ignored.")

        return data
# Example #21
class MeshLensCorrectionSchema(ArgSchema):
    """Inputs for mesh-based lens correction.

    Exactly one of (tilespecs, tilespec_file) and exactly one of
    (matches, match_file) must be provided.
    """

    # FIX: keyword was misspelled `missinf=1000`; marshmallow silently
    # ignored it, so an omitted nvertex had no deserialized value.
    nvertex = Int(required=False,
                  default=1000,
                  missing=1000,
                  description="maximum number of vertices to attempt")
    tilespec_file = InputFile(required=False,
                              description="path to json of tilespecs")
    tilespecs = List(Dict,
                     required=False,
                     description="list of dict of tilespecs")
    match_file = InputFile(required=False,
                           description="path to json of matches")
    matches = List(Dict, required=False, description="list of dict of matches")
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    output_dir = OutputDir(required=False,
                           description="directory for output files")
    outfile = Str(required=False,
                  description=("Basename to which resolved json output of "
                               "lens correction is written"))
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        description=("tilespecs will be .json or .json.gz"))
    timestamp = Boolean(required=False,
                        missing=False,
                        default=False,
                        description="add a timestamp to basename output")

    @mm.post_load
    def one_of_two(self, data):
        """Require exactly one member of each (direct, file) input pair."""
        for a, b in [['tilespecs', 'tilespec_file'], ['matches',
                                                      'match_file']]:
            # Equality of presence means both or neither were given -- the
            # two invalid cases (this is NOT xor; it rejects xor's complement).
            if (a in data) == (b in data):
                raise mm.ValidationError(
                    'must specify one and only one of %s or %s' % (a, b))
# Example #22
class LensCorrectionSchema(ArgSchema):
    """Inputs for lens correction from a metafile directory."""

    data_dir = InputDir(
        required=True,
        description="directory containing metafile, images, and matches")
    output_dir = OutputDir(required=False,
                           description="directory for output files")
    mask_file = InputFile(required=False,
                          default=None,
                          missing=None,
                          description="mask to apply to each tile")
    # FIX: keyword was misspelled `missinf=1000`; marshmallow silently
    # ignored it, so an omitted nvertex had no deserialized value.
    nvertex = Int(required=False,
                  default=1000,
                  missing=1000,
                  description="maximum number of vertices to attempt")
    ransac_thresh = Float(required=False,
                          default=5.0,
                          missing=5.0,
                          description="ransac outlier threshold")
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    ignore_match_indices = List(
        Int,
        required=False,
        default=None,
        missing=None,
        description=("debug feature for ignoring certain indices"
                     " of the match collection"))
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        description=("tilespecs will be .json or .json.gz"))
    timestamp = Boolean(required=False,
                        missing=False,
                        default=False,
                        description="add a timestamp to basename output")
# Example #23
class BasicOutputDir(ArgSchema):
    """Minimal schema exposing a single required output directory."""

    output_dir = OutputDir(
        required=True,
        description="basic output dir")
class QCParameters(DefaultSchema):
    """Options controlling QC plot generation and placement."""

    generate_plots = Bool(
        description="Flag for whether or not to output QC plots",
        default=EyeTracker.DEFAULT_GENERATE_QC_OUTPUT)
    output_dir = OutputDir(
        description="Folder to store QC outputs",
        default="./qc")
# Example #25
class SolverOptionsParameters(DefaultSchema):
    """Options passed through to the (MATLAB-style) alignment solver.

    Help-text typos fixed: "rquired" -> "required",
    "Contraint" -> "Constraint".
    """

    degree = Int(
        required=False,
        default=1,
        missing=1,
        description=
        "Degree of required transformation (0 - Rigid, 1 - Affine(default), 2 - Polynomial)"
    )
    solver = Str(
        required=False,
        default="backslash",
        missing="backslash",
        description="type of solver to solve the system (default - backslash)")
    close_stack = Bool(
        required=False,
        default=False,
        description=
        "whether the solver should close the stack after uploading results")
    transfac = Float(required=False,
                     default=0.00001,
                     missing=0.00001,
                     description="translation factor")
    lambda_value = Float(required=False,
                         default=1000,
                         missing=1000,
                         description="lambda for the solver")
    edge_lambda = Float(required=False,
                        default=0.005,
                        missing=0.005,
                        description="edge lambda for solver regularization")
    nbrs = Int(
        required=False,
        default=2,
        missing=2,
        description=
        "number of neighboring sections to consider (applies for cross section point matches)"
    )
    nbrs_step = Int(required=False,
                    default=1,
                    missing=1,
                    description="neighbors step")
    xs_weight = Float(required=True,
                      description="Cross section point match weights")
    min_points = Int(
        required=False,
        default=5,
        missing=5,
        description=
        "Minimum number of points correspondences required per tile pair")
    max_points = Int(
        required=False,
        default=200,
        missing=200,
        description=
        "Maximum number of point correspondences required per tile pair")
    filter_point_matches = Int(
        required=False,
        default=1,
        missing=1,
        description="Filter point matches from collection (default = 1)")
    outlier_lambda = Int(required=False,
                         default=1000,
                         missing=1000,
                         description="lambda value for outliers")
    min_tiles = Int(required=False,
                    default=3,
                    missing=3,
                    description="Minimum number of tiles in section")
    Width = Int(required=True, description="Width of the tiles")
    Height = Int(required=True, description="Height of the tiles")
    outside_group = Int(required=False,
                        default=0,
                        missing=0,
                        description="Outside group")
    matrix_only = Int(required=False,
                      default=0,
                      missing=0,
                      description="matrix only")
    distribute_A = Int(required=False,
                       default=16,
                       missing=16,
                       description="Distribute A matrix")
    dir_scratch = OutputDir(required=True, description="Scratch directory")
    distributed = Int(required=False,
                      default=0,
                      missing=0,
                      description="Distributed parameter of solver")
    use_peg = Int(required=False,
                  default=0,
                  missing=0,
                  description="Use pegs? (default = 0)")
    verbose = Int(required=False,
                  default=0,
                  missing=0,
                  description="Verbose output from solver needed?")
    debug = Int(required=False,
                default=0,
                missing=0,
                description="turn on debug mode (default = 0 - off)")
    constrain_by_z = Int(required=False,
                         default=0,
                         missing=0,
                         description="Constrain solution by z (default = 0)")
    sandwich = Int(required=False,
                   default=0,
                   missing=0,
                   description="sandwich factor of solver")
    # NOTE(review): 1e15 is a float default on an Int field -- presumably
    # accepted unvalidated by the schema; confirm downstream consumers
    # tolerate a float here before changing.
    constraint_fac = Int(required=False,
                         default=1e15,
                         missing=1e15,
                         description="Constraint factor")
    pmopts = Nested(PointMatchFilteringOptions,
                    required=True,
                    description="Point match filtering options for solver")
    pastix = Nested(PastixOptions,
                    required=True,
                    description="Pastix solver options")
# Example #26
class PointMatchClientParametersSlurmSpark(
        SlurmSparkParameters, SIFTPointMatchParameters):
    """Parameters for running the point-match client via slurm-spark."""

    logdir = OutputDir(
        required=False,
        description="location to set logging for slurm-spark command")
# Example #27
class TilePairClientParameters(RenderParameters):
    """Parameters for the render-ws TilePairClient.

    Describes the source stack, z range, and neighbor-selection options
    used to generate tile pairs, plus where to write the tilepair json.
    """

    stack = Str(
        required=True,
        description="input stack to which tilepairs need to be generated")
    baseStack = Str(
        required=False,
        default=None,
        missing=None,
        description="Base stack")
    minZ = Int(
        required=False,
        default=None,
        missing=None,
        description="z min for generating tilepairs")
    maxZ = Int(
        required=False,
        default=None,
        missing=None,
        description="z max for generating tilepairs")
    xyNeighborFactor = Float(
        required=False,
        default=0.9,
        description="Multiply this by max(width, height) of "
        "each tile to determine radius for locating neighbor tiles")
    zNeighborDistance = Int(
        required=False,
        default=2,
        missing=2,
        description="Look for neighbor tiles with z values less than "
        "or equal to this distance from the current tile's z value")
    excludeCornerNeighbors = Bool(
        required=False,
        default=True,
        missing=True,
        description="Exclude neighbor tiles whose center x and y is "
        "outside the source tile's x and y range respectively")
    excludeSameLayerNeighbors = Bool(
        required=False,
        default=False,
        missing=False,
        description="Exclude neighbor tiles in the "
        "same layer (z) as the source tile")
    excludeCompletelyObscuredTiles = Bool(
        required=False,
        default=True,
        missing=True,
        description="Exclude tiles that are completely "
        "obscured by reacquired tiles")
    output_dir = OutputDir(
        required=True,
        description="Output directory path to save the tilepair json file")
    memGB = Str(
        required=False,
        default='6G',
        missing='6G',
        description="Memory for the java client to run")

    @post_load
    def validate_data(self, data):
        """Default baseStack to the input stack when not provided.

        NOTE: marshmallow replaces the loaded result with a post_load
        processor's return value, so the processed dict must be returned
        (the original returned None, which would discard the data).
        """
        if data['baseStack'] is None:
            data['baseStack'] = data['stack']
        return data
Beispiel #28
0
class Directories(DefaultSchema):
    """Locations of spike-sorting output and NPX extraction directories."""

    # Directory holding Kilosort spike-sorting results
    kilosort_output_directory = OutputDir(
        help='Location of Kilosort output files')
    # Directory into which NPX files are extracted
    extracted_data_directory = OutputDir(
        help='Location for NPX file extraction')
class MeshLensCorrectionSchema(PointMatchOpenCVParameters):
    """Parameters for mesh-based lens correction.

    Extends the OpenCV point match parameters with stack names, point
    match collection settings, solver regularization/QC criteria, and
    optional mask inputs/outputs.
    """

    input_stack = Str(required=True,
                      description="Name of raw input lens data stack")
    output_stack = Str(required=True,
                       description="Name of lens corrected output stack")
    overwrite_zlayer = Bool(required=False,
                            default=True,
                            missing=True,
                            description="Overwrite z layer (default = True)")
    rerun_pointmatch = Bool(required=False,
                            default=True,
                            missing=True,
                            description="delete pointmatch values and rerun")
    close_stack = Bool(required=False,
                       default=True,
                       missing=True,
                       description="Close input stack")
    do_montage_QC = Bool(required=False,
                         default=True,
                         missing=True,
                         description="perform montage QC on stack result")
    match_collection = Str(required=True,
                           description="name of point match collection")
    metafile = Str(required=False, description="fullpath of metadata file")
    metafile_uri = Str(required=True,
                       description="uri_handler uri of metafile object")
    z_index = Int(required=True,
                  description="z value for the lens correction data in stack")
    ncpus = Int(required=False,
                default=-1,
                description="max number of cpus to use")
    nvertex = Int(required=False,
                  default=1000,
                  missing=1000,
                  description="maximum number of vertices to attempt")
    output_dir = OutputDir(
        required=False,
        default=None,
        missing=None,
        description="output dir to save tile pair file and qc json")
    outfile = Str(required=True,
                  description=("File to which json output of lens correction "
                               "(leaf TransformSpec) is written"))
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    sectionId = Str(required=True, default="xxx", description="section Id")
    mask_coords = List(List(Int),
                       required=False,
                       default=None,
                       missing=None,
                       cli_as_single_argument=True,
                       description="Nx2 list of in-order bound coordinates")
    mask_dir = OutputDir(required=False,
                         default=None,
                         missing=None,
                         description="directory for saving masks")
    mask_file = InputFile(required=False,
                          default=None,
                          missing=None,
                          description="explicit mask setting from file")

    @marshmallow.pre_load
    def metafile_to_uri(self, data):
        """Derive metafile_uri from a posix metafile path before validation.

        NOTE: marshmallow replaces the input with a pre_load processor's
        return value, so the processed dict must be returned (the original
        returned None, which would discard the data).
        """
        asap.utilities.schema_utils.posix_to_uri(data, "metafile",
                                                 "metafile_uri")
        return data
Beispiel #30
0
class ModeOutputDirSchema(ArgSchema):
    """Arg schema exposing a single output directory created with mode 775."""

    # OutputDir validates/creates the directory; mode=0o775 sets permissions
    output_dir = OutputDir(mode=0o775,
                           required=True,
                           description="775 output directory")