Example #1
class SolverOptionsParameters(DefaultSchema):
    degree = Int(
        required=False,
        default=1,
        missing=1,
        description=
        "Degree of rquired transformation (0 - Rigid, 1 - Affine(default), 2 - Polynomial)"
    )
    solver = Str(
        required=False,
        default="backslash",
        missing="backslash",
        description="type of solver to solve the system (default - backslash)")
    close_stack = Bool(
        required=False,
        default=False,
        description=
        "whether the solver should close the stack after uploading results")
    transfac = Float(required=False,
                     default=0.00001,
                     missing=0.00001,
                     description="translation factor")
    lambda_value = Float(required=False,
                         default=1000,
                         missing=1000,
                         description="lambda for the solver")
    edge_lambda = Float(required=False,
                        default=0.005,
                        missing=0.005,
                        description="edge lambda for solver regularization")
    nbrs = Int(
        required=False,
        default=2,
        missing=2,
        description=
        "number of neighboring sections to consider (applies for cross section point matches)"
    )
    nbrs_step = Int(required=False,
                    default=1,
                    missing=1,
                    description="neighbors step")
    xs_weight = Float(required=True,
                      description="Cross section point match weights")
    min_points = Int(
        required=False,
        default=5,
        missing=5,
        description=
        "Minimum number of points correspondences required per tile pair")
    max_points = Int(
        required=False,
        default=200,
        missing=200,
        description=
        "Maximum number of point correspondences required per tile pair")
    filter_point_matches = Int(
        required=False,
        default=1,
        missing=1,
        description="Filter point matches from collection (default = 1)")
    outlier_lambda = Int(required=False,
                         default=1000,
                         missing=1000,
                         description="lambda value for outliers")
    min_tiles = Int(required=False,
                    default=3,
                    missing=3,
                    description="Minimum number of tiles in section")
    Width = Int(required=True, description="Width of the tiles")
    Height = Int(required=True, description="Height of the tiles")
    outside_group = Int(required=False,
                        default=0,
                        missing=0,
                        description="Outside group")
    matrix_only = Int(required=False,
                      default=0,
                      missing=0,
                      description="matrix only")
    distribute_A = Int(required=False,
                       default=16,
                       missing=16,
                       description="Distribute A matrix")
    dir_scratch = OutputDir(required=True, description="Scratch directory")
    distributed = Int(required=False,
                      default=0,
                      missing=0,
                      description="Distributed parameter of solver")
    use_peg = Int(required=False,
                  default=0,
                  missing=0,
                  description="Use pegs? (default = 0)")
    verbose = Int(required=False,
                  default=0,
                  missing=0,
                  description="Verbose output from solver needed?")
    debug = Int(required=False,
                default=0,
                missing=0,
                description="turn on debug mode (default = 0 - off)")
    constrain_by_z = Int(required=False,
                         default=0,
                         missing=0,
                         description="Constrain solution by z (default = 0)")
    sandwich = Int(required=False,
                   default=0,
                   missing=0,
                   description="sandwich factor of solver")
    constraint_fac = Int(required=False,
                         default=1e15,
                         missing=1e15,
                         description="Contraint factor")
    pmopts = Nested(PointMatchFilteringOptions,
                    required=True,
                    description="Point match filtering options for solver")
    pastix = Nested(PastixOptions,
                    required=True,
                    description="Pastix solver options")
Example #2
class SolveMontageSectionParameters(RenderParameters):
    first_section = Int(required=True,
                        description="Z index of the first section")
    last_section = Int(required=True,
                       description="Z index of the last section")
    clone_section_stack = Bool(
        required=False,
        default=True,
        description=
        "Whether to clone out a temporary single section stack from source_collection stack"
    )
    solver_executable = Str(
        required=True, description="Matlab solver executable with full path")
    verbose = Int(required=False,
                  default=0,
                  missing=0,
                  description="Verbose output from solver needed?")
    solver_options = Nested(SolverOptionsParameters,
                            required=True,
                            description="Solver parameters")
    source_collection = Nested(
        SourceStackParameters,
        required=True,
        description=
        "Input stack parameters, will be created and deleted after from input_stack"
    )
    target_collection = Nested(TargetStackParameters,
                               required=True,
                               description="Output stack parameters")
    source_point_match_collection = Nested(
        PointMatchCollectionParameters,
        required=True,
        description="Point match collection parameters")

    @post_load
    def add_missing_values(self, data):
        # cannot create "lambda" as a variable name in SolverParameters
        data['solver_options']['lambda'] = data['solver_options'][
            'lambda_value']
        data['solver_options'].pop('lambda_value', None)

        if data['source_collection']['owner'] is None:
            data['source_collection']['owner'] = data['render']['owner']
        if data['source_collection']['project'] is None:
            data['source_collection']['project'] = data['render']['project']
        if data['source_collection']['service_host'] is None:
            if data['render']['host'].find('http://') == 0:
                data['source_collection']['service_host'] = data['render'][
                    'host'][7:] + ":" + str(data['render']['port'])
            else:
                data['source_collection']['service_host'] = data['render'][
                    'host'] + ":" + str(data['render']['port'])
        if data['source_collection']['baseURL'] is None:
            data['source_collection'][
                'baseURL'] = "http://{}:{}/render-ws/v1".format(
                    data['render']['host'], data['render']['port'])
        if data['source_collection']['renderbinPath'] is None:
            data['source_collection']['renderbinPath'] = data['render'][
                'client_scripts']

        if data['target_collection']['owner'] is None:
            data['target_collection']['owner'] = data['render']['owner']
        if data['target_collection']['project'] is None:
            data['target_collection']['project'] = data['render']['project']
        if data['target_collection']['service_host'] is None:
            if data['render']['host'].find('http://') == 0:
                data['target_collection']['service_host'] = data['render'][
                    'host'][7:] + ":" + str(data['render']['port'])
            else:
                data['target_collection']['service_host'] = data['render'][
                    'host'] + ":" + str(data['render']['port'])
        if data['target_collection']['baseURL'] is None:
            data['target_collection'][
                'baseURL'] = "http://{}:{}/render-ws/v1".format(
                    data['render']['host'], data['render']['port'])
        if data['target_collection']['renderbinPath'] is None:
            data['target_collection']['renderbinPath'] = data['render'][
                'client_scripts']

        if data['source_point_match_collection']['server'] is None:
            data['source_point_match_collection']['server'] = data[
                'source_collection']['baseURL']
        if data['source_point_match_collection']['owner'] is None:
            data['source_point_match_collection']['owner'] = data['render'][
                'owner']
class QCParameters(DefaultSchema):
    generate_plots = Bool(
        default=EyeTracker.DEFAULT_GENERATE_QC_OUTPUT,
        description="Flag for whether or not to output QC plots")
    output_dir = OutputDir(default="./qc",
                           description="Folder to store QC outputs")
class DepthEstimationParams(DefaultSchema):
    hi_noise_thresh = Float(required=True,
                            default=50.0,
                            help='Max RMS noise for including channels')
    lo_noise_thresh = Float(required=True,
                            default=3.0,
                            help='Min RMS noise for including channels')

    save_figure = Bool(required=True, default=True)
    figure_location = OutputFile(required=True, default=None)

    smoothing_amount = Int(
        required=True,
        default=5,
        help='Gaussian smoothing parameter to reduce channel-to-channel noise')
    power_thresh = Float(
        required=True,
        default=2.5,
        help=
        'Ignore threshold crossings if power is above this level (indicates channels are in the brain)'
    )
    diff_thresh = Float(
        required=True,
        default=-0.07,
        help='Threshold to detect large increases in power at brain surface')
    freq_range = NumpyArray(
        required=True,
        default=[0, 10],
        help='Frequency band for detecting power increases')
    max_freq = Int(required=True,
                   default=150,
                   help='Maximum frequency to plot')
    channel_range = NumpyArray(
        required=True,
        default=[370, 380],
        help='Channels assumed to be out of brain, but in saline')
    n_passes = Int(
        required=True,
        default=10,
        help='Number of times to compute offset and surface channel')
    skip_s_per_pass = Int(
        required=True,
        default=5,
        help='Number of seconds between data chunks used on each pass'
    )  #default=100
    start_time = Float(
        required=True,
        default=0,
        help='First time (in seconds) for computing median offset')
    time_interval = Float(required=True,
                          default=5,
                          help='Number of seconds for computing median offset')

    nfft = Int(required=True,
               default=4096,
               help='Length of FFT used for calculations')

    air_gap = Int(
        required=True,
        default=100,
        help='Approximate number of channels between brain surface and air')
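
DepthEstimationParams uses argschema's NumpyArray field for freq_range and channel_range, so list input is deserialized into a numpy array. A minimal sketch with a hypothetical one-field schema (FreqOnly is not part of the example above):

# Hypothetical one-field schema illustrating NumpyArray deserialization;
# not part of DepthEstimationParams above.
from argschema.schemas import DefaultSchema
from argschema.fields import NumpyArray


class FreqOnly(DefaultSchema):
    freq_range = NumpyArray(required=True,
                            description="frequency band of interest")


result = FreqOnly().load({"freq_range": [0, 10]})
# Depending on the marshmallow version bundled with argschema, result is either
# a plain dict or an UnmarshalResult; the parsed freq_range should be a
# numpy array equal to [0, 10].
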
Example #5
class MyNestedSchema(DefaultSchema):
    a = Int(default=42, description="my first parameter")
    b = Bool(default=True, description="my boolean")
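
A sketch of how a small DefaultSchema like MyNestedSchema is typically consumed: nested inside a top-level ArgSchema and parsed with ArgSchemaParser. MyParameters and the example input below are assumptions for illustration, not part of the example above.

# Hypothetical top-level schema and parser built around MyNestedSchema above.
import argschema
from argschema.fields import Nested


class MyParameters(argschema.ArgSchema):
    nested = Nested(MyNestedSchema,
                    required=True,
                    description="nested parameters")


if __name__ == "__main__":
    input_data = {"nested": {"a": 7}}  # 'b' is omitted on purpose
    mod = argschema.ArgSchemaParser(input_data=input_data,
                                    schema_type=MyParameters,
                                    args=[])
    # mod.args['nested'] should come back with the default filled in,
    # e.g. {'a': 7, 'b': True}
    print(mod.args["nested"])
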
Example #6
class TilePairClientParameters(RenderParameters):
    stack = Str(
        required=True,
        description="input stack to which tilepairs need to be generated")
    baseStack = Str(
        required=False,
        default=None,
        missing=None,
        description="Base stack")
    minZ = Int(
        required=False,
        default=None,
        missing=None,
        description="z min for generating tilepairs")
    maxZ = Int(
        required=False,
        default=None,
        missing=None,
        description="z max for generating tilepairs")
    xyNeighborFactor = Float(
        required=False,
        default=0.9,
        description="Multiply this by max(width, height) of "
        "each tile to determine radius for locating neighbor tiles")
    zNeighborDistance = Int(
        required=False,
        default=2,
        missing=2,
        description="Look for neighbor tiles with z values less than "
        "or equal to this distance from the current tile's z value")
    excludeCornerNeighbors = Bool(
        required=False,
        default=True,
        missing=True,
        description="Exclude neighbor tiles whose center x and y is "
        "outside the source tile's x and y range respectively")
    excludeSameLayerNeighbors = Bool(
        required=False,
        default=False,
        missing=False,
        description="Exclude neighbor tiles in the "
        "same layer (z) as the source tile")
    excludeCompletelyObscuredTiles = Bool(
        required=False,
        default=True,
        missing=True,
        description="Exclude tiles that are completely "
        "obscured by reacquired tiles")
    output_dir = OutputDir(
        required=True,
        description="Output directory path to save the tilepair json file")
    memGB = Str(
        required=False,
        default='6G',
        missing='6G',
        description="Memory for the java client to run")

    @post_load
    def validate_data(self, data):
        if data['baseStack'] is None:
            data['baseStack'] = data['stack']
class Stack(argschema.schemas.DefaultSchema):
    stack = Str(required=True)
    transform = Nested(Transform, required=False)
    children = Nested("self", many=True)
    fuse_stack = Bool(required=False, default=True, description=(
        "whether to include this stack's in the output of fusion"))
class PostProcessROIsInputSchema(ArgSchema):
    suite2p_stat_path = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to s2p output stat file containing ROIs generated "
                     "during source extraction"))
    motion_corrected_video = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to motion corrected video file *.h5"))
    motion_correction_values = InputFile(
        default=None,
        allow_none=True,
        description=("Path to motion correction values for each frame "
                     "stored in .csv format. This .csv file is expected to"
                     "have a header row of either:\n"
                     "['framenumber','x','y','correlation','kalman_x',"
                     "'kalman_y']\n['framenumber','x','y','correlation',"
                     "'input_x','input_y','kalman_x',"
                     "'kalman_y','algorithm','type']\n"
                     "If not specified, an empty motion border will be used "
                     "(i.e. pixels all the way up to the edge of the field "
                     "of view will be considered valid)"))
    output_json = OutputFile(
        required=True,
        description=("Path to a file to write output data."))
    maximum_motion_shift = Float(
        missing=30.0,
        required=False,
        allow_none=False,
        description=("The maximum allowable motion shift for a frame in pixels"
                     " before it is considered an anomaly and thrown out of "
                     "processing"))
    abs_threshold = Float(
        missing=None,
        required=False,
        allow_none=True,
        description=("The absolute threshold to binarize ROI masks against. "
                     "If not provided will use quantile to generate "
                     "threshold."))
    binary_quantile = Float(
        missing=0.1,
        validate=Range(min=0, max=1),
        description=("The quantile against which an ROI is binarized. If not "
                     "provided will use default function value of 0.1."))
    npixel_threshold = Int(
        default=50,
        required=False,
        description=("ROIs with fewer pixels than this will be labeled as "
                     "invalid and small size."))
    aspect_ratio_threshold = Float(
        default=0.2,
        required=False,
        description=("ROIs whose aspect ratio is <= this value are "
                     "not recorded. This captures a large majority of "
                     "Suite2P-created artifacts from motion border"))
    morphological_ops = Bool(
        default=True,
        required=False,
        description=("whether to perform morphological operations after "
                     "binarization. ROIs that are washed away to empty "
                     "after this operation are eliminated from the record. "
                     "This can apply to ROIs that were previously labeled "
                     "as small size, for example."))
Example #9
class MeshLensCorrectionSchema(PointMatchOpenCVParameters):
    input_stack = Str(
        required=True,
        description="Name of raw input lens data stack")
    output_stack = Str(
        required=True,
        description="Name of lens corrected output stack")
    overwrite_zlayer = Bool(
        required=False,
        default=True,
        missing=True,
        description="Overwrite z layer (default = True)")
    rerun_pointmatch = Bool(
        required=False,
        default=True,
        missing=True,
        description="delete pointmatch values and rerun")
    close_stack = Bool(
        required=False,
        default=True,
        missing=True,
        description="Close input stack")
    do_montage_QC = Bool(
        required=False,
        default=True,
        missing=True,
        description="perform montage QC on stack result")
    match_collection = Str(
        required=True,
        description="name of point match collection")
    metafile = Str(
        required=False,
        description="fullpath of metadata file")
    metafile_uri = Str(
        required=True,
        description="uri_handler uri of metafile object")
    z_index = Int(
        required=True,
        description="z value for the lens correction data in stack")
    ncpus = Int(
        required=False,
        default=-1,
        description="max number of cpus to use")
    nvertex = Int(
        required=False,
        default=1000,
        missing=1000,
        description="maximum number of vertices to attempt")
    output_dir = OutputDir(
        required=False,
        default=None,
        missing=None,
        description="output dir to save tile pair file and qc json")
    outfile = Str(
        required=True,
        description=("File to which json output of lens correction "
                     "(leaf TransformSpec) is written"))
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    sectionId = Str(
        required=True,
        default="xxx",
        description="section Id")
    mask_coords = List(
        List(Int),
        required=False,
        default=None,
        missing=None,
        cli_as_single_argument=True,
        description="Nx2 list of in-order bound coordinates")
    mask_dir = OutputDir(
        required=False,
        default=None,
        missing=None,
        description="directory for saving masks")
    mask_file = InputFile(
        required=False,
        default=None,
        missing=None,
        description="explicit mask setting from file")

    @marshmallow.pre_load
    def metafile_to_uri(self, data):
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "metafile", "metafile_uri")
class ApplyRoughAlignmentTransformParameters(RenderParameters):
    montage_stack = mm.fields.Str(
        required=True, description='stack to make a downsampled version of')
    prealigned_stack = mm.fields.Str(
        required=False,
        default=None,
        missing=None,
        description=(
            "stack with dropped tiles corrected for stitching errors"))
    lowres_stack = mm.fields.Str(
        required=True,
        description='montage scape stack with rough aligned transform')
    output_stack = mm.fields.Str(
        required=True,
        description='output high resolution rough aligned stack')
    tilespec_directory = mm.fields.Str(
        required=True, description='path to save section images')
    map_z = mm.fields.Boolean(
        required=False,
        default=False,
        missing=False,
        description=('map the montage Z indices to the rough alignment '
                     'indices (default - False)'))
    remap_section_ids = mm.fields.Boolean(
        required=False,
        default=False,
        missing=False,
        description=('map section ids as well with the new z '
                     'mapping. Default = False'))
    consolidate_transforms = mm.fields.Boolean(
        required=False,
        default=True,
        missing=True,
        description=(
            'should the transforms be consolidated? (default - True)'))
    scale = mm.fields.Float(required=True,
                            description='scale of montage scapes')
    apply_scale = mm.fields.Boolean(required=False,
                                    default=False,
                                    missing=False,
                                    description='do you want to apply scale')
    pool_size = mm.fields.Int(required=False,
                              default=10,
                              missing=10,
                              description='pool size for parallel processing')
    new_z = List(Int,
                 required=False,
                 default=None,
                 cli_as_single_argument=True,
                 description="List of new z values to be mapped to")
    old_z = List(Int,
                 required=True,
                 cli_as_single_argument=True,
                 description="List of z values to apply rough alignment to")
    mask_input_dir = InputDir(
        required=False,
        default=None,
        missing=None,
        description=("directory containing mask files. basenames of "
                     "masks that match tileIds in the rough stack "
                     "will be handled."))
    read_masks_from_lowres_stack = Bool(
        required=False,
        default=False,
        missing=False,
        description=("masks will be taken from lowres tilespecs."
                     " any mask_input_dir ignored."))
    update_lowres_with_masks = Bool(
        required=False,
        default=False,
        description="should the masks be added to the rough stack?")
    filter_montage_output_with_masks = Bool(
        required=False,
        default=False,
        description="should the tiles written be filtered by the masks?")
    mask_exts = List(Str,
                     required=False,
                     default=['png', 'tif'],
                     description="what kind of mask files to recognize")
    close_stack = argschema.fields.Bool(
        required=False,
        default=True,
        missing=True,
        description=("whether to set output stack to COMPLETE"))

    @post_load
    def validate_data(self, data):
        if data['prealigned_stack'] is None:
            data['prealigned_stack'] = data['montage_stack']
        if data['map_z']:
            if data['new_z'] is None:
                raise ValidationError("new_z is invalid. "
                                      "You need to specify new_z "
                                      "as a list of values")
            elif len(data['new_z']) != len(data['old_z']):
                raise ValidationError("new_z list count does "
                                      "not match with old_z list count")
        else:
            data['new_z'] = data['old_z']
            data['remap_section_ids'] = False
class MoveStacksAndDataToS3Parameters(StackTransferParametersBase):
    check_only = Bool(required=False,
                      default=False,
                      description="Check for file existence but do not upload")
    upload_data = Bool(required=False,
                       default=False,
                       description="Upload data to S3")
    input_stacks = List(Str,
                        required=True,
                        description="list of stacks to copy")
    output_prefix = List(Str,
                         required=False,
                         description="list of names for uploaded stacks "
                         "(defaults to input_stack)")
    aws_region = Str(required=False,
                     default=None,
                     allow_none=True,
                     description="AWS region (required for newer regions)")