class CellFeatures(DefaultSchema):
    """Per-cell QC feature values; every field is nullable."""

    # NOTE(review): descriptions are placeholder text ("blash") — consider
    # replacing with meaningful documentation.
    blowout_mv = Float(allow_none=True, required=False, description="blash")
    seal_gohm = Float(allow_none=True, description="blash")
    electrode_0_pa = Float(allow_none=True, description="blash")
    input_access_resistance_ratio = Float(allow_none=True, description="blash")
    input_resistance_mohm = Float(allow_none=True, description="blash")
    initial_access_resistance_mohm = Float(allow_none=True,
                                           description="blash")
class StarburstParameters(DefaultSchema):
    """Parameters controlling starburst ray casting for ellipse fitting."""

    index_length = Int(
        description="Initial default length for rays",
        default=PointGenerator.DEFAULT_INDEX_LENGTH)
    n_rays = Int(
        description="Number of rays to draw",
        default=PointGenerator.DEFAULT_N_RAYS)
    # Corneal-reflection-specific override of the generic threshold factor.
    cr_threshold_factor = Float(
        description=("Threshold factor for corneal reflection ellipse edges, "
                     "will supercede `threshold_factor` for corneal "
                     "reflection if specified"),
        default=PointGenerator.DEFAULT_THRESHOLD_FACTOR)
    # Pupil-specific override of the generic threshold factor.
    pupil_threshold_factor = Float(
        description=("Threshold factor for pupil ellipse edges, will "
                     "supercede `threshold_factor` for pupil if specified"),
        default=PointGenerator.DEFAULT_THRESHOLD_FACTOR)
    cr_threshold_pixels = Int(
        description=("Number of pixels from start of ray to use for adaptive "
                     "threshold of the corneal reflection. Also serves as a "
                     "minimum cutoff for point detection"),
        default=PointGenerator.DEFAULT_CR_THRESHOLD_PIXELS)
    pupil_threshold_pixels = Int(
        description=("Number of pixels from start of ray to use for adaptive "
                     "threshold of the pupil. Also serves as a minimum "
                     "cutoff for point detection"),
        default=PointGenerator.DEFAULT_PUPIL_THRESHOLD_PIXELS)
class EyeParameters(DefaultSchema):
    """Tuning parameters for the EyeTracker fitting pipeline."""

    cr_recolor_scale_factor = Float(
        description="Size multiplier for corneal reflection recolor mask",
        default=EyeTracker.DEFAULT_CR_RECOLOR_SCALE_FACTOR)
    min_pupil_value = Int(
        description="Minimum value the average pupil shade can be",
        default=EyeTracker.DEFAULT_MIN_PUPIL_VALUE)
    max_pupil_value = Int(
        description="Maximum value the average pupil shade can be",
        default=EyeTracker.DEFAULT_MAX_PUPIL_VALUE)
    recolor_cr = Bool(
        description="Flag for recoloring corneal reflection",
        default=EyeTracker.DEFAULT_RECOLOR_CR)
    adaptive_pupil = Bool(
        description="Flag for whether or not to adaptively update pupil color",
        default=EyeTracker.DEFAULT_ADAPTIVE_PUPIL)
    pupil_mask_radius = Int(
        description="Radius of pupil mask used to find seed point",
        default=EyeTracker.DEFAULT_PUPIL_MASK_RADIUS)
    cr_mask_radius = Int(
        description="Radius of cr mask used to find seed point",
        default=EyeTracker.DEFAULT_CR_MASK_RADIUS)
    smoothing_kernel_size = Int(
        description=("Kernel size for median filter smoothing kernel (must be "
                     "odd)"),
        default=EyeTracker.DEFAULT_SMOOTHING_KERNEL_SIZE)
    clip_pupil_values = Bool(
        description=("Flag of whether or not to restrict pupil values for "
                     "starburst to fall within the range of (min_pupil_value, "
                     "max_pupil_value)"),
        default=EyeTracker.DEFAULT_CLIP_PUPIL_VALUES)
    average_iris_intensity = Int(
        description="Average expected intensity of the iris",
        default=EyeTracker.DEFAULT_AVERAGE_IRIS_INTENSITY)
    max_eccentricity = Float(
        description="Maximum eccentricity allowed for pupil.",
        default=EyeTracker.DEFAULT_MAX_ECCENTRICITY)
class MakeDownsampleSectionStackParameters(RenderParameters):
    """Parameters for producing a downsampled copy of a render stack."""

    input_stack = Str(
        required=True,
        metadata={'description': 'stack to make a downsample version of'})
    scale = Float(required=False, default=.01,
                  metadata={'description': 'scale to make images'})
    # BUG FIX: metadata was previously a set literal with a misspelled key
    # ({'decription', '...'}); marshmallow expects a dict keyed by
    # 'description', otherwise the description is silently lost.
    image_directory = OutputDir(
        required=True,
        metadata={'description': 'path to save section images'})
    output_stack = Str(required=True,
                       metadata={'description': 'output stack to name'})
    pool_size = Int(
        required=False, default=20,
        metadata={'description': 'number of parallel threads to use'})
class PointMatchParameters(DefaultSchema):
    """RANSAC-style filtering parameters for point-match collections."""

    NumRandomSamplingsMethod = Str(
        description='Numerical Random sampling method',
        required=False,
        default="Desired confidence",
        missing="Desired confidence")
    MaximumRandomSamples = Int(
        description='Maximum number of random samples',
        required=False, default=5000, missing=5000)
    DesiredConfidence = Float(
        description='Desired confidence level',
        required=False, default=99.9, missing=99.9)
    PixelDistanceThreshold = Float(
        description='Pixel distance threshold for filtering',
        required=False, default=0.1, missing=0.1)
    Transform = Str(
        description="Transformation type parameter for point "
                    "match filtering (default AFFINE)",
        required=False, default="AFFINE", missing="AFFINE")
class ObservatoryObject(DefaultSchema):
    """Pose of a rig object: rotations in degrees, center in millimeters."""

    rotation_x_deg = Float(required=True,
                           description="Rotation about x in degrees")
    rotation_y_deg = Float(required=True,
                           description="Rotation about y in degrees")
    rotation_z_deg = Float(required=True,
                           description="Rotation about z in degrees")
    center_x_mm = Float(required=True,
                        description="X position in millimeters")
    center_y_mm = Float(required=True,
                        description="Y position in millimeters")
    center_z_mm = Float(required=True,
                        description="Z position in millimeters")
class BigFetaPlotSchema(BigFetaSchema):
    """Extends BigFetaSchema with residual-plotting options."""

    z1 = Int(description='first z for plot', default=1000)
    z2 = Int(description='second z for plot', default=1000)
    zoff = Int(description='z offset between pointmatches and tilespecs',
               default=0)
    plot = Boolean(description='make a plot, otherwise, just text output',
                   default=True)
    savefig = Boolean(description='save to a pdf', default=False)
    plot_dir = String(default='./')
    threshold = Float(
        description='threshold for colors in residual plot [pixels]',
        default=5.0)
    density = Boolean(
        description=("whether residual plot is density "
                     " (for large numbers of points) or just points"),
        default=True)
class QcSweepFeatures(SweepFeatures):
    """Per-sweep QC features layered on top of SweepFeatures."""

    # NOTE(review): several descriptions are placeholder text ("blah").
    pre_noise_rms_mv = Float(required=True, description="blah")
    post_noise_rms_mv = Float(required=True, allow_none=True,
                              description="blah")
    slow_noise_rms_mv = Float(required=True, description="blah")
    vm_delta_mv = Float(required=True, allow_none=True, description="blah")
    stimulus_duration = Float(required=True, description="blah")
    stimulus_amplitude = Float(required=True, allow_none=True,
                               description="amplitude of stimulus")
class OutputParameters(DefaultSchema):
    """Outputs of the upright-transform computation."""

    # Echo of the inputs for provenance.
    inputs = Nested(
        InputParameters,
        required=True,
        description="The parameters argued to this executable")
    upright_angle = Float(
        required=True,
        description=(
            "Angle of rotation about the soma required to upright the input "
            "morphology. In radians counterclockwise from horizontal axis"))
    upright_transform_dict = Nested(
        AffineDictSchema,
        required=False,
        description='Dictionary defining an affine transform')
class MultIntensityCorrParams(StackTransitionParameters):
    """Parameters for multiplicative intensity correction of a stack."""

    correction_stack = Str(
        required=True,
        description='Correction stack (usually median stack for AT data)')
    output_directory = OutputDir(
        required=True,
        description='Directory for storing Images')
    # TODO add create_stack metadata
    cycle_number = Int(
        required=False, default=2,
        description="what cycleNumber to upload for output_stack on render")
    cycle_step_number = Int(
        required=False, default=1,
        description="what cycleStepNumber to upload for output_stack on "
                    "render")
    clip = Bool(required=False, default=True,
                description="whether to clip values")
    scale_factor = Float(required=False, default=1.0,
                         description="scaling value")
    clip_min = Int(required=False, default=0, description='Min Clip value')
    clip_max = Int(required=False, default=65535,
                   description='Max Clip value')
class ProbeInputParameters(DefaultSchema):
    """Per-probe inputs for LFP processing.

    NOTE(review): this schema mixes ``help=`` and ``description=`` keyword
    arguments — verify both are honored by the schema library in use.
    """

    name = String(required=True, help='Identifier for this probe')
    lfp_input_file_path = String(
        required=True, description="path to original LFP .dat file")
    lfp_timestamps_input_path = String(
        required=True, description="path to LFP timestamps")
    lfp_data_path = String(
        required=True, help="Path to LFP data continuous file")
    lfp_timestamps_path = String(
        required=True,
        help="Path to LFP timestamps aligned to master clock")
    lfp_channel_info_path = String(
        required=True, help="Path to LFP channel info")
    total_channels = Int(
        default=384, help='Total channel count for this probe.')
    surface_channel = Int(required=True, help="Probe surface channel")
    reference_channels = NumpyArray(
        required=False, help="Probe reference channels")
    lfp_sampling_rate = Float(
        required=True, help="Sampling rate of LFP data")
    noisy_channels = NumpyArray(
        required=False, help="Noisy channels to remove")
class ApplyLowRes2HighResParameters(RenderParameters):
    """Parameters for applying a low-res alignment to a high-res stack."""

    input_stack = Str(
        required=True,
        metadata={'description': 'stitched stack to apply alignment to'})
    lowres_stack = Str(
        required=True,
        metadata={'description': 'low res alignmed stack'})
    # BUG FIX: output_stack was declared twice; the second declaration
    # silently overwrote the first, so only one is kept here (with the
    # effective description).
    output_stack = Str(
        required=True,
        metadata={'description': 'output stack to name'})
    prealigned_stack = Str(
        required=True,
        metadata={'description': 'pre aligned stack (typically the one with '
                                 'dropped tiles corrected for stitching '
                                 'errors)'})
    scale = Float(
        required=False, default=.01,
        metadata={'description': 'scale to make images'})
    # BUG FIX: metadata was a set literal with a misspelled key
    # ({'decription', ...}); it must be a dict keyed by 'description'.
    tilespec_directory = Str(
        required=True,
        metadata={'description': 'path to save section images'})
    pool_size = Int(
        required=False, default=20,
        metadata={'description': 'number of parallel threads to use'})
    minZ = Int(required=False, default=0,
               metadata={'description': 'Minimum Z value'})
    maxZ = Int(required=False, default=100000000,
               metadata={'description': 'Maximum Z value'})
class tPrimeParams(DefaultSchema):
    """Inputs for running the TPrime clock-alignment executable."""

    tPrime_path = InputDir(
        help='directory containing the TPrime executable.')
    sync_period = Float(
        default=1.0, help='Period of sync waveform (sec).')
    toStream_sync_params = String(
        required=False,
        default='SY=0,384,6,500',
        help='string of CatGT params used to extract to stream sync edges, '
             'e.g. SY=0,384,6,500')
    ni_sync_params = String(
        required=False,
        default='XA=0,1,3,500',
        help='string of CatGT params used to extract NI sync edges, '
             'e.g. XA=0,1,3,500')
    toStream_path_3A = String(
        required=False, help='full path to toStream edges file')
    fromStream_list_3A = List(
        String,
        required=False,
        help='list of full paths to fromStream edges files')
class InputParameters(ArgSchema):
    """Inputs for stepping SWC nodes through a depth field."""

    swc_path = InputFile(
        required=True,
        description="path to input swc (csv) file")
    depth = Nested(
        DepthField,
        required=True,
        many=False,
        description=("A transform which can be evaluated at the "
                     "location of each node in the input swc"))
    layers = Nested(
        Layer,
        required=True,
        many=True,
        description="specification of layer bounds")
    step_size = Float(
        required=True,
        default=1.0,
        description=(
            "size of each step, in the same units as the depth field and swc"))
    output_path = OutputFile(
        required=True,
        description="write (csv) outputs here")
    max_iter = Int(
        required=True,
        default=1000,
        description="how many steps to take before giving up")
class InputParameters(ArgSchema):
    """Inputs describing cortical layer geometry for boundary fitting."""

    layer_polygons = Nested(
        SimpleGeometry,
        many=True,
        required=True,
        description=(
            "Each entry defines the entire (simple) boundary of a layer"))
    pia_surface = Nested(
        SimpleGeometry,
        required=True,
        description="A path defining the pia-side surface of the cortex")
    wm_surface = Nested(
        SimpleGeometry,
        required=True,
        description=(
            "A path defining the white matter-side surface of the cortex"))
    working_scale = Float(
        required=False,
        default=1.0 / 4,
        description=(
            "When computing close-fitting boundaries, do so in a raster "
            "space scaled from the inputs according to this value."))
    images = Nested(
        Image,
        required=False,
        many=True,
        description=(
            "Each defines an image (in the space of the geometric objects) on "
            "which overlays will be drawn"))
    # Canonical pia-to-white-matter ordering used when pairing layers.
    layer_order = List(
        String,
        required=True,
        default=[
            "Layer1", "Layer2", "Layer2/3", "Layer3", "Layer4", "Layer5",
            "Layer6a", "Layer6", "Layer6b"
        ],
        description=(
            "Layer polygons will be ordered according to this rule when "
            "finding inter-layer surfaces. Names not in this list will not be "
            "ordered, but not all names in this list need to be present."))
class MakeDownsampleSectionStackParameters(RenderParameters):
    """Parameters for producing a downsampled section stack."""

    input_stack = Str(required=True,
                      description='stack to make a downsample version of')
    scale = Float(required=False, default=.01,
                  description='scale to make images')
    # BUG FIX: metadata for the two fields below was a set literal with a
    # misspelled key ({'decription', ...}); marshmallow expects a dict with
    # a 'description' key, otherwise the description is silently dropped.
    image_directory = Str(
        required=True,
        metadata={'description': 'path to save section images'})
    numsectionsfile = Str(
        required=True,
        metadata={'description': 'file to save length of sections'})
    output_stack = Str(required=True,
                       metadata={'description': 'output stack to name'})
    pool_size = Int(
        required=False, default=20,
        metadata={'description': 'number of parallel threads to use'})
    minZ = Int(required=False, default=0,
               metadata={'description': 'Minimum Z value'})
    maxZ = Int(required=False, default=100000000,
               metadata={'description': 'Maximum Z value'})
class LensCorrectionSchema(ArgSchema):
    """Inputs for the lens-correction solve."""

    data_dir = InputDir(
        required=True,
        description="directory containing metafile, images, and matches")
    output_dir = OutputDir(required=False,
                           description="directory for output files")
    mask_file = InputFile(required=False, default=None, missing=None,
                          description="mask to apply to each tile")
    # BUG FIX: keyword was misspelled 'missinf=1000', which marshmallow
    # silently ignored, leaving `missing` unset for this field.
    nvertex = Int(required=False, default=1000, missing=1000,
                  description="maximum number of vertices to attempt")
    ransac_thresh = Float(required=False, default=5.0, missing=5.0,
                          description="ransac outlier threshold")
    # NOTE: field name intentionally shadows the nested schema class name.
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    ignore_match_indices = List(
        Int,
        required=False, default=None, missing=None,
        description=("debug feature for ignoring certain indices"
                     " of the match collection"))
    compress_output = Boolean(
        required=False, missing=True, default=True,
        description=("tilespecs will be .json or .json.gz"))
    timestamp = Boolean(required=False, missing=False, default=False,
                        description="add a timestamp to basename output")
class OutputParameters(OutputSchema):
    """Module outputs: runtime and the quality-metrics file path."""

    execution_time = Float()
    quality_metrics_output_file = String()
class PointMatchOpenCVParameters(RenderParameters):
    """OpenCV SIFT/FLANN/RANSAC point-matching parameters."""

    ndiv = Int(
        description="one tile per tile pair subdivided into "
                    "ndiv x ndiv for easier homography finding",
        required=False, default=8, missing=8)
    matchMax = Int(
        description="per tile pair limit, randomly "
                    "chosen after SIFT and RANSAC",
        required=False, default=1000, missing=1000)
    downsample_scale = Float(
        description="passed to cv2.resize(fx=, fy=)",
        required=False, default=0.3, missing=0.3)
    SIFT_nfeature = Int(
        description="passed to cv2.xfeatures2d.SIFT_create(nfeatures=)",
        required=False, default=20000, missing=20000)
    SIFT_noctave = Int(
        description="passed to cv2.xfeatures2d.SIFT_create(nOctaveLayers=)",
        required=False, default=3, missing=3)
    SIFT_sigma = Float(
        description="passed to cv2.xfeatures2d.SIFT_create(sigma=)",
        required=False, default=1.5, missing=1.5)
    RANSAC_outlier = Float(
        description="passed to cv2."
                    "findHomography(src, dst, cv2.RANSAC, outlier)",
        required=False, default=5.0, missing=5.0)
    FLANN_ntree = Int(
        description="passed to cv2.FlannBasedMatcher()",
        required=False, default=5, missing=5)
    FLANN_ncheck = Int(
        description="passed to cv2.FlannBasedMatcher()",
        required=False, default=50, missing=50)
    ratio_of_dist = Float(
        description="ratio in Lowe's ratio test",
        required=False, default=0.7, missing=0.7)
    CLAHE_grid = Int(
        description="tileGridSize for cv2 CLAHE",
        required=False, default=None, missing=None)
    CLAHE_clip = Float(
        description="clipLimit for cv2 CLAHE",
        required=False, default=None, missing=None)
    pairJson = Str(
        description="full path of tilepair json",
        required=False)
    input_stack = Str(
        description="Name of raw input lens data stack",
        required=False)
    match_collection = Str(
        description="name of point match collection",
        required=False)
    ncpus = Int(
        description="number of CPUs to use",
        required=False, default=-1, missing=-1)
class OutputParameters(OutputSchema):
    """Module outputs: execution time only."""

    execution_time = Float()
class OutputParameters(OutputSchema):
    """Outputs of the median-subtraction step, with provenance info."""

    median_subtraction_execution_time = Float()
    # Commit hash/date of the median-subtraction code used, for provenance.
    median_subtraction_commit_hash = String()
    median_subtraction_commit_date = String()
class SessionData(RaisingSchema):
    """Session-level metadata and file paths needed to write an ophys NWB."""

    ophys_experiment_id = Int(
        required=True,
        description='unique identifier for this ophys session')
    ophys_session_id = Int(
        required=True,
        description='The ophys session id that the ophys experiment to be '
                    'written to NWB is from')
    # BUG FIX: description previously contained the duplicated phrase
    # "written to written to NWB".
    behavior_session_id = Int(
        required=True,
        description='The behavior session id that the ophys experiment to '
                    'be written to NWB is from')
    foraging_id = String(
        required=True,
        description='The foraging id associated with the ophys session')
    rig_name = String(required=True, description='name of ophys device')
    movie_height = Int(
        required=True,
        description='height of field-of-view for 2p movie')
    movie_width = Int(
        required=True,
        description='width of field-of-view for 2p movie')
    container_id = Int(
        required=True,
        description='container that this experiment is in')
    sync_file = String(required=True, description='path to sync file')
    max_projection_file = String(
        required=True, description='path to max_projection file')
    behavior_stimulus_file = String(
        required=True, description='path to behavior_stimulus file')
    dff_file = String(required=True, description='path to dff file')
    demix_file = String(required=True, description='path to demix file')
    average_intensity_projection_image_file = String(
        required=True,
        description='path to average_intensity_projection_image file')
    rigid_motion_transform_file = String(
        required=True,
        description='path to rigid_motion_transform file')
    targeted_structure = String(
        required=True,
        description='Anatomical structure that the experiment targeted')
    targeted_depth = Int(
        required=True,
        description='Cortical depth that the experiment targeted')
    stimulus_name = String(required=True, description='Stimulus Name')
    # BUG FIX: description previously read "relative ot UTC".
    date_of_acquisition = String(
        required=True,
        description='date of acquisition of experiment, as string (no '
                    'timezone info but relative to UTC)')
    reporter_line = List(String, required=True, description='reporter line')
    driver_line = List(String, required=True, description='driver line')
    external_specimen_name = Int(
        required=True, description='LabTracks ID of the animal')
    full_genotype = String(required=True, description='full genotype')
    surface_2p_pixel_size_um = Float(
        required=True,
        description='the spatial extent (in um) of the 2p field-of-view')
    ophys_cell_segmentation_run_id = Int(
        required=True,
        description='ID of the active segmentation run used to generate '
                    'this file')
    cell_specimen_table_dict = Nested(
        CellSpecimenTable,
        required=True,
        description='Table of cell specimen info')
    sex = String(required=True, description='sex')
    age = String(required=True, description='age')
    eye_tracking_rig_geometry = Dict(
        required=True,
        description="Mapping containing information about session rig "
                    "geometry used for eye gaze mapping.")
    eye_tracking_filepath = String(
        required=True,
        validate=check_read_access,
        description="h5 filepath containing eye tracking ellipses")
    events_file = InputFile(
        required=True,
        description='h5 filepath to events data')
    # None for Scientifica rigs; an index for Mesoscope plane groups.
    imaging_plane_group = Int(
        required=True,
        allow_none=True,
        description="A numeric index that indicates the order that the "
                    "frames were acquired when dealing with an imaging plane "
                    "in a mesoscope experiment. Will be None for Scientifica "
                    "experiments.")
    plane_group_count = Int(
        required=True,
        description="The total number of plane groups associated with the "
                    "ophys session that the experiment belongs to. Will be 0 "
                    "for Scientifica experiments and nonzero for Mesoscope "
                    "experiments.")
class InputSchema(ArgSchema):
    """Inputs for eye-gaze mapping: rig geometry plus tracking files."""

    # ============== Required fields ==============
    input_file = InputFile(
        required=True,
        description=('An h5 file containing ellipses fits for '
                     'eye, pupil, and corneal reflections.'))
    session_sync_file = InputFile(
        required=True,
        description=('An h5 file containing timestamps to synchronize '
                     'eye tracking video frames with rest of ephys '
                     'session events.'))
    output_file = OutputFile(
        required=True,
        description=('Full save path of output h5 file that '
                     'will be created by this module.'))
    # Monitor pose in 'global' rig coordinates.
    monitor_position_x_mm = Float(
        required=True,
        description=("Monitor center X position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_position_y_mm = Float(
        required=True,
        description=("Monitor center Y position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_position_z_mm = Float(
        required=True,
        description=("Monitor center Z position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_rotation_x_deg = Float(
        required=True, description="Monitor X rotation in degrees")
    monitor_rotation_y_deg = Float(
        required=True, description="Monitor Y rotation in degrees")
    monitor_rotation_z_deg = Float(
        required=True, description="Monitor Z rotation in degrees")
    # Camera pose in 'global' rig coordinates.
    camera_position_x_mm = Float(
        required=True,
        description=("Camera center X position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_position_y_mm = Float(
        required=True,
        description=("Camera center Y position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_position_z_mm = Float(
        required=True,
        description=("Camera center Z position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_rotation_x_deg = Float(
        required=True, description="Camera X rotation in degrees")
    camera_rotation_y_deg = Float(
        required=True, description="Camera Y rotation in degrees")
    camera_rotation_z_deg = Float(
        required=True, description="Camera Z rotation in degrees")
    # LED position in 'global' rig coordinates.
    led_position_x_mm = Float(
        required=True,
        description=("LED X position in 'global' "
                     "coordinates (millimeters)"))
    led_position_y_mm = Float(
        required=True,
        description=("LED Y position in 'global' "
                     "coordinates (millimeters)"))
    led_position_z_mm = Float(
        required=True,
        description=("LED Z position in 'global' "
                     "coordinates (millimeters)"))
    equipment = String(
        required=True,
        description=('String describing equipment setup used '
                     'to acquire eye tracking videos.'))
    date_of_acquisition = String(
        required=True, description='Acquisition datetime string.')
    eye_video_file = InputFile(
        required=True,
        description=('Full path to raw eye video '
                     'file (*.avi).'))
    # ============== Optional fields ==============
    eye_radius_cm = Float(
        default=0.1682,
        description=('Radius of tracked eye(s) in '
                     'centimeters.'))
    cm_per_pixel = Float(
        default=(10.2 / 10000.0),
        description=('Centimeter per pixel conversion '
                     'ratio.'))
    log_level = LogLevel(
        default='INFO',
        description='Set the logging level of the module.')
class ImageSpacing(RaisingSchema):
    """2D pixel spacing of an image (row and column)."""

    row = Float(required=True)
    column = Float(required=True)
class ReferenceSpacing(RaisingSchema):
    """3D spacing of a reference volume (row, column, and slice)."""

    row = Float(required=True)
    column = Float(required=True)
    # Field name mirrors the volume axis; intentionally matches the
    # builtin name in the schema namespace.
    slice = Float(required=True)
class SolverOptionsParameters(DefaultSchema):
    """Options forwarded to the EM alignment solver."""

    degree = Int(
        required=False, default=1, missing=1,
        description="Degree of required transformation "
                    "(0 - Rigid, 1 - Affine(default), 2 - Polynomial)")
    solver = Str(
        required=False, default="backslash", missing="backslash",
        description="type of solver to solve the system (default - backslash)")
    close_stack = Bool(
        required=False, default=False,
        description="whether the solver should close the stack after "
                    "uploading results")
    transfac = Float(required=False, default=0.00001, missing=0.00001,
                     description="translation factor")
    lambda_value = Float(required=False, default=1000, missing=1000,
                         description="lambda for the solver")
    edge_lambda = Float(required=False, default=0.005, missing=0.005,
                        description="edge lambda for solver regularization")
    nbrs = Int(
        required=False, default=2, missing=2,
        description="number of neighboring sections to consider (applies "
                    "for cross section point matches)")
    nbrs_step = Int(required=False, default=1, missing=1,
                    description="neighbors step")
    xs_weight = Float(required=True,
                      description="Cross section point match weights")
    min_points = Int(
        required=False, default=5, missing=5,
        description="Minimum number of points correspondences required per "
                    "tile pair")
    max_points = Int(
        required=False, default=200, missing=200,
        description="Maximum number of point correspondences required per "
                    "tile pair")
    filter_point_matches = Int(
        required=False, default=1, missing=1,
        description="Filter point matches from collection (default = 1)")
    outlier_lambda = Int(required=False, default=1000, missing=1000,
                         description="lambda value for outliers")
    min_tiles = Int(required=False, default=3, missing=3,
                    description="Minimum number of tiles in section")
    Width = Int(required=True, description="Width of the tiles")
    Height = Int(required=True, description="Height of the tiles")
    outside_group = Int(required=False, default=0, missing=0,
                        description="Outside group")
    matrix_only = Int(required=False, default=0, missing=0,
                      description="matrix only")
    distribute_A = Int(required=False, default=16, missing=16,
                       description="Distribute A matrix")
    dir_scratch = OutputDir(required=True, description="Scratch directory")
    distributed = Int(required=False, default=0, missing=0,
                      description="Distributed parameter of solver")
    use_peg = Int(required=False, default=0, missing=0,
                  description="Use pegs? (default = 0)")
    verbose = Int(required=False, default=0, missing=0,
                  description="Verbose output from solver needed?")
    debug = Int(required=False, default=0, missing=0,
                description="turn on debug mode (default = 0 - off)")
    constrain_by_z = Int(required=False, default=0, missing=0,
                         description="Constrain solution by z (default = 0)")
    sandwich = Int(required=False, default=0, missing=0,
                   description="sandwich factor of solver")
    # BUG FIX: this was declared Int with a float default/missing of 1e15;
    # an Int field cannot faithfully carry a float literal default — use
    # Float (matching SolverParameters.constraint_fac elsewhere in this file).
    constraint_fac = Float(required=False, default=1e15, missing=1e15,
                           description="Constraint factor")
    pmopts = Nested(PointMatchFilteringOptions, required=True,
                    description="Point match filtering options for solver")
    pastix = Nested(PastixOptions, required=True,
                    description="Pastix solver options")
class TilePairClientParameters(RenderParameters):
    """Parameters for the render tile-pair generation java client."""

    stack = Str(
        required=True,
        description="input stack to which tilepairs need to be generated")
    baseStack = Str(
        required=False, default=None, missing=None,
        description="Base stack")
    minZ = Int(
        required=False, default=None, missing=None,
        description="z min for generating tilepairs")
    maxZ = Int(
        required=False, default=None, missing=None,
        description="z max for generating tilepairs")
    xyNeighborFactor = Float(
        required=False, default=0.9,
        description="Multiply this by max(width, height) of "
                    "each tile to determine radius for locating neighbor "
                    "tiles")
    zNeighborDistance = Int(
        required=False, default=2, missing=2,
        description="Look for neighbor tiles with z values less than "
                    "or equal to this distance from the current tile's z "
                    "value")
    excludeCornerNeighbors = Bool(
        required=False, default=True, missing=True,
        description="Exclude neighbor tiles whose center x and y is "
                    "outside the source tile's x and y range respectively")
    excludeSameLayerNeighbors = Bool(
        required=False, default=False, missing=False,
        description="Exclude neighbor tiles in the "
                    "same layer (z) as the source tile")
    excludeCompletelyObscuredTiles = Bool(
        required=False, default=True, missing=True,
        description="Exclude tiles that are completely "
                    "obscured by reacquired tiles")
    output_dir = OutputDir(
        required=True,
        description="Output directory path to save the tilepair json file")
    memGB = Str(
        required=False, default='6G', missing='6G',
        description="Memory for the java client to run")

    @post_load
    def validate_data(self, data):
        """Default baseStack to the input stack when not provided."""
        if data['baseStack'] is None:
            data['baseStack'] = data['stack']
        # BUG FIX: a marshmallow @post_load hook's return value replaces the
        # deserialized result; the original returned None implicitly, which
        # would make schema.load(...) yield None instead of the data dict.
        return data
class SolverParameters(DefaultSchema):
    """Parameters for Khaled's EM_aligner solver.

    NOTE: EM_aligner expects some boolean flags as Integers, hence the
    Int-typed 0/1 fields below.
    """

    degree = Int(
        required=True, default=1,
        description="Degree of transformation 1 - affine, "
                    "2 - second order polynomial, maximum is 3")
    solver = Str(
        required=False, default='backslash', missing='backslash',
        description="Solver type - default is backslash")
    transfac = Float(
        required=False, default=1e-15, missing=1e-15,
        description='Translational factor')
    lambda_value = Float(
        required=True, description="regularization parameter")
    edge_lambda = Float(
        required=True, description="edge lambda regularization parameter")
    nbrs = Int(
        required=False, default=3, missing=3,
        description="No. of neighbors")
    nbrs_step = Int(
        required=False, default=1, missing=1,
        description="Step value to increment the # of neighbors")
    xs_weight = Float(
        required=True,
        description="Weight ratio for cross section point matches")
    min_points = Int(
        required=True, default=8,
        description="Minimum no. of point matches per tile pair defaults "
                    "to 8")
    max_points = Int(
        required=True, default=100,
        description="Maximum no. of point matches")
    filter_point_matches = Int(
        required=False, default=1, missing=1,
        description='set to a value 1 if point matches must be filtered')
    outlier_lambda = Float(
        required=True, default=100,
        description="Outlier lambda - large numbers result in "
                    "fewer tiles excluded")
    min_tiles = Int(
        required=False, default=2, missing=2,
        description="minimum number of tiles")
    Width = Int(
        required=False, default=3840, missing=3840,
        description='Width of the tiles (default = 3840)')
    Height = Int(
        required=False, default=3840, missing=3840,
        description='Height of the tiles (default= 3840)')
    outside_group = Int(
        required=False, default=0, missing=0,
        description='Outside group parameter (default = 0)')
    matrix_only = Int(
        required=False, default=0, missing=0,
        description="0 - solve (default), 1 - only generate "
                    "the matrix. For debugging only")
    distribute_A = Int(
        required=False, default=1, missing=1,
        description="Shards of A matrix")
    dir_scratch = InputDir(
        required=True, description="Scratch directory")
    distributed = Int(
        required=False, default=0, missing=0,
        description="distributed or not?")
    disableValidation = Int(
        required=False, default=1, missing=1,
        description="Disable validation while ingesting tiles?")
    use_peg = Int(
        required=False, default=0, missing=0,
        description="use pegs or not")
    complete = Int(
        required=False, default=0, missing=0,
        description="Set stack state to complete after processing?")
    verbose = Int(
        required=False, default=0, missing=0,
        description="want verbose output?")
    debug = Int(
        required=False, default=0, missing=0,
        description="Debug mode?")
    constrain_by_z = Int(
        required=False, default=0, missing=0,
        description='Contrain by z')
    sandwich = Int(
        required=False, default=0, missing=0,
        description='Sandwich parameter for solver')
    constraint_fac = Float(
        required=False, default=1e+15, missing=1e+15,
        description='Constraint factor')
    pmopts = Nested(
        PointMatchParameters, required=True,
        description='Point match filtering parameters')
    pastix = Nested(
        PastixParameters,
        required=False, default=None, missing=None,
        description="Pastix parameters if solving using Pastix")
class OutputParameters(OutputSchema):
    """Outputs of surface-channel detection."""

    surface_channel = Int()
    air_channel = Int()
    probe_json = String()
    execution_time = Float()
class DepthEstimationParams(DefaultSchema):
    """Parameters for estimating probe insertion depth from LFP power."""

    hi_noise_thresh = Float(
        required=True, default=50.0,
        help='Max RMS noise for including channels')
    lo_noise_thresh = Float(
        required=True, default=3.0,
        help='Min RMS noise for including channels')
    save_figure = Bool(required=True, default=True)
    figure_location = OutputFile(required=True, default=None)
    smoothing_amount = Int(
        required=True, default=5,
        help='Gaussian smoothing parameter to reduce channel-to-channel '
             'noise')
    power_thresh = Float(
        required=True, default=2.5,
        help='Ignore threshold crossings if power is above this level '
             '(indicates channels are in the brain)')
    diff_thresh = Float(
        required=True, default=-0.07,
        help='Threshold to detect large increases is power at brain surface')
    freq_range = NumpyArray(
        required=True, default=[0, 10],
        help='Frequency band for detecting power increases')
    max_freq = Int(
        required=True, default=150,
        help='Maximum frequency to plot')
    channel_range = NumpyArray(
        required=True, default=[370, 380],
        help='Channels assumed to be out of brain, but in saline')
    n_passes = Int(
        required=True, default=10,
        help='Number of times to compute offset and surface channel')
    skip_s_per_pass = Int(
        required=True, default=5,
        help='Number of seconds between data chunks used on each pass')
    # default=100
    start_time = Float(
        required=True, default=0,
        help='First time (in seconds) for computing median offset')
    time_interval = Float(
        required=True, default=5,
        help='Number of seconds for computing median offset')
    nfft = Int(
        required=True, default=4096,
        help='Length of FFT used for calculations')
    air_gap = Int(
        required=True, default=100,
        help='Approximate number of channels between brain surface and air')