class EyeParameters(DefaultSchema):
    """Tuning knobs for pupil / corneal-reflection (CR) tracking.

    All defaults are drawn from the EyeTracker class constants.
    """

    cr_recolor_scale_factor = Float(
        default=EyeTracker.DEFAULT_CR_RECOLOR_SCALE_FACTOR,
        description="Size multiplier for corneal reflection recolor mask")
    min_pupil_value = Int(
        default=EyeTracker.DEFAULT_MIN_PUPIL_VALUE,
        description="Minimum value the average pupil shade can be")
    max_pupil_value = Int(
        default=EyeTracker.DEFAULT_MAX_PUPIL_VALUE,
        description="Maximum value the average pupil shade can be")
    recolor_cr = Bool(
        default=EyeTracker.DEFAULT_RECOLOR_CR,
        description="Flag for recoloring corneal reflection")
    adaptive_pupil = Bool(
        default=EyeTracker.DEFAULT_ADAPTIVE_PUPIL,
        description="Flag for whether or not to adaptively update pupil color")
    pupil_mask_radius = Int(
        default=EyeTracker.DEFAULT_PUPIL_MASK_RADIUS,
        description="Radius of pupil mask used to find seed point")
    cr_mask_radius = Int(
        default=EyeTracker.DEFAULT_CR_MASK_RADIUS,
        description="Radius of cr mask used to find seed point")
    smoothing_kernel_size = Int(
        default=EyeTracker.DEFAULT_SMOOTHING_KERNEL_SIZE,
        description=("Kernel size for median filter smoothing kernel (must be "
                     "odd)"))
class FuseStacksParameters(RenderParameters):
    """Inputs for fusing a directed graph of render stacks into one stack."""

    stacks = Nested(
        Stack, required=True,
        description=('stack dictionary representing directed '
                     'graph of stacks'))
    interpolate_transforms = Bool(
        required=False, default=True,
        description=('whether to use an InterpolatedTransform '
                     'class to interpolate the transforms of '
                     'overlapping sections'))
    output_stack = Str(
        required=True,
        description=('stack to which fused representations '
                     'should be added'))
    pool_size = Int(
        required=False, default=1,
        description='multiprocessing pool size')
    create_nonoverlapping_zs = Bool(
        required=False, default=False,
        description=("upload uninterpolated sections for sections "
                     "without overlap"))
    close_stack = Bool(
        required=False, default=False,
        description=("whether to set output stack to COMPLETE on finishing"))
    max_tilespecs_per_import_process = Int(
        required=False, default=None, allow_none=True,
        description=("maximum number of tilespecs (written to tempfile) per "
                     "import processing group. Default unlimited."))
class KilosortHelperParameters(DefaultSchema):
    """Configuration for driving Kilosort (v1 or v2) via Matlab."""

    kilosort_version = Int(
        required=True, default=2,
        help='Kilosort version to use (1 or 2)')
    spikeGLX_data = Bool(
        required=True, default=False,
        help='If true, use SpikeGLX metafile to build chanMap')
    ks_make_copy = Bool(
        required=False, default=False,
        help='If true, make a copy of the original KS output')
    surface_channel_buffer = Int(
        required=False, default=15,
        help='Number of channels above brain surface to include in spike sorting')
    matlab_home_directory = InputDir(
        help='Location from which Matlab files can be copied and run.')
    kilosort_repository = InputDir(
        help='Local directory for the Kilosort source code repository.')
    npy_matlab_repository = InputDir(
        help='Local directory for the npy_matlab repo for writing phy output')
    kilosort_params = Nested(
        KilosortParameters, required=False,
        help='Parameters used to auto-generate a Kilosort config file')
    kilosort2_params = Nested(
        Kilosort2Parameters, required=False,
        help='Parameters used to auto-generate a Kilosort2 config file')
class FilterSchema(RenderParameters, ZValueParameters, ProcessPoolParameters):
    """Inputs for filtering point matches by residual and translation."""

    input_stack = Str(
        required=True,
        description='stack with stage-aligned coordinates')
    input_match_collection = Str(
        required=True,
        description='Name of the montage point match collection')
    output_match_collection = Str(
        required=True, default=None, missing=None,
        description='Name of the montage point match collection to write to')
    resmax = Float(
        required=True,
        description=("maximum value in "
                     "pixels for average residual in tile pair"))
    transmax = Float(
        required=True,
        description=("maximum value in "
                     "pixels for translation relative to stage coords"))
    filter_output_file = OutputFile(
        required=True,
        description="location of json file with filter output")
    inverse_weighting = Bool(
        required=True, default=False, missing=False,
        description='new weights weighted inverse to counts per tile-pair')
class DataLoaderSchema(ArgSchema):
    """Inputs describing a landmark csv file and how to load/filter it."""

    landmark_file = InputFile(
        required=True,
        description=("csv file, one line per landmark"))
    actions = List(
        Str, required=False, missing=[], default=[],
        cli_as_single_argument=True,
        description=("actions to perform on data"))
    header = List(
        Str, required=False, default=None, missing=None,
        cli_as_single_argument=True,
        description=("passed as names=header to pandas.read_csv()"))
    # nested source/destination field specification
    sd_set = Nested(src_dst)
    all_flags = Bool(
        required=False, missing=False, default=False,
        description="if False, returns only flag=True data")
    exclude_labels = List(
        Int, required=True,
        missing=[100000, 200000], default=[100000, 200000],
        description="ignore Pt labels in this range")
class DepthEstimationParams(DefaultSchema):
    """Parameters for estimating probe depth / brain-surface channel from LFP."""

    hi_noise_thresh = Float(
        required=True, default=50.0,
        help='Max RMS noise for including channels')
    lo_noise_thresh = Float(
        required=True, default=3.0,
        help='Min RMS noise for including channels')
    save_figure = Bool(required=True, default=True)
    figure_location = OutputFile(required=True, default=None)
    smoothing_amount = Int(
        required=True, default=5,
        help='Gaussian smoothing parameter to reduce channel-to-channel noise')
    power_thresh = Float(
        required=True, default=2.5,
        help='Ignore threshold crossings if power is above this level (indicates channels are in the brain)')
    diff_thresh = Float(
        required=True, default=-0.07,
        help='Threshold to detect large increases is power at brain surface')
    freq_range = NumpyArray(
        required=True, default=[0, 10],
        help='Frequency band for detecting power increases')
    max_freq = Int(
        required=True, default=150,
        help='Maximum frequency to plot')
    channel_range = NumpyArray(
        required=True, default=[370, 380],
        help='Channels assumed to be out of brain, but in saline')
    n_passes = Int(
        required=True, default=10,
        help='Number of times to compute offset and surface channel')
    skip_s_per_pass = Int(
        required=True, default=100,
        help='Number of seconds between data chunks used on each pass')
    start_time = Float(
        required=True, default=0,
        help='First time (in seconds) for computing median offset')
    time_interval = Float(
        required=True, default=5,
        help='Number of seconds for computing median offset')
    nfft = Int(
        required=True, default=4096,
        help='Length of FFT used for calculations')
    air_gap = Int(
        required=True, default=100,
        help='Approximate number of channels between brain surface and air')
class PairwiseRigidSchema(StackTransitionParameters):
    """Inputs for pairwise rigid alignment of a stack.

    Fix: ``anchor_stack`` previously used the misspelled keyword
    ``require=False``, which marshmallow silently ignores (it lands in
    field metadata instead of setting the required flag). Corrected to
    ``required=False``; behavior is unchanged because ``required``
    already defaults to False, but the declared intent is now explicit.
    """

    match_collection = Str(
        required=True,
        description="Point match collection name")
    gap_file = InputFile(
        required=False, default=None, missing=None,
        description="json file {k: v} where int(k) is a z value to skip"
                    "entries in here that are not already missing will"
                    "be omitted from the output stack"
                    "i.e. this is a place one can skip sections")
    translate_to_positive = Bool(
        required=False, default=True, missing=True,
        description="translate output stack to positive space")
    translation_buffer = List(
        Float, required=False, default=[0, 0], missing=[0, 0],
        description=("minimum (x, y) of output stack if "
                     "translate_to_positive=True"))
    anchor_stack = Str(
        required=False, default=None, missing=None,
        description=("fix transforms using tiles in this stack"))
class MultIntensityCorrParams(StackTransitionParameters):
    """Inputs for multiplicative intensity correction of a stack."""

    correction_stack = Str(
        required=True,
        description='Correction stack (usually median stack for AT data)')
    output_directory = OutputDir(
        required=True,
        description='Directory for storing Images')
    # TODO add create_stack metadata
    cycle_number = Int(
        required=False, default=2,
        description="what cycleNumber to upload for output_stack on render")
    cycle_step_number = Int(
        required=False, default=1,
        description=("what cycleStepNumber to upload "
                     "for output_stack on render"))
    clip = Bool(
        required=False, default=True,
        description="whether to clip values")
    scale_factor = Float(
        required=False, default=1.0,
        description="scaling value")
    clip_min = Int(
        required=False, default=0,
        description='Min Clip value')
    clip_max = Int(
        required=False, default=65535,
        description='Max Clip value')
class InputParameters(ArgSchema):
    """Inputs for building a stimulus table from a raw stimulus pkl and sync h5."""

    stimulus_pkl_path = String(
        required=True,
        help="path to pkl file containing raw stimulus information")
    sync_h5_path = String(
        required=True,
        help="path to h5 file containing syncronization information")
    output_stimulus_table_path = String(
        required=True,
        help="the output stimulus table csv will be written here")
    output_frame_times_path = String(
        required=True,
        help="output all frame times here")
    minimum_spontaneous_activity_duration = Float(
        default=sys.float_info.epsilon,
        help="detected spontaneous activity sweeps will be rejected if they last fewer that this many seconds",
    )
    maximum_expected_spontanous_activity_duration = Float(
        default=1225.02541,
        help="validation will fail if a spontanous activity epoch longer than this one is computed.",
    )
    frame_time_strategy = String(
        default="use_photodiode",
        help="technique used to align frame times. Options are 'use_photodiode', which interpolates frame times between photodiode edge times (preferred when vsync times are unreliable) and 'use_vsyncs', which is preferred when reliable vsync times are available.",
    )
    stimulus_name_map = Dict(
        keys=String(), values=String(),
        help="optionally rename stimuli",
        default=default_stimulus_renames)
    column_name_map = Dict(
        keys=String(), values=String(),
        help="optionally rename stimulus parameters",
        default=default_column_renames)
    extract_const_params_from_repr = Bool(default=True)
    drop_const_params = List(
        String(),
        help="columns to be dropped from the stimulus table",
        default=["name", "maskParams", "win", "autoLog", "autoDraw"],
    )
    fail_on_negative_duration = Bool(
        default=False,
        help="Determine if the module should fail if a stimulus epoch has a negative duration.",
    )
class PostProcessROIsInputSchema(ArgSchema):
    """Inputs for post-processing Suite2P ROIs against motion-correction data."""

    suite2p_stat_path = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to s2p output stat file containing ROIs generated "
                     "during source extraction"))
    motion_corrected_video = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to motion corrected video file *.h5"))
    motion_correction_values = InputFile(
        required=True,
        description=("Path to motion correction values for each frame "
                     "stored in .csv format. This .csv file is expected to"
                     "have a header row of either:\n"
                     "['framenumber','x','y','correlation','kalman_x',"
                     "'kalman_y']\n['framenumber','x','y','correlation',"
                     "'input_x','input_y','kalman_x',"
                     "'kalman_y','algorithm','type']"))
    output_json = OutputFile(
        required=True,
        description=("Path to a file to write output data."))
    maximum_motion_shift = Float(
        missing=30.0, required=False, allow_none=False,
        description=("The maximum allowable motion shift for a frame in pixels"
                     " before it is considered an anomaly and thrown out of "
                     "processing"))
    abs_threshold = Float(
        missing=None, required=False, allow_none=True,
        description=("The absolute threshold to binarize ROI masks against. "
                     "If not provided will use quantile to generate "
                     "threshold."))
    binary_quantile = Float(
        missing=0.1,
        validate=Range(min=0, max=1),
        description=("The quantile against which an ROI is binarized. If not "
                     "provided will use default function value of 0.1."))
    npixel_threshold = Int(
        default=50, required=False,
        description=("ROIs with fewer pixels than this will be labeled as "
                     "invalid and small size."))
    aspect_ratio_threshold = Float(
        default=0.2, required=False,
        description=("ROIs whose aspect ratio is <= this value are "
                     "not recorded. This captures a large majority of "
                     "Suite2P-created artifacts from motion border"))
    morphological_ops = Bool(
        default=True, required=False,
        description=("whether to perform morphological operations after "
                     "binarization. ROIs that are washed away to empty "
                     "after this operation are eliminated from the record. "
                     "This can apply to ROIs that were previously labeled "
                     "as small size, for example."))
class AnnotationParameters(DefaultSchema):
    """Options controlling annotated-movie output."""

    annotate_movie = Bool(
        default=False,
        description="Flag for whether or not to annotate")
    output_file = OutputFile(default="./annotated.avi")
    fourcc = Str(
        description=("FOURCC string for video encoding. On Windows "
                     "H264 is not available by default, so it will "
                     "need to be installed or a different codec "
                     "used."))
class Stack(argschema.schemas.DefaultSchema):
    """A node in the directed graph of stacks to fuse (children nest recursively)."""

    stack = Str(required=True)
    transform = Nested(Transform, required=False)
    # recursive nesting: each child is itself a Stack node
    children = Nested("self", many=True)
    fuse_stack = Bool(
        required=False, default=True,
        description=(
            "whether to include this stack's in the output of fusion"))
class InputParameters(ArgSchema):
    """Inputs for computing current source density (CSD) around stimulus onset."""

    stimulus = Nested(
        StimulusInputParameters, required=True,
        help='Defines the stimulus from which CSD is calculated')
    probes = Nested(
        ProbeInputParameters, many=True, required=True,
        help='Probewise parameters.')
    pre_stimulus_time = Float(
        required=True,
        help='how much time pre stimulus onset is used for CSD calculation ')
    post_stimulus_time = Float(
        required=True,
        help='how much time post stimulus onset is used for CSD calculation ')
    num_trials = Int(
        default=None, allow_none=True,
        help='Number of trials after stimulus onset from which to compute CSD')
    volts_per_bit = Float(
        default=1.0,
        help='If the data are not in units of volts, they must be converted. In the past, this value was 0.195')
    memmap = Bool(
        default=False,
        help='whether to memory map the data file on disk or load it directly to main memory')
    memmap_thresh = Float(
        default=np.inf,
        help='files larger than this threshold (bytes) will be memmapped, regardless of the memmap setting.')
    filter_cuts = List(
        Float, default=[5.0, 150.0], cli_as_single_argument=True,
        help='Cutoff frequencies for bandpass filter')
    filter_order = Int(
        default=5,
        help='Order for bandpass filter')
    reorder_channels = Bool(
        default=True,
        help='Determines whether LFP channels should be re-ordered')
    noisy_channel_threshold = Float(
        default=1500.0,
        help='Threshold for removing noisy channels from analysis')
class DetectMontageDefectsParameters(RenderParameters, ZValueParameters,
                                     ProcessPoolParameters):
    """Inputs for detecting montage defects (seams, holes, gaps).

    Fixes:
    - ``match_collection`` used the misspelled keyword ``reuqired=True``,
      which marshmallow silently ignores, so the field was not actually
      required. Corrected to ``required=True``.
    - Descriptions for ``neighbors_distance`` and ``min_cluster_size``
      claimed defaults (60 and 7) that disagreed with the actual
      defaults (80 and 12); descriptions now match the code.
    - ``add_match_collection_owner`` now returns ``data`` (marshmallow 3
      uses the hook's return value as the loaded result).
    """

    prestitched_stack = Str(
        required=True,
        description='Pre stitched stack (raw stack)')
    poststitched_stack = Str(
        required=True,
        description='Stitched montage stack')
    match_collection = Str(
        required=True,
        description='Name of the montage point match collection')
    match_collection_owner = Str(
        required=False, default=None, missing=None,
        description='Name of the match collection owner')
    residual_threshold = Int(
        required=False, default=4, missing=4,
        description=('threshold value to filter residuals '
                     'for detecting seams (default = 4)'))
    neighbors_distance = Int(
        required=False, default=80, missing=80,
        description=('distance in pixels to look for '
                     'neighboring points in seam detection (default = 80)'))
    min_cluster_size = Int(
        required=False, default=12, missing=12,
        description=(
            'minimum number of point matches required in each cluster '
            'for taking it into account for seam detection (default = 12)'))
    threshold_cutoff = argschema.fields.List(
        argschema.fields.Float,
        required=False, default=[0.005, 0.005],
        description='Threshold for MAD cutoff in x and y')
    plot_sections = Bool(
        required=False, default=True, missing=True,
        description=("Do you want to plot the sections with defects "
                     "(holes or gaps)?. Will plot Bokeh plots in a html file"))
    out_html_dir = InputDir(
        required=False, default=None, missing=None,
        description="Folder to save the Bokeh plot defaults to /tmp directory")

    @post_load
    def add_match_collection_owner(self, data):
        # Default the match collection owner to the render owner.
        if data['match_collection_owner'] is None:
            data['match_collection_owner'] = data['render']['owner']
        return data
class AlignmentParameters(DefaultSchema):
    """SIFT / RANSAC / TrakEM2 optimization parameters for lens alignment."""

    rod = Float(
        required=True,
        description='ratio of distances for matching SIFT features')
    maxEpsilon = Float(
        required=True,
        description='maximum acceptable epsilon for RANSAC filtering')
    minInlierRatio = Float(
        required=True,
        description='minimum inlier ratio for RANSAC filtering')
    minNumInliers = Int(
        required=True,
        description='minimum number of inliers for RANSAC filtering')
    expectedModelIndex = Int(
        required=True,
        description=('expected model for RANSAC filtering, 0=Translation, '
                     '1="Rigid", 2="Similarity", 3="Affine"'))
    multipleHypotheses = Bool(
        required=True,
        description=('Utilize RANSAC filtering which allows '
                     'fitting multiple hypothesis models'))
    rejectIdentity = Bool(
        required=True,
        description=('Reject Identity model (constant background) '
                     'up to a tolerance defined by identityTolerance'))
    identityTolerance = Float(
        required=True,
        description='Tolerance to which Identity should rejected, in pixels')
    tilesAreInPlace = Bool(
        required=True,
        description=('Whether tile inputs to TrakEM2 are in place to be '
                     'compared only to neighboring tiles.'))
    desiredModelIndex = Int(
        required=True,
        description=('Affine homography model which optimization will '
                     'try to fit, 0="Translation", 1="Rigid", '
                     '2="Similarity", 3="Affine"'))
    # TODO no input for regularizer model, defaults to Rigid regularization
    regularize = Bool(
        required=True,
        description=('Whether to regularize the desired model with '
                     'the regularizer model by lambda'))
    maxIterationsOptimize = Int(
        required=True,
        description='Max number of iterations for optimizer')
    maxPlateauWidthOptimize = Int(
        required=True,
        description='Maximum plateau width for optimizer')
    dimension = Int(
        required=True,
        description=('Dimension of polynomial kernel to fit '
                     'for distortion model'))
    lambdaVal = Float(
        required=True,
        description=('Lambda parameter by which the regularizer model '
                     'affects fitting of the desired model'))
    clearTransform = Bool(
        required=True,
        description=('Whether to remove transforms from tiles '
                     'before SIFT matching. Not recommended'))
    visualize = Bool(
        required=True,
        description=('Whether to have TrakEM2 visualize the lens '
                     'correction transform. Not recommended when '
                     'running through xvfb'))
class url_options(DefaultSchema):
    """Render URL rendering/transform options used when fetching tile images."""

    normalizeForMatching = Bool(
        required=False, default=True, missing=True,
        description='normalize for matching')
    renderWithFilter = Bool(
        required=False, default=True, missing=True,
        description='Render with Filter')
    renderWithoutMask = Bool(
        required=False, default=False, missing=False,
        description='Render without mask')
    excludeAllTransforms = Bool(
        required=False, default=False, missing=False,
        description="Exclude all transforms")
    excludeFirstTransformAndAllAfter = Bool(
        required=False, default=False, missing=False,
        description="Exclude first transfrom and all after")
    excludeTransformsAfterLast = Bool(
        required=False, default=False, missing=False,
        description="Exclude transforms after last")
class PtMatchOptimizationParameters(RenderParameters):
    """Inputs for optimizing point-match (SIFT) parameters over sample tilepairs.

    Fix: the ``validate_data`` post_load hook previously did not return
    ``data``; marshmallow 3 replaces the loaded result with the hook's
    return value, so a missing return yields ``None``. Returning the
    (mutated) data is also compatible with marshmallow 2.
    """

    stack = Str(
        required=True,
        description=(
            'Name of the stack containing the tile pair (not the base stack)'))
    tile_stack = Str(
        required=False, default=None, missing=None,
        description='Name of the stack that will hold these two tiles')
    tilepair_file = InputFile(
        required=True,
        description='Tile pair file')
    no_tilepairs_to_test = Int(
        required=False, default=10, missing=10,
        description=('Number of tilepairs to be tested for '
                     'optimization - default = 10'))
    filter_tilepairs = Bool(
        required=False, default=False, missing=False,
        description=("Do you want filter the tilpair file for pairs "
                     "that overlap? - default = False"))
    max_tilepairs_with_matches = Int(
        required=False, default=0, missing=0,
        description=('How many tilepairs with matches required for '
                     'selection of optimized parameter set'))
    numberOfThreads = Int(
        required=False, default=5, missing=5,
        description='Number of threads to run point matching job')
    SIFT_options = Nested(SIFT_options, required=True)
    outputDirectory = OutputDir(
        required=True,
        description=(
            'Parent directory in which subdirectories will be '
            'created to store images and point-match results from SIFT'))
    url_options = Nested(url_options, required=True)
    pool_size = Int(
        required=False, default=10, missing=10,
        description='Pool size for parallel processing')

    @post_load
    def validate_data(self, data):
        # 0 means "use every tilepair we test"
        if data['max_tilepairs_with_matches'] == 0:
            data['max_tilepairs_with_matches'] = data['no_tilepairs_to_test']
        return data
class EyeParameters(DefaultSchema):
    """Full set of tuning knobs for pupil / corneal-reflection tracking.

    All defaults are drawn from the EyeTracker class constants.
    """

    cr_recolor_scale_factor = Float(
        default=EyeTracker.DEFAULT_CR_RECOLOR_SCALE_FACTOR,
        description="Size multiplier for corneal reflection recolor mask")
    min_pupil_value = Int(
        default=EyeTracker.DEFAULT_MIN_PUPIL_VALUE,
        description="Minimum value the average pupil shade can be")
    max_pupil_value = Int(
        default=EyeTracker.DEFAULT_MAX_PUPIL_VALUE,
        description="Maximum value the average pupil shade can be")
    recolor_cr = Bool(
        default=EyeTracker.DEFAULT_RECOLOR_CR,
        description="Flag for recoloring corneal reflection")
    adaptive_pupil = Bool(
        default=EyeTracker.DEFAULT_ADAPTIVE_PUPIL,
        description="Flag for whether or not to adaptively update pupil color")
    pupil_mask_radius = Int(
        default=EyeTracker.DEFAULT_PUPIL_MASK_RADIUS,
        description="Radius of pupil mask used to find seed point")
    cr_mask_radius = Int(
        default=EyeTracker.DEFAULT_CR_MASK_RADIUS,
        description="Radius of cr mask used to find seed point")
    smoothing_kernel_size = Int(
        default=EyeTracker.DEFAULT_SMOOTHING_KERNEL_SIZE,
        description=("Kernel size for median filter smoothing kernel (must be "
                     "odd)"))
    clip_pupil_values = Bool(
        default=EyeTracker.DEFAULT_CLIP_PUPIL_VALUES,
        description=("Flag of whether or not to restrict pupil values for "
                     "starburst to fall within the range of (min_pupil_value, "
                     "max_pupil_value)"))
    average_iris_intensity = Int(
        default=EyeTracker.DEFAULT_AVERAGE_IRIS_INTENSITY,
        description="Average expected intensity of the iris")
    max_eccentricity = Float(
        default=EyeTracker.DEFAULT_MAX_ECCENTRICITY,
        description="Maximum eccentricity allowed for pupil.")
class QualityMetricsParams(DefaultSchema):
    """Parameters for computing spike-sorting quality metrics."""

    isi_threshold = Float(
        required=False, default=0.0015,
        help='Maximum time (in seconds) for ISI violation')
    min_isi = Float(
        required=False, default=0.00,
        help='Minimum time (in seconds) for ISI violation')
    num_channels_to_compare = Int(
        required=False, default=13,
        help='Number of channels to use for computing PC metrics; must be odd')
    max_spikes_for_unit = Int(
        required=False, default=500,
        help='Number of spikes to subsample for computing PC metrics')
    max_spikes_for_nn = Int(
        required=False, default=10000,
        help='Further subsampling for NearestNeighbor calculation')
    n_neighbors = Int(
        required=False, default=4,
        help='Number of neighbors to use for NearestNeighbor calculation')
    n_silhouette = Int(
        required=False, default=10000,
        help='Number of spikes to use for calculating silhouette score')
    drift_metrics_min_spikes_per_interval = Int(
        required=False, default=10,
        help='Minimum number of spikes for computing depth')
    drift_metrics_interval_s = Float(
        required=False, default=100,
        help='Interval length is seconds for computing spike depth')
    quality_metrics_output_file = String(
        required=True,
        help='CSV file where metrics will be saved')
    include_pc_metrics = Bool(
        required=False, default=True,
        help='Compute features that require principal components')
class DepthField(DefaultSchema):
    """Description of a cortical depth field and its gradient."""

    gradient_field_path = InputFile(
        description=(
            "The path to an xarray file describing the gradient of cortical "
            "depth on some domain. This file should contain one dataarray "
            "called 'gradient' which has dimensions drawn from "
            "{'x', 'y', 'z', 'component'}. The coords of x, y, z define the "
            "domain over which the gradient was computed. The component "
            "dimension describes the dimension associated with each component "
            "of the gradient and should have coords drawn from {'x', 'y', 'z'}."),
        required=True)
    depth_field_path = InputFile(
        description=("As gradient field, but gives depth values"),
        required=True)
    soma_origin = Bool(
        description="If true, the field is centered at the soma",
        required=True,
        default=True)
    pia_sign = Int(
        description="which direction is the pia",
        required=True,
        default=1,
        validate=lambda val: val in {1, -1})
class EphysParams(DefaultSchema):
    """Neuropixels ephys acquisition parameters.

    Fix: ``lfp_sample_rate`` previously used the misspelled keyword
    ``require=True``, which marshmallow silently ignores (it ends up in
    field metadata), so the field was never actually marked required.
    Corrected to ``required=True`` to match the sibling fields; the
    supplied default keeps this backward-compatible for callers that
    omit the value.
    """

    sample_rate = Float(
        required=True, default=30000.0,
        help='Sample rate of Neuropixels AP band continuous data')
    lfp_sample_rate = Float(
        required=True, default=2500.0,
        help='Sample rate of Neuropixels LFP band continuous data')
    bit_volts = Float(
        required=True, default=0.195,
        help='Scalar required to convert int16 values into microvolts')
    num_channels = Int(
        required=True, default=384,
        help='Total number of channels in binary data files')
    reference_channels = NumpyArray(
        required=False,
        default=[36, 75, 112, 151, 188, 227, 264, 303, 340, 379],
        help='Reference channels on Neuropixels probe (numbering starts at 0)')
    template_zero_padding = Int(
        required=True, default=21,
        help='Zero-padding on templates output by Kilosort')
    vertical_site_spacing = Float(
        required=False, default=20e-6,
        help='Vertical site spacing in meters')
    probe_type = String(
        required=False, default='NP1',
        help='3A, 3B2, NP1')
    lfp_band_file = String(
        required=False,
        help='Location of LFP band binary file')
    ap_band_file = String(
        required=False,
        help='Location of AP band binary file')
    reorder_lfp_channels = Bool(
        required=False, default=True,
        help='Should we fix the ordering of LFP channels (necessary for 3a probes following extract_from_npx modules)')
    cluster_group_file_name = String(required=False, default='cluster_group.tsv')
class ImportAtlasSchema(RenderParameters):
    """Inputs for importing an Atlas project (a5proj) into render."""

    project_path = InputFile(
        required=True,
        description='Atlas a5proj file with data to import')
    LM_dataset_name = Str(
        required=True, default='test',
        description='Name of light microscopy dataset within atlas file')
    site_name = Str(
        required=True,
        description="name of site within Atlas file to import")
    output_stack = Str(
        required=True,
        description="name of stack to save into render")
    LM_stack = Str(
        required=True, default='ACQDAPI_1',
        description="Name of LM stack in render that was imported into atlas and whose coordinate system the EM tiles will be registered to")
    make_tiles = Bool(
        required=False, default=False,
        description="whether to launch jobs to make jpg img tiles of raw atlas tif's (inverting and flipping)")
class ApplyDeconvZonedParams(RenderParameters):
    """Inputs for applying zoned deconvolution to a single section."""

    input_stack = Str(
        required=True,
        description='Input stack')
    output_stack = Str(
        required=True,
        description='Output stack')
    output_directory = Str(
        required=True,
        description='Directory for storing Images')
    z_index = Int(
        required=True,
        description='z value for section')
    pool_size = Int(
        required=False, default=20,
        description='size of pool for parallel processing (default=20)')
    psf_file = InputFile(
        required=True,
        description='path to psf file')
    num_iter = Int(
        required=True, default=20,
        description='number of iterations (default=20)')
    bgrd_size = Int(
        required=False, default=20,
        description='size of rolling ball (default=20)')
    scale_factor = Int(
        required=False, default=1,
        description='scaling factor (default=1)')
    close_stack = Bool(
        required=False, default=False,
        description="whether to close stack or not")
class MeanWaveformParams(DefaultSchema):
    """Parameters for extracting mean spike waveforms.

    Fix: seven fields used the misspelled keyword ``require=`` which
    marshmallow silently ignores (the value lands in field metadata and
    the required flag is never set). All are corrected to ``required=``.
    Fields declared ``require=True`` carry defaults, so callers that
    previously omitted them are unaffected; ``require=False`` fields
    match marshmallow's default and are unchanged in behavior.
    """

    samples_per_spike = Int(
        required=True, default=82,
        help='Number of samples to extract for each spike')
    pre_samples = Int(
        required=True, default=20,
        help='Number of samples between start of spike and the peak')
    num_epochs = Int(
        required=True, default=1,
        help='Number of epochs to compute mean waveforms')
    spikes_per_epoch = Int(
        required=True, default=100,
        help='Max number of spikes per epoch')
    upsampling_factor = Float(
        required=False, default=200 / 82,
        help='Upsampling factor for calculating waveform metrics')
    spread_threshold = Float(
        required=False, default=0.12,
        help='Threshold for computing channel spread of 2D waveform')
    site_range = Int(
        required=False, default=16,
        help='Number of sites to use for 2D waveform metrics')
    cWaves_path = InputDir(
        required=False,
        help='directory containing the TPrime executable.')
    use_C_Waves = Bool(
        required=False, default=False,
        help='Use faster C routine to calculate mean waveforms')
    snr_radius = Int(
        required=False, default=8,
        help='disk radius (chans) about pk-chan for snr calculation in C_waves')
    mean_waveforms_file = String(
        required=True,
        help='Path to mean waveforms file (.npy)')
class OtherParameters(DefaultSchema):
    """Required SIFT/RANSAC/optimizer parameters (descriptions not yet filled in)."""

    rod = Float(required=True, metadata={'description': ''})
    maxEpsilon = Float(required=True, metadata={'description': ''})
    minInlierRatio = Float(required=True, metadata={'description': ''})
    minNumInliers = Int(required=True, metadata={'description': ''})
    expectedModelIndex = Int(required=True, metadata={'description': ''})
    multipleHypotheses = Bool(required=True, metadata={'description': ''})
    rejectIdentity = Bool(required=True, metadata={'description': ''})
    identityTolerance = Float(required=True, metadata={'description': ''})
    tilesAreInPlace = Bool(required=True, metadata={'description': ''})
    desiredModelIndex = Int(required=True, metadata={'description': ''})
    regularize = Bool(required=True, metadata={'description': ''})
    maxIterationsOptimize = Int(required=True, metadata={'description': ''})
    maxPlateauWidthOptimize = Int(required=True, metadata={'description': ''})
    dimension = Int(required=True, metadata={'description': ''})
    lambdaVal = Float(required=True, metadata={'description': ''})
    clearTransform = Bool(required=True, metadata={'description': ''})
    visualize = Bool(required=True, metadata={'description': ''})
class MeshLensCorrectionSchema(PointMatchOpenCVParameters):
    """Inputs for mesh-based lens correction of a raw lens data stack.

    Fix: the ``metafile_to_uri`` pre_load hook previously did not return
    ``data``; marshmallow 3 replaces the input with the hook's return
    value, so a missing return yields ``None``. Returning the (mutated)
    data is also compatible with marshmallow 2.
    """

    input_stack = Str(
        required=True,
        description="Name of raw input lens data stack")
    output_stack = Str(
        required=True,
        description="Name of lens corrected output stack")
    overwrite_zlayer = Bool(
        required=False, default=True, missing=True,
        description="Overwrite z layer (default = True)")
    rerun_pointmatch = Bool(
        required=False, default=True, missing=True,
        description="delete pointmatch values and rerun")
    close_stack = Bool(
        required=False, default=True, missing=True,
        description="Close input stack")
    do_montage_QC = Bool(
        required=False, default=True, missing=True,
        description="perform montage QC on stack result")
    match_collection = Str(
        required=True,
        description="name of point match collection")
    metafile = Str(
        required=False,
        description="fullpath of metadata file")
    metafile_uri = Str(
        required=True,
        description="uri_handler uri of metafile object")
    z_index = Int(
        required=True,
        description="z value for the lens correction data in stack")
    ncpus = Int(
        required=False, default=-1,
        description="max number of cpus to use")
    nvertex = Int(
        required=False, default=1000, missing=1000,
        description="maximum number of vertices to attempt")
    output_dir = OutputDir(
        required=False, default=None, missing=None,
        description="output dir to save tile pair file and qc json")
    outfile = Str(
        required=True,
        description=("File to which json output of lens correction "
                     "(leaf TransformSpec) is written"))
    regularization = Nested(regularization, missing={})
    good_solve = Nested(good_solve_criteria, missing={})
    sectionId = Str(
        required=True, default="xxx",
        description="section Id")
    mask_coords = List(
        List(Int), required=False, default=None, missing=None,
        cli_as_single_argument=True,
        description="Nx2 list of in-order bound coordinates")
    mask_dir = OutputDir(
        required=False, default=None, missing=None,
        description="directory for saving masks")
    mask_file = InputFile(
        required=False, default=None, missing=None,
        description="explicit mask setting from file")

    @marshmallow.pre_load
    def metafile_to_uri(self, data):
        # Derive metafile_uri from a posix metafile path when given.
        asap.utilities.schema_utils.posix_to_uri(
            data, "metafile", "metafile_uri")
        return data
class SolveMontageSectionParameters(RenderParameters):
    """Parameters for running the Matlab montage solver over a z range."""
    first_section = Int(
        required=True,
        description="Z index of the first section")
    last_section = Int(
        required=True,
        description="Z index of the last section")
    clone_section_stack = Bool(
        required=False, default=True,
        description=("Whether to clone out a temporary single section stack "
                     "from source_collection stack"))
    solver_executable = Str(
        required=True,
        description="Matlab solver executable with full path")
    verbose = Int(
        required=False, default=0, missing=0,
        description="Verbose output from solver needed?")
    solver_options = Nested(
        SolverOptionsParameters, required=True,
        description="Solver parameters")
    source_collection = Nested(
        SourceStackParameters, required=True,
        description=("Input stack parameters, will be created and deleted "
                     "after from input_stack"))
    target_collection = Nested(
        TargetStackParameters, required=True,
        description="Output stack parameters")
    source_point_match_collection = Nested(
        PointMatchCollectionParameters, required=True,
        description="Point match collection parameters")

    @staticmethod
    def _fill_stack_defaults(coll, render):
        """Populate missing stack-collection fields from base render params.

        Mutates ``coll`` in place; only fields that are ``None`` are set.
        """
        if coll['owner'] is None:
            coll['owner'] = render['owner']
        if coll['project'] is None:
            coll['project'] = render['project']
        if coll['service_host'] is None:
            # service_host is host:port without the scheme prefix
            host = render['host']
            if host.startswith('http://'):
                host = host[7:]
            coll['service_host'] = host + ":" + str(render['port'])
        if coll['baseURL'] is None:
            coll['baseURL'] = "http://{}:{}/render-ws/v1".format(
                render['host'], render['port'])
        if coll['renderbinPath'] is None:
            coll['renderbinPath'] = render['client_scripts']

    @post_load
    def add_missing_values(self, data):
        """Fill solver/collection defaults derived from the render params."""
        # The Matlab solver expects a field literally named "lambda", but
        # "lambda" is a python keyword and cannot be a schema field name,
        # so the schema declares "lambda_value" and we rename it here.
        data['solver_options']['lambda'] = data['solver_options'][
            'lambda_value']
        data['solver_options'].pop('lambda_value', None)

        # Source and target collections share identical default-filling
        # logic; source must be filled first because the point match
        # collection below may borrow its baseURL.
        self._fill_stack_defaults(data['source_collection'], data['render'])
        self._fill_stack_defaults(data['target_collection'], data['render'])

        pm = data['source_point_match_collection']
        if pm['server'] is None:
            pm['server'] = data['source_collection']['baseURL']
        if pm['owner'] is None:
            pm['owner'] = data['render']['owner']
class SolverOptionsParameters(DefaultSchema):
    """Options forwarded to the Matlab montage solver."""
    degree = Int(
        required=False, default=1, missing=1,
        description=("Degree of required transformation (0 - Rigid, "
                     "1 - Affine(default), 2 - Polynomial)"))
    solver = Str(
        required=False, default="backslash", missing="backslash",
        description="type of solver to solve the system (default - backslash)")
    close_stack = Bool(
        required=False, default=False,
        description=("whether the solver should close the stack after "
                     "uploading results"))
    transfac = Float(
        required=False, default=0.00001, missing=0.00001,
        description="translation factor")
    lambda_value = Float(
        required=False, default=1000, missing=1000,
        description="lambda for the solver")
    edge_lambda = Float(
        required=False, default=0.005, missing=0.005,
        description="edge lambda for solver regularization")
    nbrs = Int(
        required=False, default=2, missing=2,
        description=("number of neighboring sections to consider (applies "
                     "for cross section point matches)"))
    nbrs_step = Int(
        required=False, default=1, missing=1,
        description="neighbors step")
    xs_weight = Float(
        required=True,
        description="Cross section point match weights")
    min_points = Int(
        required=False, default=5, missing=5,
        description=("Minimum number of points correspondences required "
                     "per tile pair"))
    max_points = Int(
        required=False, default=200, missing=200,
        description=("Maximum number of point correspondences required "
                     "per tile pair"))
    filter_point_matches = Int(
        required=False, default=1, missing=1,
        description="Filter point matches from collection (default = 1)")
    outlier_lambda = Int(
        required=False, default=1000, missing=1000,
        description="lambda value for outliers")
    min_tiles = Int(
        required=False, default=3, missing=3,
        description="Minimum number of tiles in section")
    Width = Int(
        required=True,
        description="Width of the tiles")
    Height = Int(
        required=True,
        description="Height of the tiles")
    outside_group = Int(
        required=False, default=0, missing=0,
        description="Outside group")
    matrix_only = Int(
        required=False, default=0, missing=0,
        description="matrix only")
    distribute_A = Int(
        required=False, default=16, missing=16,
        description="Distribute A matrix")
    dir_scratch = OutputDir(
        required=True,
        description="Scratch directory")
    distributed = Int(
        required=False, default=0, missing=0,
        description="Distributed parameter of solver")
    use_peg = Int(
        required=False, default=0, missing=0,
        description="Use pegs? (default = 0)")
    verbose = Int(
        required=False, default=0, missing=0,
        description="Verbose output from solver needed?")
    debug = Int(
        required=False, default=0, missing=0,
        description="turn on debug mode (default = 0 - off)")
    constrain_by_z = Int(
        required=False, default=0, missing=0,
        description="Constrain solution by z (default = 0)")
    sandwich = Int(
        required=False, default=0, missing=0,
        description="sandwich factor of solver")
    # Field is Int, so the default must be an int: 10**15 == 1e15 exactly,
    # but avoids handing a float default to an integer field.
    constraint_fac = Int(
        required=False, default=10**15, missing=10**15,
        description="Constraint factor")
    pmopts = Nested(
        PointMatchFilteringOptions, required=True,
        description="Point match filtering options for solver")
    pastix = Nested(
        PastixOptions, required=True,
        description="Pastix solver options")
class QCParameters(DefaultSchema):
    """Quality-control output options for the eye tracker."""
    # Whether QC plots should be produced at all.
    generate_plots = Bool(
        description="Flag for whether or not to output QC plots",
        default=EyeTracker.DEFAULT_GENERATE_QC_OUTPUT)
    # Destination folder for the generated QC artifacts.
    output_dir = OutputDir(
        description="Folder to store QC outputs",
        default="./qc")
class TilePairClientParameters(RenderParameters):
    """Parameters for the render-ws tile pair client.

    Describes the input stack and neighborhood criteria used to generate
    candidate tile pairs, and where to write the resulting tilepair json.
    """
    stack = Str(
        required=True,
        description="input stack to which tilepairs need to be generated")
    baseStack = Str(
        required=False, default=None, missing=None,
        description="Base stack")
    minZ = Int(
        required=False, default=None, missing=None,
        description="z min for generating tilepairs")
    maxZ = Int(
        required=False, default=None, missing=None,
        description="z max for generating tilepairs")
    xyNeighborFactor = Float(
        required=False, default=0.9,
        description="Multiply this by max(width, height) of "
                    "each tile to determine radius for locating neighbor tiles")
    zNeighborDistance = Int(
        required=False, default=2, missing=2,
        description="Look for neighbor tiles with z values less than "
                    "or equal to this distance from the current tile's z value")
    excludeCornerNeighbors = Bool(
        required=False, default=True, missing=True,
        description="Exclude neighbor tiles whose center x and y is "
                    "outside the source tile's x and y range respectively")
    excludeSameLayerNeighbors = Bool(
        required=False, default=False, missing=False,
        description="Exclude neighbor tiles in the "
                    "same layer (z) as the source tile")
    excludeCompletelyObscuredTiles = Bool(
        required=False, default=True, missing=True,
        description="Exclude tiles that are completely "
                    "obscured by reacquired tiles")
    output_dir = OutputDir(
        required=True,
        description="Output directory path to save the tilepair json file")
    memGB = Str(
        required=False, default='6G', missing='6G',
        description="Memory for the java client to run")

    @post_load
    def validate_data(self, data):
        """Default the base stack to the input stack when not provided."""
        if data['baseStack'] is None:
            data['baseStack'] = data['stack']
        # marshmallow uses the hook's return value as the payload; returning
        # the mutated dict keeps this correct under marshmallow 3 (where a
        # None return would replace the data with None).
        return data