class SwapZsParameters(RenderParameters):
    """Input schema for swapping z sections between pairs of render stacks."""

    # stacks to read sections from / write sections to (paired by index)
    source_stack = List(
        Str, required=True, description="List of source stacks")
    target_stack = List(
        Str, required=True, description="List of target stacks")
    # post-copy stack state handling
    complete_source_stack = Boolean(
        required=False, default=False, missing=False,
        description=(
            "set source stack state to complete after copying Default=False"))
    complete_target_stack = Boolean(
        required=False, default=False, missing=False,
        description=(
            "set target stack state to complete after copying Default=False"))
    # one list of z values per stack pair
    zValues = List(List(Int, required=True))
    delete_source_stack = Boolean(
        required=False, default=False, missing=False,
        description=("Do you want to delete source stack after copying "
                     "its contents?. Default=False"))
    pool_size = Int(
        required=False, default=5, missing=5, description="Pool size")
class DataLoaderSchema(ArgSchema):
    """Arguments for loading and filtering landmark data from a csv file."""

    landmark_file = InputFile(
        required=True,
        description=("csv file, one line per landmark"))
    actions = List(
        Str, required=False, missing=[], default=[],
        cli_as_single_argument=True,
        description=("actions to perform on data"))
    header = List(
        Str, required=False, default=None, missing=None,
        cli_as_single_argument=True,
        description=("passed as names=header to pandas.read_csv()"))
    # nested source/destination coordinate specification
    sd_set = Nested(src_dst)
    all_flags = Bool(
        required=False, missing=False, default=False,
        description="if False, returns only flag=True data")
    exclude_labels = List(
        Int, required=True,
        missing=[100000, 200000], default=[100000, 200000],
        description="ignore Pt labels in this range")
class InputParameters(ArgSchema):
    """Inputs for locating a reconstructed cell within the CCF."""

    swc_path = InputFile(
        description='path to swc file for soma location', required=True)
    marker_path = InputFile(
        description='path to reconstruction marker file', required=True)
    slice_image_flip = Boolean(
        description=('indicates whether the image was flipped relative '
                     'to the slice (avg_group_label.name = \'Flip Slice Indicator\''),
        required=True)
    ccf_soma_location = List(
        Float,
        description='Soma location (x,y,z) coordinates in CCF',
        required=True)
    # the slice-cut-angle transform may be supplied in either of two forms
    slice_transform_list = List(
        Float, required=False, cli_as_single_argument=True,
        description='List defining the transform defining slice cut angle')
    slice_transform_dict = Nested(
        AffineDictSchema,
        description='Dict defining the transform defining the slice cut angle',
        required=False)
    ccf_path = InputFile(
        description='path to common cortical framework streamline file',
        required=True)

    @mm.validates_schema
    def validate_schema_input(self, data):
        """Cross-field validation, delegated to the shared affine validator."""
        validate_input_affine(data)
class LensQuiverSchema(ArgSchema):
    """Arguments for plotting lens-correction transforms as quiver plots."""

    transform_list = List(
        InputFile, required=True,
        description=("list of paths to transforms "
                     " or resolved tiles"))
    subplot_shape = List(
        Int, required=True, missing=[1, 1], default=[1, 1],
        description="sets the subplots for multiple plots")
    n_grid_pts = Int(
        required=True, missing=20, default=20,
        description="number of pts per axis for quiver grid")
    fignum = Int(
        required=True, missing=None, default=None,
        description="passed to plt.subplots to number the figure")
    arrow_scale = Float(
        required=True, missing=1.0, default=1.0,
        description="relative scale of arrows to axes")
    show = Boolean(
        required=True, missing=True, default=True,
        description=("show on screen?"))
    pdf_out = OutputFile(
        required=True,
        missing='./lens_corr_plots.pdf',
        default='./lens_corr_plots.pdf',
        description="where to write the pdf output")
class SimpleGeometry(DefaultSchema):
    """A named 2D geometric object defined by a list of [x, y] vertices."""

    name = String(description="identifier for this layer", required=True)
    path = List(
        List(Float),
        description=(
            "Coordinates defining this geometric object as [[x, y], [x, y]]"),
        required=True)
class CopiedFile(RaisingSchema):
    """Record of a single file copy, with optional content hashes."""

    source = String(required=True, description='copied from here')
    destination = String(required=True, description='copied to here')
    key = String(required=False, description='passed from inputs')
    # hashes carried as int arrays rather than bytes for JSONability
    source_hash = List(Int, required=False)
    destination_hash = List(Int, required=False)
class ReferenceLayerDepths(DefaultSchema):
    """Reference cortical layer depths, given either by a well known key or
    as an explicit names/boundaries pair.
    """

    key = String(
        description="The name of a well known set of reference layer depths",
        required=False,
        default=None,
        allow_none=True
    )
    names = List(
        String,
        description=(
            "Construct a custom sequence of layers using these names. Must "
            "also supply boundaries (there should be one more boundary than "
            "name)."
        ),
        cli_as_single_argument=True,
        required=False,
        default=None,
        allow_none=True
    )
    boundaries = List(
        Float,
        description=(
            "Construct a custom sequence of layers using these boundaries. "
            "Must also supply names."
        ),
        cli_as_single_argument=True,
        required=False,
        default=None,
        allow_none=True
    )

    @classmethod
    def is_valid(cls, value):
        """Check mutual exclusivity of key vs. names+boundaries.

        Returns True when valid; raises ValidationError otherwise.
        """
        key = value.get("key", None)
        names = value.get("names", None)
        boundaries = value.get("boundaries", None)

        if key is not None:
            # a well known key must stand alone
            if names is not None or boundaries is not None:
                raise ValidationError(
                    "cannot supply key along with names and boundaries"
                )
            return True

        if names is None or boundaries is None:
            raise ValidationError(
                "must supply either key or names and boundaries"
            )

        # custom layers: boundaries bracket the named layers
        if len(boundaries) != len(names) + 1:
            raise ValidationError("must supply len(names) + 1 boundaries")
        return True
class matrix_assembly(ArgSchema):
    """Parameters controlling assembly of the point-match matrix."""

    depth = List(
        Int,
        cli_as_single_argument=True,
        default=[0, 1, 2],
        required=False,
        description='depth in z for matrix assembly point matches')
    explicit_weight_by_depth = List(
        Float,
        cli_as_single_argument=True,
        default=None,
        missing=None,
        description='explicitly set solver weights by depth')

    @pre_load
    def tolist(self, data):
        """Expand a scalar depth N into the list [0, 1, ..., N]."""
        # fix: guard on key presence so this hook does not KeyError when the
        # caller omits 'depth' and relies on the schema default
        if 'depth' in data and not isinstance(data['depth'], list):
            data['depth'] = np.arange(0, data['depth'] + 1).tolist()
        # fix: pre_load must return the data; marshmallow 3 deserializes the
        # hook's return value, so falling off the end would drop the payload
        return data

    @post_load
    def check_explicit(self, data):
        """Require explicit weights, when given, to match depth in length."""
        if data['explicit_weight_by_depth'] is not None:
            if (len(data['explicit_weight_by_depth']) !=
                    len(data['depth'])):
                raise ValidationError(
                    "matrix_assembly['explicit_weight_by_depth'] "
                    "must be the same length as matrix_assembly['depth']")
        # fix: post_load must return the data (see tolist above)
        return data

    cross_pt_weight = Float(
        default=1.0,
        required=False,
        description='weight of cross section point matches')
    montage_pt_weight = Float(
        default=1.0,
        required=False,
        description='weight of montage point matches')
    npts_min = Int(
        default=5,
        missing=5,
        required=False,
        description='disregard any tile pairs with fewer points than this')
    npts_max = Int(
        default=500,
        required=False,
        description='truncate any tile pairs to this size')
    choose_random = Boolean(
        default=False,
        required=False,
        description=("choose random pts to meet for npts_max"
                     " vs. just first npts_max"))
    inverse_dz = Boolean(
        default=True,
        required=False,
        description='cross section point match weighting fades with z')
class SessionData(RaisingSchema):
    """Metadata and input file paths describing one ophys experiment."""

    ophys_experiment_id = Int(
        required=True,
        description='unique identifier for this ophys session')
    rig_name = String(required=True, description='name of ophys device')
    movie_height = Int(
        required=True,
        description='height of field-of-view for 2p movie')
    movie_width = Int(
        required=True,
        description='width of field-of-view for 2p movie')
    container_id = Int(
        required=True,
        description='container that this experiment is in')
    # input file paths
    sync_file = String(required=True, description='path to sync file')
    segmentation_mask_image_file = String(
        required=True,
        description='path to segmentation_mask_image file')
    max_projection_file = String(
        required=True, description='path to max_projection file')
    behavior_stimulus_file = String(
        required=True, description='path to behavior_stimulus file')
    dff_file = String(required=True, description='path to dff file')
    demix_file = String(required=True, description='path to demix file')
    average_intensity_projection_image_file = String(
        required=True,
        description='path to average_intensity_projection_image file')
    rigid_motion_transform_file = String(
        required=True,
        description='path to rigid_motion_transform file')
    # experiment metadata
    targeted_structure = String(
        required=True,
        description='Anatomical structure that the experiment targeted')
    targeted_depth = Int(
        required=True,
        description='Cortical depth that the experiment targeted')
    stimulus_name = String(required=True, description='Stimulus Name')
    date_of_acquisition = String(
        required=True,
        # fix: corrected typo "relative ot UTC" -> "relative to UTC"
        description=('date of acquisition of experiment, as string '
                     '(no timezone info but relative to UTC)')
    )
    reporter_line = List(String, required=True, description='reporter line')
    driver_line = List(String, required=True, description='driver line')
    external_specimen_name = Int(
        required=True, description='LabTracks ID of the animal')
    full_genotype = String(required=True, description='full genotype')
    surface_2p_pixel_size_um = Float(
        required=True,
        # fix: rejoined string literal broken across source lines
        description='the spatial extent (in um) of the 2p field-of-view')
    ophys_cell_segmentation_run_id = Int(
        required=True,
        description=('ID of the active segmentation run used to generate '
                     'this file'))
    cell_specimen_table_dict = Nested(
        CellSpecimenTable, required=True,
        description='Table of cell specimen info')
    sex = String(required=True, description='sex')
    age = String(required=True, description='age')
class SwapZsOutput(DefaultSchema):
    """Output schema reporting which stacks and z values were swapped."""

    source_stacks = List(
        Str, required=True,
        description=(
            "List of source stacks that have been successfully swapped"))
    target_stacks = List(
        Str, required=True,
        description=(
            "List of target stacks that have been successfully swapped"))
    # one list of z values per swapped stack pair
    swapped_zvalues = List(List(Int, required=True))
class InputParameters(ArgSchema):
    """Inputs for assembling an image series into reference-space volumes."""

    class Meta:
        # reject unexpected input keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(
        default='INFO',
        description="set the logging level of the module")
    case = String(
        required=True,
        validate=lambda s: s in VALID_CASES,
        help='select a use case to run')
    sub_images = Nested(
        SubImage, required=True, many=True,
        help='Sub images composing this image series')
    affine_params = List(
        Float,
        help='Parameters of affine image stack to reference space transform.')
    deformation_field_path = String(
        required=True,
        help='Path to parameters of the deformable local transform from '
             'affine-transformed image stack to reference space transform.'
    )
    image_series_slice_spacing = Float(
        required=True,
        help='Distance (microns) between successive images in this series.')
    target_spacings = List(
        Float, required=True,
        help='For each volume produced, downsample to this isometric resolution'
    )
    reference_spacing = Nested(
        ReferenceSpacing, required=True,
        help='Native spacing of reference space (microns).')
    reference_dimensions = Nested(
        ReferenceDimensions, required=True,
        help='Native dimensions of reference space.')
    sub_image_count = Int(
        required=True, help='Expected number of sub images')
    grid_prefix = String(
        required=True, help='Write output grid files here')
    accumulator_prefix = String(
        required=True,
        help='If this run produces accumulators, write them here.')
    storage_directory = String(
        required=False,
        help='Storage directory for this image series. Not used')
    filter_bit = Int(
        default=None, allow_none=True,
        help='if provided, signals that pixels with this bit high have '
             'passed the optional post-filter stage'
    )
    nprocesses = Int(
        default=8, help='spawn this many worker subprocesses')
    reduce_level = Int(
        default=0,
        help='power of two by which to downsample each input axis')
class BehaviorSessionData(RaisingSchema):
    """Metadata required to write a behavior session to NWB."""

    behavior_session_id = Int(
        required=True,
        description=("Unique identifier for the "
                     "behavior session to write into "
                     "NWB format"))
    foraging_id = String(
        required=True,
        description=("The foraging_id for the behavior "
                     "session"))
    driver_line = List(
        String, required=True,
        description='Genetic driver line(s) of subject')
    reporter_line = List(
        String, required=True,
        description='Genetic reporter line(s) of subject')
    full_genotype = String(
        required=True,
        description='Full genotype of subject')
    rig_name = String(
        required=True,
        description=("Name of experimental rig used for "
                     "the behavior session"))
    date_of_acquisition = String(
        required=True,
        description=("Date of acquisition of "
                     "behavior session, in string "
                     "format"))
    external_specimen_name = Int(
        required=True,
        description='LabTracks ID of the subject')
    behavior_stimulus_file = String(
        required=True,
        validate=check_read_access,
        description=("Path of behavior_stimulus "
                     "camstim *.pkl file"))
    date_of_birth = String(
        required=True, description="Subject date of birth")
    sex = String(required=True, description="Subject sex")
    age = String(required=True, description="Subject age")
    stimulus_name = String(
        required=True,
        description=("Name of stimulus presented during "
                     "behavior session"))

    @mm.pre_load
    def set_stimulus_name(self, data, **kwargs):
        """Fill in stimulus_name from the stimulus pickle when absent."""
        if data.get("stimulus_name") is not None:
            return data
        pkl = pd.read_pickle(data["behavior_stimulus_file"])
        try:
            data["stimulus_name"] = (
                pkl["items"]["behavior"]["cl_params"]["stage"])
        except KeyError:
            raise mm.ValidationError(
                f"Could not obtain stimulus_name/stage information from "
                f"the *.pkl file ({data['behavior_stimulus_file']}) "
                f"for the behavior session to save as NWB! The "
                f"following series of nested keys did not work: "
                f"['items']['behavior']['cl_params']['stage']"
            )
        return data
class MySchema(ArgSchema):
    """Example schema demonstrating array, list, and nested fields."""

    array = NumpyArray(
        default=[[1, 2, 3], [4, 5, 6]],
        dtype="uint8",
        description="my example array")
    string_list = List(
        List(Str),
        default=[["hello", "world"], ["lists!"]],
        cli_as_single_argument=True,
        description="list of lists of strings")
    int_list = List(
        Int,
        default=[1, 2, 3],
        cli_as_single_argument=True,
        description="list of ints")
    nested = Nested(MyNestedSchema, required=True)
class InputParameters(ArgSchema):
    """Inputs for uploading a neuron reconstruction to an S3 landing bucket."""

    destination_bucket = Nested(
        S3LandingBucket,
        description="s3 landing bucket info (bucket_name/access_point_arn and region)",
        required=True)
    neuron_reconstruction_id = Int(
        description="neuron reconstruction id",
        required=True,
    )
    specimen_id = Int(description="specimen id", required=True)
    primary_boundaries = Nested(
        PrimaryBoundaries,
        description="primary boundaries",
        required=True)
    swc_file = String(
        description="path to input swc (csv) file", required=True)
    cell_depth = Float(
        description="cell depth", required=True, allow_none=True)
    cut_thickness = Float(
        description="cut thickness", required=True, allow_none=True)
    marker_file = String(
        description="path to input marker (csv) file", required=True)
    ccf_soma_xyz = List(
        Float,
        cli_as_single_argument=True,
        description="soma location (x,y,z) coordinates in CCF",
        required=True)
    slice_transform = List(
        Float,
        cli_as_single_argument=True,
        description='List defining the transform defining slice cut angle',
        required=True,
        allow_none=True,
    )
    slice_image_flip = Boolean(
        description=('indicates whether the image was flipped relative '
                     'to the slice (avg_group_label.name = \'Flip Slice Indicator\''),
        required=True)
class CellSpecimenTable(RaisingSchema):
    """Cell specimen table serialized as column -> {row_id: value} dicts."""

    cell_roi_id = Dict(String, Int, required=True)
    cell_specimen_id = Dict(String, Int(allow_none=True), required=True)
    # roi bounding-box position
    x = Dict(String, Int, required=True)
    y = Dict(String, Int, required=True)
    # motion-correction extents per roi
    max_correction_up = Dict(String, Float, required=True)
    max_correction_right = Dict(String, Float, required=True)
    max_correction_down = Dict(String, Float, required=True)
    max_correction_left = Dict(String, Float, required=True)
    valid_roi = Dict(String, Boolean, required=True)
    # roi bounding-box size
    height = Dict(String, Int, required=True)
    width = Dict(String, Int, required=True)
    mask_image_plane = Dict(String, Int, required=True)
    roi_mask = Dict(String, List(List(Boolean)), required=True)
class PairwiseRigidOutputSchema(DefaultSchema):
    """Output schema for the pairwise-rigid alignment module."""

    minZ = Int(
        required=True,
        description="minimum z value in output stack")
    maxZ = Int(
        required=True,
        # fix: description previously (incorrectly) said "minimum"
        description="maximum z value in output stack")
    output_stack = Str(
        required=True,
        description="name of output stack")
    missing = List(
        Int,
        required=True,
        description="list of z values missing in z range of output stack")
    masked = List(
        Int,
        required=True,
        description="list of z values masked in z range of output stack")
    residuals = List(
        Dict,
        required=True,
        description="pairwise residuals in output stack")
class Roi(RaisingSchema):
    """A single segmented ROI: raster mask plus bounding box and ids."""

    mask = List(List(Boolean), required=True, description='raster mask')
    y = Integer(
        required=True,
        description='y position (pixels) of mask\'s bounding box')
    x = Integer(
        required=True,
        description='x position (pixels) of mask\'s bounding box')
    width = Integer(
        required=True,
        description='width (pixels)of mask\'s bounding box')
    height = Integer(
        required=True,
        description='height (pixels) of mask\'s bounding box')
    valid = Boolean(
        default=True,
        description='Is this Roi known to be valid?')
    id = Integer(
        required=True,
        description='unique integer identifier for this Roi')
    mask_page = Integer(default=-1, description='')  # TODO: this isn't in the examples
class SwapPointMatchesOutput(DefaultSchema):
    """Output schema reporting swapped and non-swapped match group ids."""

    source_collection = Str(
        required=True,
        description="Source point match collection")
    target_collection = Str(
        required=True,
        description="Target point match collection")
    swapped_zs = List(
        Int, required=True,
        description="List of group ids that got swapped")
    nonswapped_zs = List(
        Int, required=True,
        description="List of group ids that did not get swapped")
class ApplyLensCorrectionParameters(StackTransitionParameters):
    """Parameters for applying a lens correction transform to a stack."""

    transform = Nested(TransformParameters)
    refId = Str(
        allow_none=True, required=True,
        description=('Reference ID to use when uploading transform to '
                     'render database (Not Implemented)'))
    labels = List(
        Str,
        required=False, missing=['lens'], default=['lens'],
        description="labels for the lens correction transform")
    maskUrl = InputFile(
        required=False,
        default=None,
        missing=None,
        description='path to level 0 maskUrl to apply to stack')
    maskUrl_uri = Str(
        required=False,
        default=None,
        missing=None,
        description="uri for level 0 mask image to apply")

    @marshmallow.pre_load
    def maskUrl_to_uri(self, data):
        """Populate maskUrl_uri from the posix maskUrl path before load."""
        rendermodules.utilities.schema_utils.posix_to_uri(
            data, "maskUrl", "maskUrl_uri")
        # fix: pre_load hooks must return the data dict -- marshmallow 3
        # deserializes the hook's return value, so falling off the end
        # (returning None) would drop the input payload.
        return data
class DataLoaderSchema(ArgSchema):
    """Arguments for loading landmark data from a csv file."""

    landmark_file = InputFile(
        required=True,
        description=("csv file, one line per landmark"))
    actions = List(
        Str, required=False, missing=[], default=[],
        cli_as_single_argument=True,
        description=("actions to perform on data"))
    header = List(
        Str, required=False, default=None, missing=None,
        cli_as_single_argument=True,
        description=("passed as names=header to pandas.read_csv()"))
    # nested source/destination coordinate specification
    sd_set = Nested(src_dst)
class PairwiseRigidSchema(StackTransitionParameters):
    """Parameters for pairwise rigid alignment of a stack."""

    match_collection = Str(
        required=True,
        description="Point match collection name")
    gap_file = InputFile(
        required=False,
        default=None,
        missing=None,
        # fix: the concatenated fragments were missing separating spaces,
        # producing "skipentries" and "willbe" in the rendered help text
        description=("json file {k: v} where int(k) is a z value to skip. "
                     "entries in here that are not already missing will "
                     "be omitted from the output stack, "
                     "i.e. this is a place one can skip sections"))
    translate_to_positive = Bool(
        required=False,
        default=True,
        missing=True,
        description="translate output stack to positive space")
    translation_buffer = List(
        Float,
        required=False,
        default=[0, 0],
        missing=[0, 0],
        description=("minimum (x, y) of output stack if "
                     "translate_to_positive=True"))
    anchor_stack = Str(
        # fix: was `require=False` (typo) -- marshmallow would have treated
        # it as an unknown kwarg (stashed in metadata) instead of marking
        # the field optional
        required=False,
        default=None,
        missing=None,
        description=("fix transforms using tiles in this stack"))
class Metadatum(DefaultSchema):
    """ A piece of lightweight data """

    name = String(
        description=(
            "Identifier for this piece of metadata. Sinks will use this field "
            "in order to determine how metadata ought to be stored."
        ),
        required=True
    )
    value = Field(
        description="The value of this metadata",
        required=True
    )
    sweep_number = Int(
        # fix: description was truncated mid-sentence ("If this is a ")
        description=(
            "If this is a sweep-level metadatum, the number of the sweep "
            "to which it applies"
        ),
        required=False
    )
    sinks = List(
        String,
        description="Sink(s) to which this metadatum ought to be written",
        required=True,
        default=list,
        validate=lambda x: len(x) > 0
    )
class tPrimeParams(DefaultSchema):
    """Parameters for running TPrime to align event times across streams."""

    tPrime_path = InputDir(
        help='directory containing the TPrime executable.')
    sync_period = Float(
        default=1.0, help='Period of sync waveform (sec).')
    toStream_sync_params = String(
        required=False,
        default='SY=0,384,6,500',
        help='string of CatGT params used to extract to stream sync edges, '
             'e.g. SY=0,384,6,500'
    )
    ni_sync_params = String(
        required=False,
        default='XA=0,1,3,500',
        help='string of CatGT params used to extract NI sync edges, '
             'e.g. XA=0,1,3,500'
    )
    ni_ex_list = String(
        required=False,
        default='',
        help='string of CatGT params used to extract edges from ni, '
             'e.g. XA=0,1,3,500'
    )
    im_ex_list = String(
        required=False,
        default='',
        help='string of CatGT params used to extract edges from im streams, '
             'e.g. SY=0,384,6,500'
    )
    # 3A-specific handling
    tPrime_3A = Boolean(
        required=False, default=False, help='is this 3A data?')
    toStream_path_3A = String(
        required=False, help='full path to toStream edges file')
    fromStream_list_3A = List(
        String, required=False,
        help='list of full paths to fromStream edges files')
class ProbeInputParameters(DefaultSchema):
    """Describes one probe's LFP data and channel layout for CSD computation."""

    name = String(required=True, help='Identifier for this probe.')
    lfp_data_path = String(
        required=True, help='Path to lfp data for this probe')
    lfp_timestamps_path = String(
        required=True,
        help="Path to aligned lfp timestamps for this probe.")
    surface_channel = Int(
        required=True,
        help='Estimate of surface (pia boundary) channel index')
    reference_channels = List(
        Int, many=True,
        help='Indices of reference channels for this probe')
    csd_output_path = String(
        required=True, help='CSD output will be written here.')
    sampling_rate = Float(
        required=True, help='sampling rate assessed on master clock')
    total_channels = Int(
        default=384, help='Total channel count for this probe.')
    surface_channel_adjustment = Int(
        default=40,
        help='Erring up in the surface channel estimate is less dangerous '
             'for the CSD calculation than erring down, so an adjustment '
             'is provided.'
    )
    spacing = Float(
        default=0.04,
        # fix: corrected spelling ("millimiters" -> "millimeters")
        help='distance (in millimeters) between lengthwise-adjacent rows '
             'of recording sites on this probe.'
    )
    phase = String(
        required=True,
        help='The probe type (3a or PXI) which determines if channels '
             'need to be reordered'
    )
class ProbeOutputParameters(DefaultSchema):
    """Outputs produced for one probe by the CSD computation."""

    name = String(required=True, help='Identifier for this probe.')
    csd_path = String(
        required=True,
        help='Path to current source density file.')
    csd_channels = List(
        Int, required=True,
        help='LFP channels from which CSD was calculated.')
class InputParameters(ArgSchema):
    """Inputs for running morphology feature extraction."""

    reconstructions = Nested(
        Reconstruction,
        description="The morphological reconstructions to be processed",
        required=True,
        many=True
    )
    heavy_output_path = OutputFile(
        description=(
            "features whose results are heavyweight data (e.g. the numpy "
            "arrays returned by layer histograms features) are stored here."
        ),
        required=True
    )
    feature_set = String(
        description="select the basic set of features to calculate",
        required=False,
        default="aibs_default"
    )
    only_marks = List(
        String,
        cli_as_single_argument=True,
        description=(
            "restrict calculated features to those with this set of marks"
        ),
        required=False
    )
    # NOTE(review): `many=True` is not a String field option in marshmallow
    # (it lands in metadata); a List(String) may have been intended -- left
    # unchanged here to preserve the existing interface. Confirm upstream.
    required_marks = String(
        description=(
            "Error (vs. skip) if any of these marks fail validation"
        ),
        required=False,
        many=True
    )
    output_table_path = OutputFile(
        description=(
            "this module writes outputs to a json specified as --output_json. "
            "If you want to store outputs in a different format "
            "(.csv is supported currently), specify this parameter"
        ),
        required=False
    )
    num_processes = Int(
        description=(
            "Run a multiprocessing pool with this many processes. "
            "Default is min(number of cpus, number of swcs). "
            "Setting num_processes to 1 will avoid a pool."
        ),
        required=False,
        default=None,
        allow_none=True
    )
    global_parameters = Nested(
        GlobalParameters,
        description=(
            "provide additional configuration to this feature extraction run. "
            "This configuration will be applied to all morphologies processed."
        ),
        required=False
    )
class QCPointMatchResultsParameters(RenderParameters):
    """Parameters for quality-controlling point match results."""

    matchcollections = List(
        Str, required=True,
        metadata={'description': 'list of match collections to analyze'})
    input_tilepairfile = InputFile(
        required=True,
        metadata={'description': 'file path of tile pair file to qc'})
    output_tilepairfile = Str(
        required=True,
        metadata={
            'description':
                'file path of where to save the tile pair file to qc'
        })
    figdir = Str(
        required=True,
        metadata={'description': 'directory to save images'})
    min_matches = Int(
        required=False,
        default=5,
        metadata={
            'description':
                'number of matches between tiles to be considered a valid match'
        })
    pool_size = Int(
        required=False,
        default=20,
        metadata={'description': 'number of parallel threads to use'})
class MaterializeSectionsParameters(
        argschema.ArgSchema, MaterializedBoxParameters,
        ZRangeParameters, RenderParametersRenderWebServiceParameters,
        SparkParameters):
    """Parameters for materializing boxed sections via Spark."""

    cleanUpPriorRun = Boolean(
        required=False,
        description=(
            "whether to regenerate most recently generated boxes of an "
            "identical plan. Useful for rerunning failed jobs."))
    explainPlan = Boolean(
        required=False,
        description=(
            "whether to perform a dry run, logging as partition stages are run "
            "but skipping materialization"))
    maxImageCacheGb = Float(
        required=False,
        default=2.0,
        description=(
            "maximum image cache in GB of tilespec level 0 data to cache per "
            "core. Larger values may degrade performance due "
            "to JVM garbage collection."))  # TODO see Eric's
    zValues = List(
        Int, required=False,
        description=("z indices to materialize"))
class AddMipMapsToStackOutput(DefaultSchema):
    """Output schema for the add-mipmaps-to-stack module."""

    output_stack = Str(required=True)
    missing_ts_zs = List(
        Int,
        required=False,
        default=[],
        missing=[],
        cli_as_single_argument=True,
        description="Z values for which apply mipmaps failed")
class Flashes(DefaultSchema):
    """Parameters for flash-stimulus analysis."""

    # NOTE(review): `Flashes` in the default below must already be bound at
    # class-body execution time (presumably an imported analysis class of
    # the same name) -- it cannot refer to this schema class, which does not
    # exist yet while its body executes. Confirm against the module imports.
    stimulus_key = List(
        String,
        default=Flashes.known_stimulus_keys(),
        help='Key for the flash stimulus')
    trial_duration = Float(
        default=0.25,
        # fix: corrected grammar ("a epoch" -> "an epoch")
        help='typical length of an epoch for given stimulus in seconds')
    psth_resolution = Float(
        default=0.001,
        # fix: corrected spelling ("resultion" -> "resolution")
        help='resolution (seconds) for generating PSTH')