class SubImage(RaisingSchema):
    """Schema describing a single sub-image record: its tissue index,
    pixel dimensions/spacing, and path/polygon lookups keyed per structure.
    """
    specimen_tissue_index = Int()
    dimensions = Nested(ImageDimensions)  # pixel extent of the sub-image
    spacing = Nested(ImageSpacing)        # physical spacing of the pixels
    segmentation_paths = Dict()  # presumably structure name -> segmentation file path; verify against caller
    intensity_paths = Dict()     # presumably structure name -> intensity file path; verify against caller
    polygons = Dict()
class InputParameters(ArgSchema):
    """Input schema for building a stimulus table from a raw stimulus pkl
    and a sync h5 file, writing the table csv and frame times out.
    """
    stimulus_pkl_path = String(
        required=True,
        help="path to pkl file containing raw stimulus information")
    # NOTE(review): "syncronization" is a typo in the user-facing help text;
    # left unchanged here since it is a runtime string.
    sync_h5_path = String(
        required=True,
        help="path to h5 file containing syncronization information")
    output_stimulus_table_path = String(
        required=True,
        help="the output stimulus table csv will be written here")
    output_frame_times_path = String(required=True,
                                     help="output all frame times here")
    # Defaults to the smallest positive float so that by default no sweep is
    # rejected for being too short.
    # NOTE(review): "fewer that" should read "fewer than" in the help text.
    minimum_spontaneous_activity_duration = Float(
        default=sys.float_info.epsilon,
        help=
        "detected spontaneous activity sweeps will be rejected if they last fewer that this many seconds",
    )
    # NOTE(review): field name and help text misspell "spontaneous"; the field
    # name is part of the public schema, so it cannot be renamed safely here.
    maximum_expected_spontanous_activity_duration = Float(
        default=1225.02541,
        help=
        "validation will fail if a spontanous activity epoch longer than this one is computed.",
    )
    frame_time_strategy = String(
        default="use_photodiode",
        help=
        "technique used to align frame times. Options are 'use_photodiode', which interpolates frame times between photodiode edge times (preferred when vsync times are unreliable) and 'use_vsyncs', which is preferred when reliable vsync times are available.",
    )
    # Optional renames applied to stimulus names / column names in the output.
    stimulus_name_map = Dict(keys=String(), values=String(),
                             help="optionally rename stimuli",
                             default=default_stimulus_renames)
    column_name_map = Dict(keys=String(), values=String(),
                           help="optionally rename stimulus parameters",
                           default=default_column_renames)
    extract_const_params_from_repr = Bool(default=True)
    drop_const_params = List(
        String(),
        help="columns to be dropped from the stimulus table",
        default=["name", "maskParams", "win", "autoLog", "autoDraw"],
    )
    fail_on_negative_duration = Bool(
        default=False,
        help=
        "Determine if the module should fail if a stimulus epoch has a negative duration."
    )
class MontageSolverSchema(ArgSchema):
    """Input schema for the montage solver.

    Either ``data_dir`` or ``metafile`` must be provided (enforced by
    ``check_metafile``), and every solver template listed in
    ``solver_templates`` must exist inside ``solver_template_dir``
    (enforced by ``check_solver_inputs``).
    """
    data_dir = InputDir(
        required=False,
        description="directory containing metafile, images, and matches")
    metafile = InputFile(
        required=False,
        description=("fullpath to metafile. Helps in the case of multiple"
                     " metafiles in one directory. data_dir will take "
                     " os.path.dirname(metafile)"))
    output_dir = OutputDir(required=False,
                           missing=None,
                           default=None,
                           description="directory for output files")
    read_transform_from = Str(
        required=False,
        missing='metafile',
        default='metafile',
        validator=mm.validate.OneOf(['metafile', 'reffile', 'dict']),
        description="3 possible ways to read in the reference transform")
    ref_transform = InputFile(required=False,
                              missing=None,
                              default=None,
                              description="transform json")
    # BUG FIX: keyword was misspelled `require=False`; marshmallow silently
    # stores unknown kwargs as field metadata, so the intent (an explicitly
    # optional field) was never expressed. `required=False` states it properly.
    ref_transform_dict = Dict(required=False,
                              missing=None,
                              description="transform in from memory")
    ransacReprojThreshold = Float(
        required=False,
        missing=10.0,
        default=10.0,
        description=("passed into cv2.estimateAffinePartial2D()"
                     "for RANSAC filtering of montage template matches"))
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        description=("tilespecs will be .json or .json.gz"))
    solver_templates = List(
        Str,
        required=True,
        description="input json basenames for the solver args")
    solver_template_dir = InputDir(
        required=True,
        description="location of the templates for the solver")

    @mm.post_load
    def check_solver_inputs(self, data):
        """Raise ValidationError if any listed solver template file is absent."""
        for args in data['solver_templates']:
            argpath = os.path.join(data['solver_template_dir'], args)
            if not os.path.isfile(argpath):
                raise mm.ValidationError(
                    "solver arg file doesn't exist: %s" % argpath)

    @mm.post_load
    def check_metafile(self, data):
        """Raise ValidationError unless data_dir or metafile was supplied."""
        # BUG FIX: was bitwise `&`; equivalent on these bools but `and`
        # short-circuits and states the logical intent.
        if ('data_dir' not in data) and ('metafile' not in data):
            raise mm.ValidationError(" must specify either data_dir"
                                     " or metafile")
class TransformParameters(DefaultSchema):
    """Schema for a single Render Transform Spec entry."""
    type = Str(
        required=True,
        validator=OneOf(["leaf", "interpolated", "list", "ref"]),
        description=('Transform type as defined in Render Transform Spec. '
                     'This module currently expects a "leaf"'))
    className = Str(required=True,
                    description='mpicbg-compatible className')
    dataString = Str(required=True,
                     description='mpicbg-compatible dataString')
    # Optional free-form metadata; kept only because this schema declares it.
    metaData = Dict(required=False,
                    description="in this schema, otherwise will be stripped")
class OutputParameters(DefaultSchema):
    """Output schema: echoes the input parameters alongside the
    feature-extraction results dictionary.
    """
    inputs = Nested(
        InputParameters,
        description="The parameters argued to this executable",
        required=True
    )
    results = Dict(
        description="The outputs of feature extraction",
        required=True
    )
class InputParameters(ArgSchema):
    """Input schema for building an optotagging stimulation table from an
    opto pickle file and a sync h5 file.
    """
    opto_pickle_path = String(
        required=True,
        help='path to file containing optotagging information')
    # NOTE(review): "syncronization" is a typo in the user-facing help text;
    # left unchanged here since it is a runtime string.
    sync_h5_path = String(
        required=True,
        help='path to h5 file containing syncronization information')
    output_opto_table_path = String(
        required=True,
        help='the optotagging stimulation table will be written here')
    # Maps condition name -> Condition record; defaults to the module-level
    # known_conditions table.
    conditions = Dict(String, Nested(Condition), default=known_conditions)
class SolverSchema(ArgSchema):
    """Input schema for the transform solver: data source, transform spec,
    optional leave-one-out index, and optional output directory.
    """
    data = Nested(DataLoaderSchema)
    transform = Dict(required=True,
                     description="dict containing transform specification")
    # When set, this data point is excluded (e.g. for cross-validation).
    leave_out_index = Int(required=False,
                          missing=None,
                          default=None,
                          description="index to leave out of data")
    output_dir = OutputDir(
        required=False,
        missing=None,
        default=None,
        description="path for writing output json of transform")
class OutputParameters(DefaultSchema):
    """ Outputs produced by attach_metadata """
    inputs = Nested(
        InputParameters,
        description="The parameters argued to this executable",
        required=True
    )
    # NOTE(review): `many=True` is not a Dict field parameter; marshmallow
    # will store it as field metadata rather than change behavior — confirm
    # whether a List of Dicts was intended.
    sinks = Dict(
        description="The sinks to which metadata was attached",
        required=True,
        many=True
    )
class MorphologySummaryParameters(ArgSchema):
    """Input schema for the morphology summary module: a pia transform,
    soma depth information, an input swc file, and the set of thumbnail
    output paths to write.
    """
    pia_transform = Dict(description="input pia transform", required=True)
    # BUG FIX: keyword was misspelled `desription`; marshmallow silently
    # stores unknown kwargs as metadata, so this field had no description.
    relative_soma_depth = Float(description="input relative soma depth",
                                required=False)
    soma_depth = Float(description="input soma depth", required=True)
    swc_file = InputFile(description="input swc file", required=True)
    thumbnail_file = OutputFile(description="output thumbnail file",
                                required=True)
    cortex_thumbnail_file = OutputFile(
        description="output cortex thumbnail file", required=True)
    normal_depth_thumbnail_file = OutputFile(
        description="output normal depth thumbnail file", required=True)
    high_resolution_thumbnail_file = OutputFile(
        description="output high resolution cortex thumbnail file",
        required=True)
class SparkOptions(argschema.schemas.DefaultSchema):
    """Options forwarded to a spark-submit invocation."""
    jarfile = Str(required=True, description=(
        "spark jar to call java spark command"))
    className = Str(required=True, description=(
        "spark class to call"))
    driverMemory = Str(required=False, default='6g', description=(
        "spark driver memory (important for local spark)"))
    memory = Str(
        required=False,
        description="Memory required for spark job")
    sparkhome = InputDir(required=True, description=(
        "Spark home directory containing bin/spark_submit"))
    spark_files = List(InputFile, required=False, description=(
        "list of spark files to add to the spark submit command"))
    spark_conf = Dict(required=False, description=(
        "dictionary of key value pairs to add to spark_submit "
        "as --conf key=value"))
class ProbeOutputParameters(DefaultSchema):
    """Per-probe outputs: written file paths plus the timing/sampling-rate
    values computed against the master clock.
    """
    name = String(required=True, help="Identifier for this probe")
    output_paths = Dict(
        required=True,
        help="Paths of each mappable file written by this run of the module.",
    )
    total_time_shift = Float(
        required=True,
        help=
        "Translation (in seconds) from master->probe times computed for this probe.",
    )
    global_probe_sampling_rate = Float(
        required=True,
        help=
        "The sampling rate of this probe in Hz, assessed on the master clock.",
    )
    global_probe_lfp_sampling_rate = Float(
        required=True,
        help=
        "The sampling rate of LFP collected on this probe in Hz, assessed on the master clock.",
    )
class RenderSchema(DefaultSchema):
    """Connection and display options for a Render service datasource."""
    protocol = Str(
        default="http",
        help="Protocol to connect to render with (http or https)",
        required=False)
    port = Int(default=80, required=False)
    encoding = Str(
        default="jpg",
        help="Encoding option for the neuroglancer render datasource (jpg or raw16)",
        required=False)
    all_channels = Boolean(
        default=False,
        help="Use Render API to query for and load all channels",
        required=False)
    alt_render = Str(
        default="",
        help="Alternate render host to use for vizrelay API calls [to work in Docker]",
        required=False)
    enable_one_channel = Boolean(
        default=False,
        help="Enable only one of the channels",
        required=False)
    channel_name_shader_sub = Dict(
        default={},
        help="Dictionary of CHANNEL_NAME : { SUB_NAME : SUB_VALUE }",
        required=False)
class SessionData(RaisingSchema):
    """Metadata and input file paths describing one ophys experiment,
    used when writing the experiment to NWB.
    """
    # --- identifiers ---
    ophys_experiment_id = Int(required=True,
                              description='unique identifier for this ophys '
                                          'session')
    ophys_session_id = Int(required=True,
                           description='The ophys session id that the ophys '
                                       'experiment to be written to NWB is '
                                       'from')
    behavior_session_id = Int(required=True,
                              description='The behavior session id that the '
                                          'ophys experiment to be written to '
                                          'written to NWB is from')
    foraging_id = String(required=True,
                         description='The foraging id associated with the '
                                     'ophys session')
    rig_name = String(required=True, description='name of ophys device')
    # --- 2p movie geometry ---
    movie_height = Int(required=True,
                       description='height of field-of-view for 2p movie')
    movie_width = Int(required=True,
                      description='width of field-of-view for 2p movie')
    container_id = Int(required=True,
                       description='container that this experiment is in')
    # --- input file paths ---
    sync_file = String(required=True, description='path to sync file')
    max_projection_file = String(required=True,
                                 description='path to max_projection file')
    behavior_stimulus_file = String(required=True,
                                    description='path to behavior_stimulus '
                                                'file')
    dff_file = String(required=True, description='path to dff file')
    demix_file = String(required=True, description='path to demix file')
    average_intensity_projection_image_file = String(
        required=True,
        description='path to '
                    'average_intensity_projection_image file')
    rigid_motion_transform_file = String(required=True,
                                         description='path to '
                                                     'rigid_motion_transform'
                                                     ' file')
    # --- experiment targeting / stimulus ---
    targeted_structure = String(required=True,
                                description='Anatomical structure that the '
                                            'experiment targeted')
    targeted_depth = Int(required=True,
                         description='Cortical depth that the experiment '
                                     'targeted')
    stimulus_name = String(required=True, description='Stimulus Name')
    date_of_acquisition = String(required=True,
                                 description='date of acquisition of '
                                             'experiment, as string (no '
                                             'timezone info but relative ot '
                                             'UTC)')
    # --- subject genotype / identity ---
    reporter_line = List(String, required=True, description='reporter line')
    driver_line = List(String, required=True, description='driver line')
    external_specimen_name = Int(required=True,
                                 description='LabTracks ID of the animal')
    full_genotype = String(required=True, description='full genotype')
    surface_2p_pixel_size_um = Float(required=True,
                                     description='the spatial extent (in um) '
                                                 'of the 2p field-of-view')
    ophys_cell_segmentation_run_id = Int(required=True,
                                         description='ID of the active '
                                                     'segmentation run used '
                                                     'to generate this file')
    cell_specimen_table_dict = Nested(CellSpecimenTable, required=True,
                                      description='Table of cell specimen '
                                                  'info')
    sex = String(required=True, description='sex')
    age = String(required=True, description='age')
    # --- eye tracking / events ---
    eye_tracking_rig_geometry = Dict(
        required=True,
        description="Mapping containing information about session rig "
                    "geometry used for eye gaze mapping."
    )
    eye_tracking_filepath = String(
        required=True,
        validate=check_read_access,
        description="h5 filepath containing eye tracking ellipses"
    )
    events_file = InputFile(
        required=True,
        description='h5 filepath to events data'
    )
    # --- mesoscope plane grouping (None/0 for Scientifica) ---
    imaging_plane_group = Int(
        required=True,
        allow_none=True,
        description="A numeric index that indicates the order that the "
                    "frames were acquired when dealing with an imaging plane "
                    "in a mesoscope experiment. Will be None for Scientifica "
                    "experiments."
    )
    plane_group_count = Int(
        required=True,
        description="The total number of plane groups associated with the "
                    "ophys session that the experiment belongs to. Will be 0 "
                    "for Scientifica experiments and nonzero for Mesoscope "
                    "experiments."
    )
class CellSpecimenTable(RaisingSchema):
    """Column-oriented cell specimen table: each field maps a String key
    (presumably a row/cell identifier — verify against caller) to one
    column value for that cell.
    """
    cell_roi_id = Dict(String, Int, required=True)
    # cell_specimen_id values may be None (unmatched ROIs allowed)
    cell_specimen_id = Dict(String, Int(allow_none=True), required=True)
    x = Dict(String, Int, required=True)
    y = Dict(String, Int, required=True)
    max_correction_up = Dict(String, Float, required=True)
    max_correction_right = Dict(String, Float, required=True)
    max_correction_down = Dict(String, Float, required=True)
    max_correction_left = Dict(String, Float, required=True)
    valid_roi = Dict(String, Boolean, required=True)
    height = Dict(String, Int, required=True)
    width = Dict(String, Int, required=True)
    mask_image_plane = Dict(String, Int, required=True)
    # 2D boolean mask per cell
    roi_mask = Dict(String, List(List(Boolean)), required=True)
class TiffMetadataOutput(DefaultSchema):
    """Output record pairing an input tiff path with its extracted
    ROI and ScanImage metadata dictionaries.
    """
    input_tif = Str()
    roi_metadata = Dict()
    scanimage_metadata = Dict()
class OutputParameters(OutputSchema):
    """Output schema: mapping of output names to the file paths written."""
    output_file_paths = Dict(required=True)
class InputSchema(ArgSchema):
    """Input schema for writing an ecephys session to NWB: session
    identifiers, probe records, and paths to the per-modality data files.
    """
    class Meta:
        # reject any input keys not declared on this schema
        unknown = mm.RAISE
    log_level = LogLevel(default="INFO",
                         help="set the logging level of the module")
    output_path = String(
        required=True,
        validate=check_write_access,
        help="write outputs to here",
    )
    session_id = Int(required=True,
                     help="unique identifier for this ecephys session")
    session_start_time = DateTime(
        required=True,
        help="the date and time (iso8601) at which the session started",
    )
    stimulus_table_path = String(
        required=True,
        validate=check_read_access,
        help="path to stimulus table file",
    )
    invalid_epochs = Nested(InvalidEpoch,
                            many=True,
                            required=True,
                            help="epochs with invalid data")
    probes = Nested(
        Probe,
        many=True,
        required=True,
        help="records of the individual probes used for this experiment",
    )
    running_speed_path = String(
        required=True,
        help=
        "data collected about the running behavior of the experiment's subject",
    )
    session_sync_path = String(
        required=True,
        validate=check_read_access,
        help=
        "Path to an h5 experiment session sync file (*.sync). This file relates events from different acquisition modalities to one another in time."
    )
    eye_tracking_rig_geometry = Dict(
        required=True,
        help=
        "Mapping containing information about session rig geometry used for eye gaze mapping."
    )
    eye_dlc_ellipses_path = String(
        required=True,
        validate=check_read_access,
        help=
        "h5 filepath containing raw ellipse fits produced by Deep Lab Cuts of subject eye, pupil, and corneal reflections during experiment"
    )
    eye_gaze_mapping_path = String(
        required=False,
        allow_none=True,
        help=
        "h5 filepath containing eye gaze behavior of the experiment's subject")
    pool_size = Int(
        default=3,
        help="number of child processes used to write probewise lfp files")
    # NOTE(review): this help string ends mid-sentence ("during this ") —
    # the trailing word ("session"?) appears to have been lost; confirm and
    # complete the user-facing text.
    optotagging_table_path = String(
        required=False,
        validate=check_read_access,
        help=
        "file at this path contains information about the optogenetic stimulation applied during this "
    )
    session_metadata = Nested(
        SessionMetadata,
        allow_none=True,
        required=False,
        help="miscellaneous information describing this session")