class MontagePlotsSchema(ArgSchema):
    """Inputs for montage residual computation and plotting."""
    # where to read point matches from
    collection_path = InputFile(
        description="point matches from here", required=True)
    # where to read resolved tiles from
    resolved_path = InputFile(
        description="resolved tiles from here", required=True)
    save_json_path = OutputFile(
        default=None, missing=None, required=True,
        description="save residuals to this path if not None")
    save_plot_path = OutputFile(
        default=None, missing=None, required=True,
        description="save plot to this path if not None")
    make_plot = Boolean(
        default=True, missing=True, required=True,
        description="make a plot?")
    show = Boolean(
        default=True, missing=True, required=True,
        description="show on screen?")
    pdf_out = OutputFile(
        default=None, missing=None, required=True,
        description="where to write the pdf output")
class InputParameters(ArgSchema):
    """Inputs for a morphology feature-extraction run."""
    # morphologies to process
    reconstructions = Nested(
        Reconstruction, many=True, required=True,
        description="The morphological reconstructions to be processed")
    heavy_output_path = OutputFile(
        required=True,
        description=(
            "features whose results are heavyweight data (e.g. the numpy "
            "arrays returned by layer histograms features) are stored here."))
    feature_set = String(
        required=False, default="aibs_default",
        description="select the basic set of features to calculate")
    only_marks = List(
        String, required=False, cli_as_single_argument=True,
        description=(
            "restrict calculated features to those with this set of marks"))
    # NOTE(review): many=True on a String field is unusual (many is a
    # Nested/schema option) -- confirm whether List(String, ...) was intended
    required_marks = String(
        required=False, many=True,
        description=(
            "Error (vs. skip) if any of these marks fail validation"))
    output_table_path = OutputFile(
        required=False,
        description=(
            "this module writes outputs to a json specified as --output_json. "
            "If you want to store outputs in a different format "
            "(.csv is supported currently), specify this parameter"))
    num_processes = Int(
        required=False, default=None, allow_none=True,
        description=(
            "Run a multiprocessing pool with this many processes. "
            "Default is min(number of cpus, number of swcs). "
            "Setting num_processes to 1 will avoid a pool."))
    global_parameters = Nested(
        GlobalParameters, required=False,
        description=(
            "provide additional configuration to this feature extraction run. "
            "This configuration will be applied to all morphologies processed."))
class PipelineParameters(ArgSchema):
    """Inputs for the ephys pipeline module."""
    input_nwb_file = InputFile(description="input nwb file", required=True)
    # NOTE(review): declared OutputFile although the name reads like an
    # input; sibling schemas use InputFile here -- confirm intent
    stimulus_ontology_file = OutputFile(description="blash", required=False)
    # fixed typo: was 'desription=', which silently dropped the description
    # (unknown kwargs become field metadata instead of the description)
    input_h5_file = InputFile(description="input h5 file", required=False)
    output_nwb_file = OutputFile(description="output nwb file", required=True)
    qc_fig_dir = OutputFile(description="output qc figure directory",
                            required=False)
    qc_criteria = Nested(QcCriteria, required=True)
    manual_sweep_states = Nested(ManualSweepState, required=False, many=True)
class FeatureExtractionParameters(ArgSchema):
    """Inputs for the feature-extraction stage."""
    input_nwb_file = InputFile(description="input nwb file", required=True)
    stimulus_ontology_file = InputFile(
        description="stimulus ontology JSON", required=False)
    output_nwb_file = OutputFile(description="output nwb file", required=True)
    qc_fig_dir = OutputFile(
        description="output qc figure directory", required=False)
    # per-sweep features computed upstream (optional; no required flag)
    sweep_features = Nested(FxSweepFeatures, many=True)
    cell_features = Nested(CellFeatures, required=True)
class ImportTrakEM2AnnotationParameters(RenderTrakEM2Parameters):
    """Inputs for importing annotations from a TrakEM2 project."""
    # render stack searched for trakem2 patches
    EMstack = Str(
        description='stack to look for trakem2 patches in', required=True)
    # source trakem2 project file
    trakem2project = InputFile(
        description='trakem2 file to read in', required=True)
    output_annotation_file = OutputFile(
        description="place to save annotation output", required=True)
    output_bounding_box_file = OutputFile(
        description="place to save bounding box output", required=True)
class NwayMatchingOutputSchema(NwayMatchingOutputNoPlotsSchema):
    """Extends the no-plots output schema with paths to the QC figures."""
    nway_match_fraction_plot = OutputFile(
        description="Path of match fraction plot *.png", required=True)
    nway_warp_overlay_plot = OutputFile(
        description="Path of warp overlay plot *.png", required=True)
    nway_warp_summary_plot = OutputFile(
        description="Path of warp summary plot *.png", required=True)
class SweepExtractionParameters(ArgSchema):
    """Inputs for sweep extraction."""
    input_nwb_file = InputFile(description="input nwb file", required=True)
    # NOTE(review): declared OutputFile although the name reads like an
    # input; sibling schemas use InputFile here -- confirm intent
    stimulus_ontology_file = OutputFile(
        required=False, description="stimulus ontology JSON")
    update_ontology = Boolean(
        default=True,
        description="update stimulus ontology file from LIMS (if path is provided)")
    # TODO: these values below seem unused
    manual_seal_gohm = Float(description="blah")
    manual_initial_access_resistance_mohm = Float(description="blah")
    manual_initial_input_mohm = Float(description="blah")
    output_json = OutputFile(
        required=True, description="output feature json file")
class MorphologySummaryParameters(ArgSchema):
    """Inputs for rendering morphology summary thumbnails."""
    pia_transform = Dict(description="input pia transform", required=True)
    # fixed typo: was 'desription=', which silently dropped the description
    # (unknown kwargs become field metadata instead of the description)
    relative_soma_depth = Float(description="input relative soma depth",
                                required=False)
    soma_depth = Float(description="input soma depth", required=True)
    swc_file = InputFile(description="input swc file", required=True)
    thumbnail_file = OutputFile(description="output thumbnail file",
                                required=True)
    cortex_thumbnail_file = OutputFile(
        description="output cortex thumbnail file", required=True)
    normal_depth_thumbnail_file = OutputFile(
        description="output normal depth thumbnail file", required=True)
    high_resolution_thumbnail_file = OutputFile(
        description="output high resolution cortex thumbnail file",
        required=True)
class OutputParameters(DefaultSchema):
    """Outputs of the streamline/depth-field computation."""
    # echo of the input parameters for provenance
    inputs = Nested(
        PiaWmStreamlineSchema, required=True,
        description="The parameters argued to this executable")
    depth_field_file = OutputFile(
        description='location of depth field xarray', required=True)
    gradient_field_file = OutputFile(
        description='location of gradient field xarray', required=True)
    translation = NumpyArray(
        description='translation if applied', required=False)
class output_stack(db_params):
    """Destination options for writing a stack (file or render database)."""
    output_file = OutputFile(
        required=False, missing=None, default=None,
        # fixed missing space: fragments previously concatenated to
        # "...input stackResolvedTiles."
        description=("json or json.gz serialization of input stack "
                     "ResolvedTiles."))
    compress_output = Boolean(
        required=False, default=True, missing=True,
        description=("if writing file, compress with gzip."))
    collection_type = String(
        default='stack',
        description="'stack' or 'pointmatch'")
    use_rest = Boolean(
        default=False,
        description=("passed as kwarg to "
                     "renderapi.client.import_tilespecs_parallel"))

    @mm.post_load
    def validate_file(self, data, **kwargs):
        # a file-backed interface is meaningless without an output path
        if data['db_interface'] == 'file':
            if data['output_file'] is None:
                raise mm.ValidationError("with db_interface 'file', "
                                         "'output_file' must be a file")
        # post_load hooks must return the loaded data; the original
        # implicitly returned None, which replaces the result with None
        # under marshmallow 3. **kwargs absorbs mm3's many/partial args.
        return data

    @mm.post_load
    def validate_data(self, data, **kwargs):
        if 'name' in data:
            if len(data['name']) != 1:
                raise mm.ValidationError("only one input or output "
                                         "stack name is allowed")
        # see validate_file: return data so the loaded result survives
        return data
class LensQuiverSchema(ArgSchema):
    """Inputs for quiver plots of lens-correction transforms."""
    # transforms (or resolved tiles) to visualize
    transform_list = List(
        InputFile, required=True,
        description=("list of paths to transforms "
                     " or resolved tiles"))
    subplot_shape = List(
        Int, default=[1, 1], missing=[1, 1], required=True,
        description="sets the subplots for multiple plots")
    n_grid_pts = Int(
        default=20, missing=20, required=True,
        description="number of pts per axis for quiver grid")
    fignum = Int(
        default=None, missing=None, required=True,
        description="passed to plt.subplots to number the figure")
    arrow_scale = Float(
        default=1.0, missing=1.0, required=True,
        description="relative scale of arrows to axes")
    show = Boolean(
        default=True, missing=True, required=True,
        description="show on screen?")
    pdf_out = OutputFile(
        default='./lens_corr_plots.pdf',
        missing='./lens_corr_plots.pdf',
        required=True,
        description="where to write the pdf output")
class QcParameters(ArgSchema):
    """Inputs for the QC stage."""
    stimulus_ontology_file = InputFile(description="blash", required=False)
    qc_criteria = Nested(QcCriteria, required=True)
    sweep_features = Nested(QcSweepFeatures, required=True, many=True)
    # optional: no required flag given
    cell_features = Nested(CellFeatures)
    output_json = OutputFile(
        required=True, description="output feature json file")
class PipelineParameters(ArgSchema):
    """Inputs for the full ephys pipeline run."""
    input_nwb_file = InputFile(description="input nwb file", required=True)
    # NOTE(review): declared OutputFile although the name reads like an
    # input; sibling schemas use InputFile here -- confirm intent
    stimulus_ontology_file = OutputFile(description="blash", required=False)
    update_ontology = Boolean(
        default=True,
        description="update stimulus ontology file from LIMS (if path is provided)")
    output_nwb_file = OutputFile(description="output nwb file", required=True)
    output_json = OutputFile(
        required=True, description="output feature json file")
    qc_fig_dir = OutputFile(
        required=False, description="output qc figure directory")
    qc_criteria = Nested(QcCriteria, required=True)
    manual_sweep_states = Nested(ManualSweepState, many=True, required=False)
    write_spikes = Boolean(
        required=False, description="Flag for writing spike times")
class DepthEstimationParams(DefaultSchema):
    """Parameters for estimating probe depth / brain-surface channel."""
    # NOTE(review): these fields pass 'help=' rather than 'description=';
    # unknown kwargs land in field metadata -- confirm downstream tooling
    # actually reads 'help'
    hi_noise_thresh = Float(
        default=50.0, required=True,
        help='Max RMS noise for including channels')
    lo_noise_thresh = Float(
        default=3.0, required=True,
        help='Min RMS noise for including channels')
    save_figure = Bool(default=True, required=True)
    # NOTE(review): required=True together with default=None -- confirm a
    # real path is supplied whenever save_figure is set
    figure_location = OutputFile(default=None, required=True)
    smoothing_amount = Int(
        default=5, required=True,
        help='Gaussian smoothing parameter to reduce channel-to-channel noise')
    power_thresh = Float(
        default=2.5, required=True,
        help='Ignore threshold crossings if power is above this level (indicates channels are in the brain)')
    diff_thresh = Float(
        default=-0.07, required=True,
        help='Threshold to detect large increases is power at brain surface')
    freq_range = NumpyArray(
        default=[0, 10], required=True,
        help='Frequency band for detecting power increases')
    max_freq = Int(
        default=150, required=True,
        help='Maximum frequency to plot')
    channel_range = NumpyArray(
        default=[370, 380], required=True,
        help='Channels assumed to be out of brain, but in saline')
    n_passes = Int(
        default=10, required=True,
        help='Number of times to compute offset and surface channel')
    skip_s_per_pass = Int(
        default=100, required=True,
        help='Number of seconds between data chunks used on each pass')
    start_time = Float(
        default=0, required=True,
        help='First time (in seconds) for computing median offset')
    time_interval = Float(
        default=5, required=True,
        help='Number of seconds for computing median offset')
    nfft = Int(
        default=4096, required=True,
        help='Length of FFT used for calculations')
    air_gap = Int(
        default=100, required=True,
        help='Approximate number of channels between brain surface and air')
class DoMeshLensCorrectionOutputSchema(DefaultSchema):
    """Outputs of the mesh lens-correction module."""
    # path of the lens correction transform json
    output_json = Str(
        description="path to lens correction file", required=True)
    # path of the generated mask image
    maskUrl = OutputFile(
        description="path to mask generated", required=True)
class FilterSchema(RenderParameters, ZValueParameters, ProcessPoolParameters):
    """Inputs for filtering montage point matches by residual/translation."""
    input_stack = Str(
        description='stack with stage-aligned coordinates', required=True)
    input_match_collection = Str(
        description='Name of the montage point match collection',
        required=True)
    output_match_collection = Str(
        default=None, missing=None, required=True,
        description='Name of the montage point match collection to write to')
    resmax = Float(
        required=True,
        description=("maximum value in "
                     "pixels for average residual in tile pair"))
    transmax = Float(
        required=True,
        description=("maximum value in "
                     "pixels for translation relative to stage coords"))
    filter_output_file = OutputFile(
        description="location of json file with filter output", required=True)
    inverse_weighting = Bool(
        default=False, missing=False, required=True,
        description='new weights weighted inverse to counts per tile-pair')
class GenerateEMTileSpecsParameters(ArgSchema):
    """Inputs for generating tile specs from TEMCA acquisition metadata."""
    metafile = InputFile(
        required=True,
        description="metadata file containing TEMCA acquisition data")
    maskUrl = InputFile(
        required=False, default=None, missing=None,
        description="absolute path to image mask to apply")
    image_directory = InputDir(
        required=False,
        description=("directory used in determining absolute paths to images. "
                     "Defaults to parent directory containing metafile "
                     "if omitted."))
    maximum_intensity = Int(
        required=False, default=255,
        description=("intensity value to interpret as white"))
    minimum_intensity = Int(
        required=False, default=0,
        description=("intensity value to interpret as black"))
    z = Float(required=False, default=0,
              description=("z value"))
    sectionId = Str(
        required=False,
        description=("sectionId to apply to tiles during ingest. "
                     "If unspecified will default to a string "
                     "representation of the float value of z_index."))
    output_path = OutputFile(
        required=False,
        description="directory for output files")
    compress_output = Boolean(
        required=False, missing=True, default=True,
        # fixed typo: was 'escription=', which silently dropped the
        # description (unknown kwargs become field metadata)
        description=("tilespecs will be .json or .json.gz"))
class PostProcessROIsInputSchema(ArgSchema):
    """Inputs for post-processing Suite2P-extracted ROIs."""
    suite2p_stat_path = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to s2p output stat file containing ROIs generated "
                     "during source extraction"))
    motion_corrected_video = Str(
        required=True,
        validate=lambda x: Path(x).exists(),
        description=("Path to motion corrected video file *.h5"))
    motion_correction_values = InputFile(
        required=True,
        # fixed missing space between concatenated fragments, which
        # previously produced "...is expected tohave a header row..."
        description=("Path to motion correction values for each frame "
                     "stored in .csv format. This .csv file is expected to "
                     "have a header row of either:\n"
                     "['framenumber','x','y','correlation','kalman_x',"
                     "'kalman_y']\n['framenumber','x','y','correlation',"
                     "'input_x','input_y','kalman_x',"
                     "'kalman_y','algorithm','type']"))
    output_json = OutputFile(
        required=True,
        description=("Path to a file to write output data."))
    maximum_motion_shift = Float(
        missing=30.0,
        required=False,
        allow_none=False,
        description=("The maximum allowable motion shift for a frame in pixels"
                     " before it is considered an anomaly and thrown out of "
                     "processing"))
    abs_threshold = Float(
        missing=None,
        required=False,
        allow_none=True,
        description=("The absolute threshold to binarize ROI masks against. "
                     "If not provided will use quantile to generate "
                     "threshold."))
    binary_quantile = Float(
        missing=0.1,
        validate=Range(min=0, max=1),
        description=("The quantile against which an ROI is binarized. If not "
                     "provided will use default function value of 0.1."))
    npixel_threshold = Int(
        default=50,
        required=False,
        description=("ROIs with fewer pixels than this will be labeled as "
                     "invalid and small size."))
    aspect_ratio_threshold = Float(
        default=0.2,
        required=False,
        description=("ROIs whose aspect ratio is <= this value are "
                     "not recorded. This captures a large majority of "
                     "Suite2P-created artifacts from motion border"))
    morphological_ops = Bool(
        default=True,
        required=False,
        description=("whether to perform morphological operations after "
                     "binarization. ROIs that are washed away to empty "
                     "after this operation are eliminated from the record. "
                     "This can apply to ROIs that were previously labeled "
                     "as small size, for example."))
class AnnotationParameters(DefaultSchema):
    """Options controlling movie annotation output."""
    # whether to produce an annotated movie at all
    annotate_movie = Bool(
        default=False,
        description="Flag for whether or not to annotate")
    output_file = OutputFile(default="./annotated.avi")
    fourcc = Str(
        description=("FOURCC string for video encoding. On Windows "
                     "H264 is not available by default, so it will "
                     "need to be installed or a different codec "
                     "used."))
class SweepExtractionParameters(ArgSchema):
    """Inputs for sweep extraction."""
    input_nwb_file = InputFile(description="input nwb file", required=True)
    # NOTE(review): declared OutputFile although the name reads like an
    # input; sibling schemas use InputFile here -- confirm intent
    stimulus_ontology_file = OutputFile(
        required=False,
        description="stimulus ontology JSON")
    manual_seal_gohm = Float(description="blah")
    manual_initial_access_resistance_mohm = Float(description="blah")
    manual_initial_input_mohm = Float(description="blah")
class NwayDiagnosticSchema(ArgSchema):
    """Inputs for nway matching diagnostics."""
    output_pdf = OutputFile(
        required=True,
        description="path to output pdf")
    use_input_dir = fields.Bool(
        required=False, missing=False, default=False,
        # fixed typo: was 'descriptip=', which silently dropped the
        # description (unknown kwargs become field metadata)
        description="output to same directory as input")
class Nwb2SinkTarget(DefaultSchema):
    """Configure an output target for an Nwb2 Sink."""
    output_path = OutputFile(
        required=True,
        description=(
            "Output path to which file with attached metadata will be written"))
class DandiSinkTarget(DefaultSchema):
    """Specify an output target for a DANDI metadata sink."""
    # optional (no required flag given)
    output_path = OutputFile(
        description=(
            "Outputs will be written here. Currently only yaml is "
            "supported"))
class OutputParameters(DefaultSchema):
    """Outputs of the affine-transform application."""
    # echo of this run's input parameters for provenance
    inputs = Nested(
        ApplyAffineSchema, required=True,
        description="The parameters argued to this executable")
    transformed_swc = OutputFile(
        description='location of the transformed swc', required=True)
class DataFilterSchema(ArgSchema):
    """Inputs selecting and filtering the datasets to process."""
    dset1 = Nested(DataLoaderSchema)
    dset_soma = Nested(DataLoaderSchema)
    dset2 = Nested(DataLoaderSchema)
    output_file = OutputFile(
        default=None, missing=None, required=False,
        description="where to write output file")
    header = Str(
        default="opt", required=True,
        description="specifies which data to use, i.e. opt/em")
class OutputImage(DefaultSchema):
    """Record describing one produced overlay image."""
    input_path = InputFile(
        required=True,
        description="The base image was read from here")
    output_path = OutputFile(
        required=True,
        description="The overlay was written to here")
    downsample = Int(
        required=True,
        description=(
            "The base image was downsampled by this factor along each axis"))
    overlay_type = String(
        required=True,
        description="This image has this kind of overlay")
class Image(DefaultSchema):
    """Specification of one input image and the overlays to produce."""
    input_path = InputFile(
        required=True,
        description="Read the image from here")
    output_path = OutputFile(
        required=True,
        description="Write outputs to (siblings of) this path")
    downsample = Int(
        default=8, required=True,
        description=("Downsample the image by this amount on each dimension "
                     "(currently this is just a decimation, hence Int)."))
    overlay_types = List(
        String, default=["before", "after"], required=True,
        description=("produce these types of overlays for this image. "
                     "See ImageOutputter for options"))
class ApplyAffineSchema(ArgSchema):
    """Arg Schema for apply_affine_transform module"""
    affine_dict = Nested(AffineDictSchema,
                         required=False,
                         description='Dictionary defining an affine transform')
    affine_list = List(Float,
                       required=False,
                       cli_as_single_argument=True,
                       description='List defining an affine transform')
    input_swc = InputFile(required=True,
                          description='swc file to be transformed')
    output_swc = OutputFile(required=True,
                            description='Output swc filepath')

    @mm.validates_schema
    def validate_schema_input(self, data, **kwargs):
        # **kwargs absorbs marshmallow 3's extra keyword arguments
        # (many, partial); the original two-argument signature raises
        # TypeError there. Harmless under marshmallow 2.
        validate_input_affine(data)
class ViewMatchesSchema(ArgSchema):
    """Inputs for visualizing point-match pairs."""
    collection_path = InputFile(
        required=False,
        description="if specified, will read collection from here")
    collection_basename = Str(
        default="collection.json", missing="collection.json", required=True,
        description=("basename for collection file if collection_path"
                     " not specified. will also check for .json.gz"))
    data_dir = InputDir(
        required=True,
        description=("directory containing image files. Will also be dir"
                     " dir for collection path, if not otherwise specified"))
    resolved_tiles = List(
        Str, required=True,
        missing=["resolvedtiles.json.gz", "resolvedtiles_input.json.gz"],
        description=("will take the transform from the first file"
                     " matching this list, if possible"))
    transform_file = InputFile(
        required=False,
        description=("if provided, will get lens correction transform "
                     " from here"))
    view_all = Boolean(
        default=False, missing=False, required=True,
        description=("will plot all the pair matches. can be useful "
                     "for lens correction to file. probably not desirable "
                     "for montage"))
    show = Boolean(
        default=True, missing=True, required=True,
        description="show on screen?")
    match_index = Int(
        default=0, missing=0, required=True,
        description="which index of self.matches to plot")
    pdf_out = OutputFile(
        default='./view_matches_output.pdf',
        missing='./view_matches_output.pdf',
        required=True,
        description="where to write the pdf output")
class InputParameters(ArgSchema):
    """Inputs for tracing depth-field steps from swc nodes."""
    swc_path = InputFile(
        required=True,
        description="path to input swc (csv) file")
    depth = Nested(
        DepthField, many=False, required=True,
        description=("A transform which can be evaluated at the "
                     "location of each node in the input swc"))
    layers = Nested(
        Layer, many=True, required=True,
        description="specification of layer bounds")
    step_size = Float(
        default=1.0, required=True,
        description=(
            "size of each step, in the same units as the depth field and swc"))
    output_path = OutputFile(
        required=True,
        description="write (csv) outputs here")
    max_iter = Int(
        default=1000, required=True,
        description="how many steps to take before giving up")