class MergeStacksParameters(RenderParameters):
    """Parameters for merging two render stacks into a single output stack."""
    stack1 = Str(required=True,
                 metadata={'description': 'first stack to merge'})
    stack2 = Str(required=True,
                 metadata={'description': 'second stack to merge'})
    output_stack = Str(required=True,
                       metadata={'description': 'stack to save answer into'})
    # optional z range limiting which sections are merged
    zmin = Int(required=False,
               metadata={'description': 'zvalue to start'})
    zmax = Int(required=False,
               metadata={'description': 'zvalue to end'})
    z_intersection = Boolean(
        required=False, default=False,
        metadata={
            # implicit string concatenation replaces a backslash
            # line-continuation that left an invalid "\ " escape
            # sequence inside the original literal
            'description': ('only output z values that appears in both stacks '
                            '(default False, output z values in either stack)')
        })
    pool_size = Int(required=False, default=20, metadata={
        'description': 'size of pool for parallel processing (default=20)'})
class FitTransformsByPointMatchParameters(RenderParameters):
    """Parameters for fitting a transform that maps a source stack into a
    destination stack's space using a point match collection."""
    dst_stack = Str(
        required=True,
        description='stack with tiles in the desired destination space')
    src_stack = Str(
        required=True,
        description='stack with tiles in the input space')
    output_stack = Str(
        required=True,
        # implicit concatenation replaces backslash line-continuations that
        # left invalid "\ " escape sequences inside the original literal
        description=('name to call output version of '
                     'input_stack_src_space with a transform added to bring it '
                     'into the destination space'))
    matchcollection = Str(
        required=True,
        description=('point match collection expressing point matches '
                     'between the stacks'))
    num_local_transforms = Int(
        required=True,
        description=('number of local transforms to preserver, '
                     'assumes point matches written down after such local '
                     'transforms'))
    transform_type = Str(
        required=False, default='affine',
        validate=mm.validate.OneOf(["affine", "rigid"]),
        description='type of transformation to fit')
    pool_size = Int(
        required=False, default=20,
        description='degree of parallelism (default 20)')
    setz = Boolean(
        required=False, default=True,
        description="whether to change z's to the destination stack")
class FlipStackParameters(RenderParameters):
    """Parameters for flipping a z range of a stack's images."""
    input_stack = Str(
        required=True,
        metadata={'description': 'stack to apply affine to'})
    # when omitted, the result is written back over input_stack
    output_stack = Str(
        required=False,
        metadata={
            'description': 'stack to save answer into (defaults to overwriting input_stack)'
        })
    minZ = Int(
        required=True,
        metadata={'description': 'minimum Z to flip'})
    maxZ = Int(
        required=True,
        metadata={'description': 'maximum Z to flip'})
    pool_size = Int(
        required=False, default=20,
        metadata={
            'description': 'size of pool for parallel processing (default=20)'
        })
    delete_after = Boolean(
        required=False, default=False,
        metadata={
            'description': 'whether to delete the old image files or not after flipping'
        })
class BigFetaSchema(ArgSchema):
    """The input schema used by the BigFeta solver """
    first_section = Int(
        required=True,
        description='first section for matrix assembly')
    last_section = Int(
        required=True,
        description='last section for matrix assembly')
    n_parallel_jobs = Int(
        default=4, required=False,
        description=("number of parallel jobs that will run for "
                     "retrieving tilespecs, assembly from pointmatches, "
                     "and import_tilespecs_parallel"))
    processing_chunk_size = Int(
        default=1, required=False,
        description=("number of pairs per multiprocessing job. can help "
                     "parallelizing pymongo calls."))
    solve_type = String(
        default='montage', required=False,
        description='Solve type options (montage, 3D)',
        # was "validator=", which marshmallow stores as inert metadata;
        # "validate=" actually enforces the OneOf constraint
        validate=mm.validate.OneOf(['montage', '3D']))
    close_stack = Boolean(
        default=True, required=False,
        description='Set output stack to state COMPLETE?')
    overwrite_zlayer = Boolean(
        default=True, required=False,
        description='delete section before import tilespecs?')
    profile_data_load = Boolean(
        default=False,
        description="module will raise exception after timing tilespec read")
    transformation = String(
        default='AffineModel',
        validate=mm.validate.OneOf([
            'AffineModel', 'SimilarityModel', 'Polynomial2DTransform',
            'affine', 'rigid', 'affine_fullsize',
            'RotationModel', 'TranslationModel', 'ThinPlateSplineTransform'
        ]),
        description="transformation to use for the solve")
    fullsize_transform = Boolean(
        default=False,
        description='use fullsize affine transform')
    poly_order = Int(
        default=2, required=False,
        description='order of polynomial transform.')
    output_mode = String(
        default='none',
        validate=mm.validate.OneOf(['none', 'hdf5', 'stack']),
        description=("none: just solve and show logging output\n"
                     "hdf5: assemble to hdf5_options.output_dir\n"
                     "stack: write to output stack"))
    assemble_from_file = String(
        default='',
        # literal reconstructed: the original had been broken by a raw
        # line break inside the string
        description=("path to an hdf5 file for solving from hdf5 output."
                     "mainly for testing purposes. hdf5 output usually to "
                     "be solved by external solver"))
    ingest_from_file = String(
        default='',
        description='path to an hdf5 file output from the external solver.')
    render_output = String(
        default='null',
        description=("anything besides the default will "
                     "show all the render stderr/stdout"))
    input_stack = Nested(
        input_stack,
        description=("specifies the origin of the tilespecs."))
    output_stack = Nested(
        output_stack,
        description=("specifies the destination of the tilespecs."))
    pointmatch = Nested(
        pointmatch,
        description=("specifies the origin of the point correspondences"))
    hdf5_options = Nested(
        hdf5_options,
        description=("options invoked if output_mode is \"hdf5\""))
    matrix_assembly = Nested(
        matrix_assembly,
        description=("options that control which correspondences are"
                     " included in the matrix equation and their weights"))
    regularization = Nested(
        regularization,
        description=("options that contol the regularization of different"
                     " types of variables in the solve"))
    transform_apply = List(
        Int,
        default=[], missing=[],
        description=("tilespec.tforms[i].tform() for i in transform_apply "
                     "will be performed on the matches before matrix "
                     "assembly."))

    @mm.post_load
    def validate_data(self, data):
        # poly_factors, when provided, must supply one factor per
        # polynomial coefficient group (poly_order + 1 of them).
        # "and" (not bitwise "&") short-circuits when poly_factors is unset.
        if (data['regularization']['poly_factors'] is not None and
                data['transformation'] == 'Polynomial2DTransform'):
            n = len(data['regularization']['poly_factors'])
            if n != data['poly_order'] + 1:
                raise mm.ValidationError(
                    "regularization.poly_factors must be a list"
                    " of length poly_order + 1")
        # marshmallow 3 requires load processors to return the data
        return data
class RemapZsParameters(StackTransitionParameters):
    """Parameters for remapping a stack's z values to a new set."""
    # whether sectionIds should also be rewritten to match the new z values
    remap_sectionId = Boolean(required=False)
    # target z values, one per remapped section
    new_zValues = List(Int, required=True)
class ManualSweepState(DefaultSchema):
    """Manually-assigned QC state for a single sweep."""
    sweep_number = Integer(description="sweep number", required=True)
    passed = Boolean(description="manual override state", required=True)
class FxSweepFeatures(SweepFeatures):
    """Sweep features extended with a QC pass/fail flag."""
    passed = Boolean(description="qc passed or failed", required=True)
class EMA_Schema(ArgSchema):
    """Input schema for the EM aligner (EMA) solve."""
    first_section = Int(
        required=True,
        description='first section for matrix assembly')
    last_section = Int(
        required=True,
        description='last section for matrix assembly')
    n_parallel_jobs = Int(
        default=4, required=False,
        description='number of parallel jobs that will run for assembly')
    solve_type = String(
        default='montage', required=False,
        description='Solve type options (montage, 3D) Default=montage')
    close_stack = Boolean(
        default=True, required=False,
        description='Close the output stack? - default - True')
    overwrite_zlayer = Boolean(
        default=True, required=False,
        description='delete section before import tilespecs?')
    profile_data_load = Boolean(
        default=False)
    transformation = String(
        default='AffineModel',
        validate=lambda x: x in [
            'AffineModel', 'SimilarityModel', 'Polynomial2DTransform',
            'affine', 'rigid', 'affine_fullsize'])
    fullsize_transform = Boolean(
        default=False,
        description='use fullsize affine transform')
    poly_order = Int(
        default=3, required=False,
        description='order of polynomial transform')
    output_mode = String(
        default='hdf5')
    assemble_from_file = String(
        default='',
        description='fullpath to solution_input.h5')
    ingest_from_file = String(
        default='',
        description='fullpath to solution_output.h5')
    render_output = String(
        default='null',
        description=("/path/to/file, null (devnull), or "
                     "stdout for where to redirect render output"))
    input_stack = Nested(stack)
    output_stack = Nested(stack)
    pointmatch = Nested(pointmatch)
    hdf5_options = Nested(hdf5_options)
    matrix_assembly = Nested(matrix_assembly)
    regularization = Nested(regularization)
    showtiming = Int(
        default=1,
        description='have the routine showhow long each process takes')
    log_level = String(
        default="INFO",
        description='logging level')

    @post_load
    def validate_data(self, data):
        # "and" (not bitwise "&") so the length check short-circuits
        # when poly_factors is unset
        if (data['regularization']['poly_factors'] is not None and
                data['transformation'] == 'Polynomial2DTransform'):
            n = len(data['regularization']['poly_factors'])
            if n != data['poly_order'] + 1:
                raise ValidationError(
                    "regularization.poly_factors must be a list"
                    " of length poly_order + 1")
        # marshmallow 3 requires load processors to return the data
        return data
class InputSchema(ArgSchema):
    """Inputs for mapping eye-tracking ellipse fits to screen gaze positions."""

    # ============== Required fields ==============
    input_file = InputFile(
        required=True,
        description=('An h5 file containing ellipses fits for '
                     'eye, pupil, and corneal reflections.'))
    session_sync_file = InputFile(
        required=True,
        description=('An h5 file containing timestamps to synchronize '
                     'eye tracking video frames with rest of ephys '
                     'session events.'))
    output_file = OutputFile(
        required=True,
        description=('Full save path of output h5 file that '
                     'will be created by this module.'))
    # monitor pose in 'global' coordinates
    monitor_position_x_mm = Float(
        required=True,
        description=("Monitor center X position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_position_y_mm = Float(
        required=True,
        description=("Monitor center Y position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_position_z_mm = Float(
        required=True,
        description=("Monitor center Z position in "
                     "'global' coordinates "
                     "(millimeters)."))
    monitor_rotation_x_deg = Float(
        required=True,
        description="Monitor X rotation in degrees")
    monitor_rotation_y_deg = Float(
        required=True,
        description="Monitor Y rotation in degrees")
    monitor_rotation_z_deg = Float(
        required=True,
        description="Monitor Z rotation in degrees")
    # camera pose in 'global' coordinates
    camera_position_x_mm = Float(
        required=True,
        description=("Camera center X position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_position_y_mm = Float(
        required=True,
        description=("Camera center Y position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_position_z_mm = Float(
        required=True,
        description=("Camera center Z position in "
                     "'global' coordinates "
                     "(millimeters)"))
    camera_rotation_x_deg = Float(
        required=True,
        description="Camera X rotation in degrees")
    camera_rotation_y_deg = Float(
        required=True,
        description="Camera Y rotation in degrees")
    camera_rotation_z_deg = Float(
        required=True,
        description="Camera Z rotation in degrees")
    # LED position in 'global' coordinates
    led_position_x_mm = Float(
        required=True,
        description=("LED X position in 'global' "
                     "coordinates (millimeters)"))
    led_position_y_mm = Float(
        required=True,
        description=("LED Y position in 'global' "
                     "coordinates (millimeters)"))
    led_position_z_mm = Float(
        required=True,
        description=("LED Z position in 'global' "
                     "coordinates (millimeters)"))
    equipment = String(
        required=True,
        description=('String describing equipment setup used '
                     'to acquire eye tracking videos.'))
    date_of_acquisition = String(
        required=True,
        description='Acquisition datetime string.')
    eye_video_file = InputFile(
        required=True,
        description=('Full path to raw eye video '
                     'file (*.avi).'))

    # ============== Optional fields ==============
    eye_radius_cm = Float(
        default=0.1682,
        description=('Radius of tracked eye(s) in '
                     'centimeters.'))
    cm_per_pixel = Float(
        default=(10.2 / 10000.0),
        description=('Centimeter per pixel conversion '
                     'ratio.'))
    log_level = LogLevel(
        default='INFO',
        description='Set the logging level of the module.')
    truncate_timestamps = Boolean(
        default=True,
        description=('If True, truncate sync '
                     'timestamps whenever unusually '
                     'large gapes occur; '
                     'Default=True'))
class MakeMontageScapeSectionStackParameters(OutputStackParameters):
    """Parameters for building a stack of montage-scape (downsampled) sections."""
    montage_stack = Str(
        required=True,
        metadata={'description': 'stack to make a downsample version of'})
    image_directory = Str(
        required=True,
        metadata={'description': 'directory that stores the montage scapes'})
    set_new_z = Boolean(
        required=False, default=False, missing=False,
        metadata={
            'description': 'set to assign new z values starting from 0 (default - False)'
        })
    new_z_start = Int(
        required=False, default=0, missing=0,
        metadata={'description': 'new starting z index'})
    remap_section_ids = Boolean(
        required=False, default=False, missing=False,
        metadata={
            'description': 'change section ids to new z values. default = False'
        })
    imgformat = Str(
        required=False, default='tif', missing='tif',
        metadata={
            'description': 'image format of the montage scapes (default - tif)'
        })
    scale = Float(
        required=True,
        metadata={'description': 'scale of montage scapes'})
    apply_scale = Boolean(
        required=False, default=False, missing=False,
        metadata={
            # literal reconstructed: the original had been broken by a raw
            # line break inside the string
            'description': ('Do you want to scale the downsample '
                            'to the size of section? Default = False')
        })
    doFilter = Boolean(
        required=False, default=True,
        description=("whether to apply default filtering when generating "
                     "missing downsamples"))
    level = Int(
        required=False, default=1,
        description=(
            "integer mipMapLevel used to generate missing downsamples"))
    fillWithNoise = Boolean(
        required=False, default=False,
        description=("Whether to fill the background pixels with noise when "
                     "generating missing downsamples"))
    memGB_materialize = Str(
        required=False, default='12G',
        description=("Java heap size in GB for materialization"))
    pool_size_materialize = Int(
        required=False, default=1,
        description=("number of processes to generate missing downsamples"))
    filterListName = Str(
        required=False,
        description=("Apply specified filter list to all renderings"))
    uuid_prefix = Boolean(
        required=False, default=True,
        description=(
            "Prepend uuid to generated tileIds to prevent collisions"))
    uuid_length = Int(
        required=False, default=10,
        description=("length of uuid4 string used in uuid prefix"))

    @post_load
    def validate_data(self, data):
        # a new z numbering must start at or above zero; otherwise fall back
        # to the smallest existing z
        if data['set_new_z'] and data['new_z_start'] < 0:
            raise ValidationError('new Z start cannot be less than zero')
        elif not data['set_new_z']:
            data['new_z_start'] = min(data['zValues'])
        # FIXME will be able to remove with render-python tweak
        if data.get('filterListName') is not None:
            warnings.warn(
                "filterListName not implemented -- will use default behavior",
                UserWarning)
        # marshmallow 3 requires load processors to return the data
        return data
class db_params(DefaultSchema):
    """Connection parameters for reading/writing via render, mongo, or file."""
    owner = String(
        default='', required=False,
        description='render or mongo owner')
    project = String(
        default='', required=False,
        description='render or mongo project')
    name = List(
        String,
        cli_as_single_argument=True,
        required=False, many=True,
        description='render or mongo collection name')
    host = String(
        required=False,
        description='render host')
    port = Int(
        default=8080, required=False,
        description='render port')
    mongo_host = String(
        default='em-131fs', required=False,
        description='mongodb host')
    mongo_port = Int(
        default=27017, required=False,
        description='mongodb port')
    mongo_userName = String(
        default='', required=False,
        description='mongo user name')
    mongo_authenticationDatabase = String(
        default='', required=False,
        description='mongo admin db')
    mongo_password = String(
        default='', required=False,
        description='mongo pwd')
    db_interface = String(
        default='mongo',
        # was "validator=", which marshmallow stores as inert metadata;
        # "validate=" actually enforces the OneOf constraint
        validate=mm.validate.OneOf(['render', 'mongo', 'file']),
        description=("render: read or write via render\n"
                     "mongo: read or write via pymongo\n"
                     "file: read or write to file"))
    client_scripts = String(
        default=("/allen/aibs/pipeline/image_processing/"
                 "volume_assembly/render-jars/production/scripts"),
        required=False,
        description='see renderapi.render.RenderClient')
    memGB = String(
        required=False, default='5G',
        description='see renderapi.render.RenderClient')
    validate_client = Boolean(
        required=False, default=False,
        description='see renderapi.render.RenderClient')

    @mm.pre_load
    def tolist(self, data):
        # coerce a scalar collection name into a one-element list
        if 'name' in data:
            if not isinstance(data['name'], list):
                data['name'] = [data['name']]
        # pre_load hooks must return the (possibly modified) data;
        # marshmallow 3 replaces the payload with the return value
        return data