Example 1
class InputSchema(ArgSchema):
    """Input parameters for the file-copy module: which files to copy, how to
    copy them, and how to verify the copies."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(default='INFO',
                         description='set the logging level of the module')
    files = Nested(FileToCopy,
                   many=True,
                   required=True,
                   description='files to be copied')
    use_rsync = Boolean(
        default=True,
        description=
        'copy files using rsync rather than shutil (this is not likely to work if you are running windows!)'
    )
    hasher_key = String(
        default='sha256',
        validate=lambda st: st in available_hashers,
        allow_none=True,
        description=
        'select a hash function to compute over base64-encoded pre- and post-copy files'
    )
    raise_if_comparison_fails = Boolean(
        default=True,
        description='if a hash comparison fails, throw an error (vs. a warning)'
    )
    make_parent_dirs = Boolean(
        default=True,
        description='build missing parent directories for destination')
    # NOTE(review): 775 is a *decimal* int literal, not octal 0o775 — confirm
    # the consumer interprets these digits as a permission string before use.
    chmod = Int(
        default=775,
        description=
        "destination files (and any created parents) will have these permissions"
    )
Example 2
class SessionUploadInputSchema(ArgSchema, NonFileParameters):
    """Inputs for a session upload: a logging level and the files to copy."""

    log_level = LogLevel(
        default='INFO',
        description='set the logging level of the module')
    files = Nested(
        FileToCopy,
        required=True,
        many=True,
        description='files to be copied')
Example 3
class InputSchema(ArgSchema):
    """Inputs: a Dockerfile, a zipped model, and an eye-tracking movie,
    plus output paths for ellipse data and optional rendered videos."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(
        default='INFO',
        description='set the logging level of the module')
    rule = String(
        required=False,
        default='run')
    dockerfile = String(
        required=True,
        validate=check_read_access,
        description='Dockerfile for image')
    modelfile = String(
        required=True,
        validate=check_read_access,
        description='Zip file for model')
    video_input_file = String(
        required=True,
        validate=check_read_access,
        description='Eye tracking movie')
    ellipse_output_data_file = String(
        required=True,
        validate=check_write_access_overwrite,
        description='write outputs to here')
    ellipse_output_video_file = String(
        required=False,
        validate=check_write_access_overwrite,
        description='write outputs to here')
    points_output_video_file = String(
        required=False,
        validate=check_write_access_overwrite,
        description='write outputs to here')
Example 4
class InputSchema(ArgSchema):
    """Inputs: a behavior-session record and the path of the output JSON."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = mm.RAISE

    log_level = LogLevel(
        default='INFO', description='Logging level of the module')
    session_data = Nested(
        BehaviorSessionData, required=True,
        description='Data pertaining to a behavior session')
    output_path = String(
        required=True, validate=check_write_access_overwrite,
        description='Path of output.json to be written')
Example 5
class InputParameters(ArgSchema):
    """Inputs for gridding an image series into reference space: the sub
    images, the affine + deformable transforms, spacings/dimensions, and
    output locations."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    # NOTE(review): this field uses `description=` while every other field in
    # this class uses `help=` — both land in field metadata, but the
    # inconsistency is worth confirming against downstream doc generation.
    log_level = LogLevel(default='INFO',
                         description="set the logging level of the module")
    case = String(required=True,
                  validate=lambda s: s in VALID_CASES,
                  help='select a use case to run')
    sub_images = Nested(SubImage,
                        required=True,
                        many=True,
                        help='Sub images composing this image series')
    affine_params = List(
        Float,
        help='Parameters of affine image stack to reference space transform.')
    deformation_field_path = String(
        required=True,
        help=
        'Path to parameters of the deformable local transform from affine-transformed image stack to reference space transform.'
    )
    image_series_slice_spacing = Float(
        required=True,
        help='Distance (microns) between successive images in this series.')
    target_spacings = List(
        Float,
        required=True,
        help='For each volume produced, downsample to this isometric resolution'
    )
    reference_spacing = Nested(
        ReferenceSpacing,
        required=True,
        help='Native spacing of reference space (microns).')
    reference_dimensions = Nested(ReferenceDimensions,
                                  required=True,
                                  help='Native dimensions of reference space.')
    sub_image_count = Int(required=True, help='Expected number of sub images')
    grid_prefix = String(required=True, help='Write output grid files here')
    accumulator_prefix = String(
        required=True,
        help='If this run produces accumulators, write them here.')
    # kept for schema compatibility only, per its own help text
    storage_directory = String(
        required=False,
        help='Storage directory for this image series. Not used')
    filter_bit = Int(
        default=None,
        allow_none=True,
        help=
        'if provided, signals that pixels with this bit high have passed the optional post-filter stage'
    )
    nprocesses = Int(default=8, help='spawn this many worker subprocesses')
    reduce_level = Int(
        default=0, help='power of two by which to downsample each input axis')
Example 6
class InputSchema(ArgSchema):
    """Inputs: a session-data record and a writable output path."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(
        default='INFO',
        description='set the logging level of the module')
    session_data = Nested(
        SessionData,
        required=True,
        description=('records of the individual probes '
                     'used for this experiment'))
    output_path = String(
        required=True,
        validate=check_write_access_overwrite,
        description='write outputs to here')
Example 7
class InputSchema(ArgSchema):
    """Inputs: session data, a writable output path, and a flag for skipping
    eye-tracking processing."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(default='INFO',
                         description='set the logging level of the module')
    session_data = Nested(SessionData, required=True,
                          description='records of the individual probes '
                                      'used for this experiment')
    output_path = String(required=True, validate=check_write_access_overwrite,
                         description='write outputs to here')
    # required=True was contradictory with default=False: a required field can
    # never fall back to its default (marshmallow 3 rejects the combination
    # outright), and the provided default shows the field is meant optional.
    skip_eye_tracking = Boolean(
        required=False, default=False,
        description="Whether or not to skip processing eye tracking data. "
                    "If True, no eye tracking data will be written to NWB")
Example 8
class InputSchema(ArgSchema):
    """Inputs for trace extraction: motion border, ROI specs, the motion
    corrected stack, and output/file locations."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = RAISE

    log_level = LogLevel(
        default='INFO',
        description='set the logging level of the module')
    motion_border = Nested(
        MotionBorder,
        required=True,
        description=('border widths - pixels outside the '
                     'border are considered invalid'))
    storage_directory = String(
        required=True,
        description='used to set output directory')
    motion_corrected_stack = String(
        required=True,
        description=('path to h5 file containing '
                     'motion corrected image stack'))
    rois = Nested(
        Roi,
        many=True,
        description=('specifications of individual regions of '
                     'interest'))
    log_0 = String(
        required=True,
        description='path to motion correction output csv')
Example 9
class TraceExtractionInputSchema(ArgSchema):
    """Inputs for trace extraction from a motion-corrected image stack."""

    log_level = LogLevel(default='INFO',
                         description="set the logging level of the module")
    motion_border = Nested(
        MotionBorder, required=True,
        description=("border widths - pixels outside the border are "
                     "considered invalid"))
    storage_directory = OutputDir(
        required=True,
        description="used to set output directory")
    motion_corrected_stack = H5InputFile(
        required=True,
        description="path to h5 file containing motion corrected image stack")
    rois = Nested(
        ExtractROISchema, many=True,
        description="specifications of individual regions of interest")
    log_0 = InputFile(
        required=False,
        description=("path to motion correction output csv. "
                     "NOTE: not used, but provided by LIMS schema."))
Example 10
class InputSchema(ArgSchema):
    """Inputs for eye-gaze mapping: ellipse fits and sync/video files, the
    rig geometry (monitor, camera, and LED positions/rotations), and an
    output h5 path."""

    # ============== Required fields ==============
    input_file = InputFile(
        required=True,
        description=('An h5 file containing ellipses fits for '
                     'eye, pupil, and corneal reflections.'))

    session_sync_file = InputFile(
        required=True,
        description=('An h5 file containing timestamps to synchronize '
                     'eye tracking video frames with rest of ephys '
                     'session events.'))

    output_file = OutputFile(
        required=True,
        description=('Full save path of output h5 file that '
                     'will be created by this module.'))

    # Rig geometry: positions in millimeters, rotations in degrees,
    # all in the 'global' coordinate frame per the descriptions below.
    monitor_position_x_mm = Float(required=True,
                                  description=("Monitor center X position in "
                                               "'global' coordinates "
                                               "(millimeters)."))
    monitor_position_y_mm = Float(required=True,
                                  description=("Monitor center Y position in "
                                               "'global' coordinates "
                                               "(millimeters)."))
    monitor_position_z_mm = Float(required=True,
                                  description=("Monitor center Z position in "
                                               "'global' coordinates "
                                               "(millimeters)."))
    monitor_rotation_x_deg = Float(required=True,
                                   description="Monitor X rotation in degrees")
    monitor_rotation_y_deg = Float(required=True,
                                   description="Monitor Y rotation in degrees")
    monitor_rotation_z_deg = Float(required=True,
                                   description="Monitor Z rotation in degrees")
    camera_position_x_mm = Float(required=True,
                                 description=("Camera center X position in "
                                              "'global' coordinates "
                                              "(millimeters)"))
    camera_position_y_mm = Float(required=True,
                                 description=("Camera center Y position in "
                                              "'global' coordinates "
                                              "(millimeters)"))
    camera_position_z_mm = Float(required=True,
                                 description=("Camera center Z position in "
                                              "'global' coordinates "
                                              "(millimeters)"))
    camera_rotation_x_deg = Float(required=True,
                                  description="Camera X rotation in degrees")
    camera_rotation_y_deg = Float(required=True,
                                  description="Camera Y rotation in degrees")
    camera_rotation_z_deg = Float(required=True,
                                  description="Camera Z rotation in degrees")
    led_position_x_mm = Float(required=True,
                              description=("LED X position in 'global' "
                                           "coordinates (millimeters)"))
    led_position_y_mm = Float(required=True,
                              description=("LED Y position in 'global' "
                                           "coordinates (millimeters)"))
    led_position_z_mm = Float(required=True,
                              description=("LED Z position in 'global' "
                                           "coordinates (millimeters)"))
    equipment = String(required=True,
                       description=('String describing equipment setup used '
                                    'to acquire eye tracking videos.'))
    date_of_acquisition = String(required=True,
                                 description='Acquisition datetime string.')
    eye_video_file = InputFile(required=True,
                               description=('Full path to raw eye video '
                                            'file (*.avi).'))

    # ============== Optional fields ==============
    eye_radius_cm = Float(default=0.1682,
                          description=('Radius of tracked eye(s) in '
                                       'centimeters.'))
    # NOTE(review): default is presumably a rig-specific calibration value
    # (10.2 cm per 10000 px) — confirm before reusing on other rigs.
    cm_per_pixel = Float(default=(10.2 / 10000.0),
                         description=('Centimeter per pixel conversion '
                                      'ratio.'))
    log_level = LogLevel(default='INFO',
                         description='Set the logging level of the module.')
Example 11
class InputSchema(ArgSchema):
    """Inputs for assembling an ecephys session: identifiers, timing and
    stimulus files, probe records, eye-tracking artifacts, and an output
    path."""

    class Meta:
        # reject unexpected keys instead of silently ignoring them
        unknown = mm.RAISE

    log_level = LogLevel(default="INFO",
                         help="set the logging level of the module")
    output_path = String(
        required=True,
        validate=check_write_access,
        help="write outputs to here",
    )
    session_id = Int(required=True,
                     help="unique identifier for this ecephys session")
    session_start_time = DateTime(
        required=True,
        help="the date and time (iso8601) at which the session started",
    )
    stimulus_table_path = String(
        required=True,
        validate=check_read_access,
        help="path to stimulus table file",
    )
    invalid_epochs = Nested(InvalidEpoch,
                            many=True,
                            required=True,
                            help="epochs with invalid data")
    probes = Nested(
        Probe,
        many=True,
        required=True,
        help="records of the individual probes used for this experiment",
    )
    running_speed_path = String(
        required=True,
        help=
        "data collected about the running behavior of the experiment's subject",
    )
    session_sync_path = String(
        required=True,
        validate=check_read_access,
        help=
        "Path to an h5 experiment session sync file (*.sync). This file relates events from different acquisition modalities to one another in time."
    )
    eye_tracking_rig_geometry = Dict(
        required=True,
        help=
        "Mapping containing information about session rig geometry used for eye gaze mapping."
    )
    eye_dlc_ellipses_path = String(
        required=True,
        validate=check_read_access,
        help=
        "h5 filepath containing raw ellipse fits produced by Deep Lab Cuts of subject eye, pupil, and corneal reflections during experiment"
    )
    eye_gaze_mapping_path = String(
        required=False,
        allow_none=True,
        help=
        "h5 filepath containing eye gaze behavior of the experiment's subject")
    pool_size = Int(
        default=3,
        help="number of child processes used to write probewise lfp files")
    # help text previously ended mid-sentence ("...during this "); completed.
    optotagging_table_path = String(
        required=False,
        validate=check_read_access,
        help=
        "file at this path contains information about the optogenetic stimulation applied during this session"
    )
    session_metadata = Nested(
        SessionMetadata,
        allow_none=True,
        required=False,
        help="miscellaneous information describing this session")