class tPrimeParams(DefaultSchema):
    """Parameters for running the TPrime stream-alignment tool."""

    # Location of the TPrime executable.
    tPrime_path = InputDir(help='directory containing the TPrime executable.')
    sync_period = Float(default=1.0, help='Period of sync waveform (sec).')
    toStream_sync_params = String(
        required=False,
        default='SY=0,384,6,500',
        help='string of CatGT params used to extract to stream sync edges, e.g. SY=0,384,6,500',
    )
    ni_sync_params = String(
        required=False,
        default='XA=0,1,3,500',
        help='string of CatGT params used to extract NI sync edges, e.g. XA=0,1,3,500',
    )
    ni_ex_list = String(
        required=False,
        default='',
        help='string of CatGT params used to extract edges from ni, e.g. XA=0,1,3,500',
    )
    im_ex_list = String(
        required=False,
        default='',
        help='string of CatGT params used to extract edges from im streams, e.g. SY=0,384,6,500',
    )
    # Legacy Phase 3A data requires explicit edge-file paths below.
    tPrime_3A = Boolean(required=False, default=False, help='is this 3A data?')
    toStream_path_3A = String(
        required=False, help='full path to toStream edges file')
    fromStream_list_3A = List(
        String,
        required=False,
        help='list of full paths to fromStream edges files')
示例#2
0
class SessionMetadata(RaisingSchema):
    """Required subject/stimulus metadata for an experimental session."""

    specimen_name = String(required=True)
    age_in_days = Float(required=True)
    full_genotype = String(required=True)
    strain = String(required=True)
    sex = String(required=True)
    stimulus_name = String(required=True)
示例#3
0
class Channel(RaisingSchema):
    """Schema describing a single probe channel record."""

    @mm.pre_load
    def set_field_defaults(self, data, **kwargs):
        # Substitute text defaults for fields that may arrive as None.
        defaults = {
            "filtering": ("AP band: 500 Hz high-pass; "
                          "LFP band: 1000 Hz low-pass"),
            "manual_structure_acronym": "",
        }
        for field_name, default_value in defaults.items():
            if data.get(field_name) is None:
                data[field_name] = default_value
        return data

    id = Int(required=True)
    probe_id = Int(required=True)
    valid_data = Boolean(required=True)
    local_index = Int(required=True)
    probe_vertical_position = Int(required=True)
    probe_horizontal_position = Int(required=True)
    manual_structure_id = Int(required=True, allow_none=True)
    manual_structure_acronym = String(required=True)
    anterior_posterior_ccf_coordinate = Float(allow_none=True)
    dorsal_ventral_ccf_coordinate = Float(allow_none=True)
    left_right_ccf_coordinate = Float(allow_none=True)
    impedence = Float(required=False, allow_none=True, default=None)
    filtering = String(required=False)

    @mm.post_load
    def set_impedence_default(self, data, **kwargs):
        # np.nan is not a valid JSON 'float' for the Marshmallow `Float`
        # field, so substituting it must happen after validation
        # (doing it at pre_load would make validation fail).
        if data.get("impedence") is None:
            data["impedence"] = np.nan
        return data
class KilosortParameters(DefaultSchema):
    """Tunable parameters forwarded to Kilosort."""

    Nfilt = Int(required=False, default=1024)
    Threshold = String(required=False, default="[4, 10, 10]")
    lam = String(required=False, default="[5, 20, 20]")
    # NOTE(review): "IntitalizeTh" looks like a typo of "InitializeTh",
    # but the field name is part of the public schema — do not rename.
    IntitalizeTh = Int(required=False, default=-4)
    InitializeNfilt = Int(required=False, default=10000)
示例#5
0
class ProbeInputParameters(DefaultSchema):
    """Per-probe raw timing inputs used for clock alignment."""

    name = String(required=True, help="Identifier for this probe")
    sampling_rate = Float(
        required=True,
        help="The sampling rate of the probe, in Hz, assessed on the probe clock.",
    )
    lfp_sampling_rate = Float(
        required=True,
        help="The sampling rate of the LFP collected on this probe.")
    start_index = Int(
        default=0,
        help="Sample index of probe recording start time. Defaults to 0.")
    barcode_channel_states_path = String(
        required=True,
        help="Path to the channel states file. This file contains a 1-dimensional array whose axis is events and whose "
        "values indicate the state of the channel line (rising or falling) at that event.",
    )
    barcode_timestamps_path = String(
        required=True,
        help="Path to the timestamps file. This file contains a 1-dimensional array whose axis is events and whose "
        "values indicate the sample on which each event was detected.",
    )
    mappable_timestamp_files = Nested(
        ProbeMappable,
        many=True,
        # Fixed help-text typo: "occured" -> "occurred".
        help="Timestamps files for this probe. Describe the times (in probe samples) when e.g. lfp samples were taken or spike events occurred",
    )
示例#6
0
class ProbeOutputParameters(DefaultSchema):
    """Per-probe outputs of the current-source-density computation."""

    name = String(required=True, help='Identifier for this probe.')
    csd_path = String(
        required=True, help='Path to current source density file.')
    csd_channels = List(
        Int, required=True, help='LFP channels from which CSD was calculated.')
示例#7
0
class ProbeInputParameters(DefaultSchema):
    """Per-probe inputs for the current-source-density calculation."""

    name = String(required=True, help='Identifier for this probe.')
    lfp_data_path = String(required=True,
                           help='Path to lfp data for this probe')
    lfp_timestamps_path = String(
        required=True, help="Path to aligned lfp timestamps for this probe.")
    surface_channel = Int(
        required=True, help='Estimate of surface (pia boundary) channel index')
    reference_channels = List(
        Int, many=True, help='Indices of reference channels for this probe')
    csd_output_path = String(required=True,
                             help='CSD output will be written here.')
    sampling_rate = Float(required=True,
                          help='sampling rate assessed on master clock')
    total_channels = Int(default=384,
                         help='Total channel count for this probe.')
    surface_channel_adjustment = Int(
        default=40,
        help='Erring up in the surface channel estimate is less dangerous '
             'for the CSD calculation than erring down, so an adjustment '
             'is provided.'
    )
    spacing = Float(
        default=0.04,
        # Fixed help-text typo: "millimiters" -> "millimeters".
        help='distance (in millimeters) between lengthwise-adjacent rows '
             'of recording sites on this probe.'
    )
    phase = String(
        required=True,
        help='The probe type (3a or PXI) which determines if channels '
             'need to be reordered'
    )
示例#8
0
class ProbeOutputParameters(DefaultSchema):
    """Per-probe outputs of LFP subsampling."""

    name = String(required=True, help='Identifier for this probe.')
    lfp_data_path = String(required=True, help='Output subsampled data file.')
    lfp_timestamps_path = String(required=True,
                                 help='Timestamps for subsampled data.')
    lfp_channel_info_path = String(
        required=True,
        # Fixed garbled help text ("LFP channels from that was subsampled.").
        help='LFP channels that were subsampled.')
示例#9
0
class CopiedFile(RaisingSchema):
    """Record of one completed copy, with optional integrity hashes."""

    source = String(required=True, description='copied from here')
    destination = String(required=True, description='copied to here')
    key = String(required=False, description='passed from inputs')
    # Hashes are int arrays (not bytes) so they serialize to JSON.
    source_hash = List(Int, required=False)
    destination_hash = List(Int, required=False)
示例#10
0
class InputParameters(ArgSchema):
    """Arguments controlling a morphology feature-extraction run."""

    reconstructions = Nested(
        Reconstruction,
        description="The morphological reconstructions to be processed",
        required=True,
        many=True)
    heavy_output_path = OutputFile(
        description=("features whose results are heavyweight data (e.g. the numpy "
                     "arrays returned by layer histograms features) are stored here."),
        required=True)
    feature_set = String(
        description="select the basic set of features to calculate",
        required=False,
        default="aibs_default")
    only_marks = List(
        String,
        cli_as_single_argument=True,
        description="restrict calculated features to those with this set of marks",
        required=False)
    required_marks = String(
        description="Error (vs. skip) if any of these marks fail validation",
        required=False,
        many=True)
    output_table_path = OutputFile(
        description=("this module writes outputs to a json specified as --output_json. "
                     "If you want to store outputs in a different format "
                     "(.csv is supported currently), specify this parameter"),
        required=False)
    num_processes = Int(
        description=("Run a multiprocessing pool with this many processes. "
                     "Default is min(number of cpus, number of swcs). "
                     "Setting num_processes to 1 will avoid a pool."),
        required=False,
        default=None,
        allow_none=True)
    global_parameters = Nested(
        GlobalParameters,
        description=("provide additional configuration to this feature extraction run. "
                     "This configuration will be applied to all morphologies processed."),
        required=False)
示例#11
0
class PiaWmStreamlineSchema(ArgSchema):
    """Arg Schema for run_pia_wm_streamlines"""

    pia_path_str = String(
        required=True,
        description='string alternating x, y coordinates outlining the pia')
    wm_path_str = String(
        required=True,
        description='string alternating x, y coordinates outlining the wm')
    soma_path_str = String(
        required=False,
        description=('string alternating x, y coordinates outlining the soma. '
                     'If provided, streamlines will be translated so that '
                     'the origin is at the soma'))
    resolution = Float(
        required=False, default=1,
        description='Resolution of pixels in microns')
    pia_fixed_value = Float(
        required=False, default=1,
        description='Fixed value pia boundary condition')
    wm_fixed_value = Float(
        required=False, default=0,
        description='Fixed value wm boundary condition')
    mesh_res = Int(
        required=False, default=20,
        description='Resolution for mesh for laplace solver')
    output_dir = OutputDir(
        required=True,
        description='Directory to write xarray results')
示例#12
0
class FileToCopy(DefaultSchema):
    """One source/destination pair for the file-copy step."""

    source = InputFile(required=True, description='copy from here')
    destination = String(
        required=True,
        description='copy to here (full path, not just directory!)')
    key = String(
        required=True,
        description=('will be passed through to outputs, allowing a '
                     'name or kind to be associated with this file'))
class OutputParameters(OutputSchema):
    """Module outputs: run provenance and Kilosort statistics."""

    execution_time = Float()
    kilosort_commit_hash = String()
    kilosort_commit_date = String()
    mask_channels = NumpyArray()
    nTemplate = Int()
    nTot = Int()
示例#14
0
class OutputSchema(DefaultSchema):
    """Base output schema: echoes the input parameters alongside results."""

    input_parameters = Nested(
        InputParameters,
        description=("Input parameters the module "
                     "was run with"),
        required=True)
    output_path = String(help="Path to output csv file")
    output_frame_times_path = String(help="output all frame times here")
示例#15
0
class StimulusInputParameters(DefaultSchema):
    """Identifies the stimulus (table, key, optional index) used for CSD."""

    stimulus_table_path = String(required=True, help='Path to stimulus table')
    key = String(
        required=True,
        help='CSD is calculated from a specific stimulus, defined ('
             'in part) by this key.')
    index = Int(
        default=None, allow_none=True,
        help='CSD is calculated from a specific stimulus, defined ('
             'in part) by this index.')
示例#16
0
class ExtractFromNpxParams(DefaultSchema):
    """Parameters for extracting data from Open Ephys NPX files."""

    npx_directory = String(help='Path to NPX file(s) saved by Open Ephys')
    settings_xml = String(help='Path to settings.xml file saved by Open Ephys')
    npx_extractor_executable = String(
        help='Path to .exe file for NPX extraction (Windows only)')
    # NOTE(review): the default is the literal string 'None', not the None
    # singleton — presumably consumed as text downstream; confirm before
    # changing.
    npx_extractor_repo = String(
        required=False, default='None',
        help='Path to local repository for NPX extractor')
示例#17
0
class S3LandingBucket(DefaultSchema):
    """Destination S3 bucket (or access point) for uploads."""

    name = String(
        required=True,
        description="s3 landing bucket name or access point arn")
    region = String(
        required=True,
        description="s3 landing bucket's region")
    credentials_file = String(
        required=False,
        description="the INI config file path to the s3 landing bucket's credentials")
示例#18
0
class InputParameters(ArgSchema):
    """Inputs for building the optotagging stimulation table."""

    opto_pickle_path = String(
        required=True, help='path to file containing optotagging information')
    sync_h5_path = String(
        required=True,
        # Fixed help-text typo: "syncronization" -> "synchronization".
        help='path to h5 file containing synchronization information')
    output_opto_table_path = String(
        required=True,
        help='the optotagging stimulation table will be written here')
    conditions = Dict(String, Nested(Condition), default=known_conditions)
class EphysParams(DefaultSchema):
    """Acquisition parameters for Neuropixels ephys data."""

    sample_rate = Float(required=True, default=30000.0,
                        help='Sample rate of Neuropixels AP band continuous data')
    # BUG FIX: keyword was misspelled "require=True"; marshmallow does not
    # recognize it, so the field was never actually marked required.
    lfp_sample_rate = Float(required=True, default=2500.0,
                            help='Sample rate of Neuropixels LFP band continuous data')
    bit_volts = Float(required=True, default=0.195,
                      help='Scalar required to convert int16 values into microvolts')
    num_channels = Int(required=True, default=384,
                       help='Total number of channels in binary data files')
    reference_channels = NumpyArray(
        required=False,
        default=[36, 75, 112, 151, 188, 227, 264, 303, 340, 379],
        help='Reference channels on Neuropixels probe (numbering starts at 0)')
    template_zero_padding = Int(required=True, default=21,
                                help='Zero-padding on templates output by Kilosort')
    vertical_site_spacing = Float(required=False, default=20e-6,
                                  help='Vertical site spacing in meters')
    probe_type = String(required=False, default='3A', help='3A, 3B1, or 3B2')
    lfp_band_file = String(required=False,
                           help='Location of LFP band binary file')
    ap_band_file = String(required=False,
                          help='Location of AP band binary file')
示例#20
0
class OutputSchema(RaisingSchema):
    """Trace-extraction outputs and per-ROI exclusion report."""

    # TODO rename these to _path
    neuropil_trace_file = String(
        required=True,
        description='path to output h5 file containing neuropil traces')
    roi_trace_file = String(
        required=True,
        description='path to output h5 file containing roi traces')
    exclusion_labels = Nested(
        ExclusionLabel,
        many=True,
        description='a report of roi-wise problems detected during extraction')
示例#21
0
class InputParameters(ArgSchema):
    """Inputs for gridding an image series into a reference space."""

    class Meta:
        unknown = RAISE

    log_level = LogLevel(
        default='INFO',
        description="set the logging level of the module")
    case = String(
        required=True,
        validate=lambda s: s in VALID_CASES,
        help='select a use case to run')
    sub_images = Nested(
        SubImage,
        required=True,
        many=True,
        help='Sub images composing this image series')
    affine_params = List(
        Float,
        help='Parameters of affine image stack to reference space transform.')
    deformation_field_path = String(
        required=True,
        help='Path to parameters of the deformable local transform from '
             'affine-transformed image stack to reference space transform.')
    image_series_slice_spacing = Float(
        required=True,
        help='Distance (microns) between successive images in this series.')
    target_spacings = List(
        Float,
        required=True,
        help='For each volume produced, downsample to this isometric resolution')
    reference_spacing = Nested(
        ReferenceSpacing,
        required=True,
        help='Native spacing of reference space (microns).')
    reference_dimensions = Nested(
        ReferenceDimensions,
        required=True,
        help='Native dimensions of reference space.')
    sub_image_count = Int(required=True, help='Expected number of sub images')
    grid_prefix = String(required=True, help='Write output grid files here')
    accumulator_prefix = String(
        required=True,
        help='If this run produces accumulators, write them here.')
    storage_directory = String(
        required=False,
        help='Storage directory for this image series. Not used')
    filter_bit = Int(
        default=None,
        allow_none=True,
        help='if provided, signals that pixels with this bit high have '
             'passed the optional post-filter stage')
    nprocesses = Int(default=8, help='spawn this many worker subprocesses')
    reduce_level = Int(
        default=0,
        help='power of two by which to downsample each input axis')
示例#22
0
class SweepFeatures(DefaultSchema):
    """Per-sweep stimulus and recording-quality features."""

    stimulus_code = String(description="stimulus code", required=True)
    # Fixed copy-pasted description (was "index of sweep in order of
    # presentation", duplicated from sweep_number).
    stimulus_name = String(
        description="name of stimulus", required=True)
    stimulus_amplitude = Float(description="amplitude of stimulus",
                               required=True,
                               allow_none=True)
    sweep_number = Integer(
        description="index of sweep in order of presentation", required=True)
    # BUG FIX: keyword was misspelled "desription", so the description
    # metadata was never attached to this field.
    stimulus_units = String(description="stimulus units", required=True)
    bridge_balance_mohm = Float(description="bridge balance", allow_none=True)
    pre_vm_mv = Float(allow_none=True)
    leak_pa = Float(allow_none=True)
示例#23
0
class FileToCopy(RaisingSchema):
    """A validated source/destination pair for the copy step."""

    source = String(
        required=True,
        validate=check_read_access,
        description='copy from here')
    destination = String(
        required=True,
        validate=check_write_access,
        description='copy to here (full path, not just directory!)')
    key = String(
        required=True,
        description='will be passed through to outputs, allowing a name or kind to be associated with this file')
示例#24
0
class InputParameters(ArgSchema):
    """Inputs for running stimulus-analysis metrics on a session NWB."""

    drifting_gratings = Nested(DriftingGratings)
    static_gratings = Nested(StaticGratings)
    natural_scenes = Nested(NaturalScenes)
    # natural_movies = Nested(NaturalMovies)
    dot_motion = Nested(DotMotion)
    # contrast_tuning = Nested(ContrastTuning)
    flashes = Nested(Flashes)
    receptive_field_mapping = Nested(ReceptiveFieldMapping)

    input_session_nwb = String(
        required=True, help='Ecephys spiking nwb file for session')
    output_file = String(
        required=True, help='Location for saving output file')
示例#25
0
class db_params(DefaultSchema):
    """Connection parameters for render/mongo/file data interfaces."""

    owner = String(default='',
                   required=False,
                   description='render or mongo owner')
    project = String(default='',
                     required=False,
                     description='render or mongo project')
    name = List(String,
                cli_as_single_argument=True,
                required=False,
                many=True,
                description='render or mongo collection name')
    host = String(required=False, description='render host')
    port = Int(default=8080, required=False, description='render port')
    mongo_host = String(default='em-131fs',
                        required=False,
                        description='mongodb host')
    mongo_port = Int(default=27017, required=False, description='mongodb port')
    mongo_userName = String(default='',
                            required=False,
                            description='mongo user name')
    mongo_authenticationDatabase = String(default='',
                                          required=False,
                                          description='mongo admin db')
    mongo_password = String(default='',
                            required=False,
                            description='mongo pwd')
    # BUG FIX: keyword was "validator=", which marshmallow does not
    # recognize (the parameter is "validate="), so the OneOf check
    # never actually ran.
    db_interface = String(default='mongo',
                          validate=mm.validate.OneOf(
                              ['render', 'mongo', 'file']),
                          description=("render: read or write via render\n"
                                       "mongo: read or write via pymongo\n"
                                       "file: read or write to file"))
    client_scripts = String(
        default=("/allen/aibs/pipeline/image_processing/"
                 "volume_assembly/render-jars/production/scripts"),
        required=False,
        description='see renderapi.render.RenderClient')
    memGB = String(required=False,
                   default='5G',
                   description='see renderapi.render.RenderClient')
    validate_client = Boolean(required=False,
                              default=False,
                              description='see renderapi.render.RenderClient')

    @mm.pre_load
    def tolist(self, data, **kwargs):
        """Coerce a scalar 'name' into a one-element list."""
        if 'name' in data:
            if not isinstance(data['name'], list):
                data['name'] = [data['name']]
        # BUG FIX: pre_load hooks must return the (possibly modified)
        # data; this hook previously returned None.
        return data
示例#26
0
class db_params(ArgSchema):
    """Render/mongo connection parameters for stack access."""

    owner = String(
        default='',
        required=False,
        description='owner')
    project = String(
        default='',
        required=False,
        description='project')
    name = List(
        String,
        cli_as_single_argument=True,
        required=True,
        many=True,
        description='stack name')
    host = String(
        default=None,
        required=False,
        description='render host')
    port = Int(
        default=8080,
        required=False,
        description='render port')
    mongo_host = String(
        default='em-131fs',
        required=False,
        description='mongodb host')
    mongo_port = Int(
        default=27017,
        required=False,
        description='mongodb port')
    mongo_userName = String(
        default='',
        required=False,
        description='mongo user name')
    mongo_authenticationDatabase = String(
        default='',
        required=False,
        description='mongo admin db')
    mongo_password = String(
        default='',
        required=False,
        description='mongo pwd')
    db_interface = String(
        default='mongo')
    client_scripts = String(
        default=("/allen/aibs/pipeline/image_processing/"
                 "volume_assembly/render-jars/production/scripts"),
        required=False,
        description='render bin path')

    @pre_load
    def tolist(self, data, **kwargs):
        """Coerce a scalar 'name' into a one-element list."""
        # BUG FIX: guard against 'name' being absent (previously raised a
        # bare KeyError before validation could report the missing field),
        # and return the data (pre_load hooks must return the processed
        # data; this hook previously returned None).
        if 'name' in data and not isinstance(data['name'], list):
            data['name'] = [data['name']]
        return data
示例#27
0
class InputParameters(ArgSchema):
    """Inputs for uploading a neuron reconstruction to S3."""

    destination_bucket = Nested(
        S3LandingBucket,
        description="s3 landing bucket info (bucket_name/access_point_arn and region)",
        required=True)
    neuron_reconstruction_id = Int(
        description="neuron reconstruction id",
        required=True)
    specimen_id = Int(description="specimen id", required=True)
    primary_boundaries = Nested(
        PrimaryBoundaries,
        description="primary boundaries",
        required=True)
    swc_file = String(
        description="path to input swc (csv) file",
        required=True)
    cell_depth = Float(
        description="cell depth",
        required=True,
        allow_none=True)
    cut_thickness = Float(
        description="cut thickness",
        required=True,
        allow_none=True)
    marker_file = String(
        description="path to input marker (csv) file",
        required=True)
    ccf_soma_xyz = List(
        Float,
        cli_as_single_argument=True,
        description="soma location (x,y,z) coordinates in CCF",
        required=True)
    slice_transform = List(
        Float,
        cli_as_single_argument=True,
        description='List defining the transform defining slice cut angle',
        required=True,
        allow_none=True)
    slice_image_flip = Boolean(
        description=('indicates whether the image was flipped relative '
                     'to the slice (avg_group_label.name = \'Flip Slice Indicator\''),
        required=True)
示例#28
0
class CatGTParams(DefaultSchema):
    """Command-line parameters for a CatGT preprocessing run."""

    run_name = String(
        required=True,
        # Fixed help text: closing parenthesis was missing.
        help='undecorated run name (no g or t indices)')
    gate_string = String(required=True, default='0', help='gate string')
    trigger_string = String(
        required=True,
        default='0,0',
        help='string specifying trials to concatenate, e.g. 0,200')
    probe_string = String(required=True,
                          default='0',
                          help='string specifying probes, e.g. 0:3')
    stream_string = String(required=True,
                           default='-ap',
                           help='string specifying which streams to process')
    car_mode = String(
        required=False,
        default='None',
        # Fixed help-text typo: "Comaon" -> "Common".
        help='Common average reference mode. Must = None, gbldmx, or loccar ')
    loccar_inner = Int(required=False,
                       default=2,
                       help='Inner radius for loccar in sites')
    loccar_outer = Int(required=False,
                       default=8,
                       help='Outer radius for loccar in sites')
    cmdStr = String(
        required=True,
        default='-prbfld -aphipass=300 -gbldmx -gfix=0.40,0.10,0.02',
        help='input stream filter, error correct and extract settings for CatGT')
    extract_string = String(required=True,
                            default='',
                            help='extract edges from datastreams')
    catGTPath = InputDir(help='directory containing the CatGT executable.')
示例#29
0
class ProbeMappable(DefaultSchema):
    """A probe-sample timestamps file and the destination for its mapped copy."""

    name = String(
        required=True,
        help='What kind of mappable data is this? e.g. "spike_timestamps"')
    input_path = String(
        required=True,
        help="Input path for this file. Should point to a file containing a 1D timestamps array with values in probe samples.")
    output_path = String(
        required=True,
        help="Output path for the mapped version of this file. Will write a 1D timestamps array with values in seconds on the master clock.")
示例#30
0
class output_stack(db_params):
    """Parameters describing where and how to write an output stack."""

    output_file = OutputFile(
        required=False,
        missing=None,
        default=None,
        description=("json or json.gz serialization of input stack"
                     "ResolvedTiles."))
    compress_output = Boolean(
        required=False,
        default=True,
        missing=True,
        description=("if writing file, compress with gzip."))
    collection_type = String(default='stack',
                             description="'stack' or 'pointmatch'")
    use_rest = Boolean(
        default=False,
        description=("passed as kwarg to "
                     "renderapi.client.import_tilespecs_parallel"))

    @mm.post_load
    def validate_file(self, data, **kwargs):
        """Require an output_file when writing via the 'file' interface."""
        if data['db_interface'] == 'file':
            if data['output_file'] is None:
                raise mm.ValidationError("with db_interface 'file', "
                                         "'output_file' must be a file")
        # BUG FIX: post_load hooks must return the data; returning None
        # makes schema.load() yield None.
        return data

    @mm.post_load
    def validate_data(self, data, **kwargs):
        """Allow at most one input/output stack name."""
        if 'name' in data:
            if len(data['name']) != 1:
                raise mm.ValidationError("only one input or output "
                                         "stack name is allowed")
        # BUG FIX: return the data (see validate_file).
        return data