def main():
    """Build and export the ndx-nonrigid-motion-correction extension spec."""
    # the values for ns_builder are auto-generated from your cookiecutter inputs
    ns_builder = NamespaceBuilder(
        doc='General framework for storing nonrigid motion correction of optical imaging in '
            'NWB:N 2.0',
        name='ndx-nonrigid-motion-correction',
        version='0.1.0',
        author='Ben Dichter',
        contact='*****@*****.**')

    # see https://pynwb.readthedocs.io/en/latest/extensions.html#extending-nwb for more information
    nmc = NWBGroupSpec(
        neurodata_type_def='NonrigidMotionCorrection',
        neurodata_type_inc='NWBDataInterface',
        doc='General framework for storing nonrigid motion correction of optical imaging in NWB:N 2.0',
    )

    # I think it may make more sense to store pixel_map as an x*y*z array and convert to your form
    nmc.add_dataset(
        name='pixel_map',
        dtype='int',
        shape=((None, None), (None, None, None)),
        dims=(('x', 'y'), ('x', 'y', 'z')),
        doc="A mapping function of pixels onto basis variables. To be fully general, this is a sparse "
            "matrix (AxBxC) (number of voxels in each of the spatial dimensions). In standard "
            "blockwise motion correction, this matrix is a matrix of ones. In CNMF's patches, this matrix "
            "is a labels each non-overlapping patch with a unique int")
    nmc.add_dataset(
        name='data',
        dtype='float',
        shape=(None, None),
        dims=('time', 'nblocks * ndims'),
        doc="A set of (NxM) basis variables where N is the number of blocks, M is the number of spatial "
            "dimensions (1 to 3) each defined for a set of times T (for the number of time-steps). In "
            "standard block-wise motion correction, N = 1, in line-by-line motion correction, N = number of"
            " lines, in CNMF's non-rigid motion correction, N = number of patches")
    # fix: this doc string was broken by a stray line break in the source; rejoined here
    nmc.add_attribute(
        name='interpolation_type',
        dtype='text',
        doc="the relationship between expected shifts and the estimated movie value. This will frequently"
            " be either nearest neighbor (common), 1 pixel local interpolation (most common), or some "
            "other interpolation function (least common, such as MATLAB's 3d gridded interpolation)",
        default_value='local_interpolation')
    nmc.add_attribute(
        name='spatial_dimensions',
        dtype='int',
        shape=((2,), (3,)),
        dims=('x, y', 'x, y, z'),
        doc='number of pixels in each spatial dimension')
    nmc.add_attribute(name='nblocks', dtype='int', doc='number of blocks')

    new_data_types = [nmc]
    ns_builder.include_type('TimeSeries', namespace='core')
    export_spec(ns_builder, new_data_types)
def main():
    """Build and export the ndx-hierarchical-behavioral-data extension spec."""
    ns_builder = NamespaceBuilder(
        doc="""HDMF extensions for storing hierarchical behavioral data""",
        name="""ndx-hierarchical-behavioral-data""",
        version="""0.1.1""",
        author=list(map(str.strip, """Ben Dichter""".split(','))),
        contact=list(map(str.strip, """*****@*****.**""".split(','))))

    # TODO: specify the neurodata_types that are used by the extension as well
    # as in which namespace they are found
    # this is similar to specifying the Python modules that need to be imported
    # to use your new data types
    # as of HDMF 1.6.1, the full ancestry of the neurodata_types that are used by
    # the extension should be included, i.e., the neurodata_type and its parent
    # type and its parent type and so on. this will be addressed in a future
    # release of HDMF.
    ns_builder.include_type('TimeIntervals', namespace='core')
    ns_builder.include_type('DynamicTableRegion', namespace='core')
    ns_builder.include_type('VectorData', namespace='core')
    # fix: 'VectorIndex' is used below (next_tier_index) but was never included
    ns_builder.include_type('VectorIndex', namespace='core')

    behav_table = NWBGroupSpec(
        neurodata_type_def='HierarchicalBehavioralTable',
        neurodata_type_inc='TimeIntervals',
        doc='DynamicTable that holds hierarchical behavioral information.')
    behav_table.add_dataset(
        name='label',
        neurodata_type_inc='VectorData',
        doc='The label associated with each item',
        dtype='text')
    next_tier = behav_table.add_dataset(
        name='next_tier',
        neurodata_type_inc='DynamicTableRegion',
        doc='reference to the next tier',
    )
    next_tier.add_attribute(
        name='table',
        dtype=RefSpec(target_type='TimeIntervals', reftype='object'),
        doc='reference to the next level')
    behav_table.add_dataset(
        name='next_tier_index',
        neurodata_type_inc='VectorIndex',
        doc='Index dataset for next tier.',
    )

    new_data_types = [behav_table]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Create the ndx-survey-data (v0.2.0) spec and write it to ../../spec."""
    # these arguments were auto-generated from your cookiecutter inputs
    ns_builder = NWBNamespaceBuilder(
        doc='NWB extension for survey/behavioral data',
        name='ndx-survey-data',
        version='0.2.0',
        author=[a.strip() for a in 'Ben Dichter, Armin Najarpour Foroushani'.split(',')],
        contact=[c.strip() for c in '*****@*****.**'.split(',')],
    )

    # core types the new types build on
    for core_type in ('DynamicTable', 'VectorData'):
        ns_builder.include_type(core_type, namespace='core')

    # table that groups all survey responses
    survey_data = NWBGroupSpec(
        doc='Table that holds information about the survey/behavior',
        neurodata_type_def='SurveyTable',
        neurodata_type_inc='DynamicTable',
        default_name='survey_data',
    )

    # column type for a single question, carrying its set of answer options
    question_response = NWBDatasetSpec(
        doc='Column that holds information about a question',
        neurodata_type_def='QuestionResponse',
        neurodata_type_inc='VectorData',
        default_name='question_response',
        attributes=[
            NWBAttributeSpec(
                name='options',
                doc='Response Options',
                dtype='text',
                shape=(None,),
                dims=('num_options',),
            )
        ],
    )

    # timestamp column on the survey table
    survey_data.add_dataset(
        neurodata_type_inc='VectorData',
        doc='UNIX time of survey response',
        name='unix_timestamp',
        dtype='int',
        shape=(None,),
        dims=('num_responses',),
    )

    new_data_types = [survey_data, question_response]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Build and export the ndx-pointcloudseries extension spec."""
    ns_builder = NWBNamespaceBuilder(
        doc='type for storing time-varying 3D point clouds',
        name='ndx-pointcloudseries',
        version='0.0.1',
        author='Luiz Tauffer and Ben Dichter',
        contact='*****@*****.**')

    PointCloudSeries = NWBGroupSpec(
        doc='type for storing time-varying 3D point clouds',
        neurodata_type_def='PointCloudSeries',
        neurodata_type_inc='TimeSeries',
    )
    PointCloudSeries.add_dataset(
        name='point_cloud',
        neurodata_type_inc='VectorData',
        doc='datapoints locations over time',
        dims=('time', '[x, y, z]'),
        shape=(None, 3),
        dtype='float',
        quantity='?')
    # fix: shape was written as (None), which evaluates to the scalar None
    # rather than a 1-tuple; (None,) declares the 1-D shape matching
    # dims=('index',)
    PointCloudSeries.add_dataset(
        name='point_cloud_index',
        neurodata_type_inc='VectorIndex',
        doc='datapoints indices',
        dims=('index',),
        shape=(None,),
        quantity='?')
    PointCloudSeries.add_dataset(
        name='color',
        neurodata_type_inc='VectorData',
        doc='datapoints color',
        dims=('time', '[r, g, b]'),
        shape=(None, 3),
        dtype='float',
        quantity='?')
    PointCloudSeries.add_dataset(
        name='color_index',
        neurodata_type_inc='VectorIndex',
        doc='datapoints colors indices',
        dims=('index',),
        shape=(None,),  # fix: was (None), i.e. scalar None, not a 1-tuple
        quantity='?')

    new_data_types = [PointCloudSeries]

    ns_builder.include_type('TimeSeries', namespace='core')
    ns_builder.include_type('VectorData', namespace='core')
    ns_builder.include_type('VectorIndex', namespace='core')
    export_spec(ns_builder, new_data_types)
def main():
    """Build the ndx-spectrum extension spec and export it to ../../spec."""
    ns_builder = NWBNamespaceBuilder(
        doc='data type for holding power or phase spectra for a signal',
        name='ndx-spectrum',
        version='0.2.2',
        author='Ben Dichter',
        contact='*****@*****.**')

    # core types that Spectrum builds on
    for core_type in ('NWBDataInterface', 'TimeSeries', 'DynamicTableRegion'):
        ns_builder.include_type(core_type, namespace='core')

    spectrum = NWBGroupSpec(
        neurodata_type_def='Spectrum',
        neurodata_type_inc='NWBDataInterface',
        doc='type for storing power or phase of spectrum')

    # power and phase share one layout: 1-D over frequency, or 2-D over
    # frequency x channel
    for data_name in ('power', 'phase'):
        spectrum.add_dataset(
            name=data_name,
            doc='spectrum values',
            dims=(('frequency',), ('frequency', 'channel')),
            shape=((None,), (None, None)),
            dtype='float',
            quantity='?')

    spectrum.add_dataset(
        name='frequencies',
        doc='frequencies of spectrum',
        dims=('frequency',),
        shape=(None,),
        dtype='float')
    spectrum.add_link(
        name='source_timeseries',
        target_type='TimeSeries',
        doc='timeseries that this spectrum describes',
        quantity='?')
    spectrum.add_dataset(
        name='electrodes',
        doc='the electrodes that this series was generated from',
        neurodata_type_inc='DynamicTableRegion',
        quantity='?')

    new_data_types = [spectrum]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Build the ndx-bipolar-scheme extension spec and export it to ../../spec."""
    # these arguments were auto-generated from your cookiecutter inputs
    ns_builder = NWBNamespaceBuilder(
        doc='An NWB:N extension for storing bipolar schema',
        name='ndx-bipolar-scheme',
        version='0.3.0',
        author=[a.strip() for a in 'Ben Dichter,Armin Najarpour,Ryan Ly'.split(',')],
        contact=[c.strip() for c in '*****@*****.**'.split(',')])

    # core types the new types build on
    for type_name in ('LabMetaData', 'DynamicTableRegion', 'DynamicTable',
                      'VectorIndex'):
        ns_builder.include_type(type_name, namespace='core')

    # LabMetaData container holding the (optional) bipolar scheme table
    ecephys_ext = NWBGroupSpec(
        doc='Group that holds proposed extracellular electrophysiology extensions.',
        neurodata_type_def='EcephysExt',
        neurodata_type_inc='LabMetaData',
        default_name='ecephys_ext',
        groups=[
            NWBGroupSpec(
                name='bipolar_scheme_table',
                neurodata_type_inc='BipolarSchemeTable',
                doc='Bipolar referencing scheme used',
                quantity='?')
        ])

    bipolar_scheme = NWBGroupSpec(
        doc='Table that holds information about the bipolar scheme used',
        neurodata_type_def='BipolarSchemeTable',
        neurodata_type_inc='DynamicTable',
        default_name='bipolar_scheme')

    # anode and cathode columns have identical layouts: a region into the
    # electrodes table, one entry per referenced electrode
    for electrode_col in ('anodes', 'cathodes'):
        bipolar_scheme.add_dataset(
            name=electrode_col,
            neurodata_type_inc='DynamicTableRegion',
            doc='references the electrodes table',
            dims=('num_electrodes',),
            shape=(None,),
            dtype='int')
    bipolar_scheme.add_dataset(
        name='anodes_index',
        neurodata_type_inc='VectorIndex',
        doc='Indices for the anode table',
        dims=('num_electrode_grp',),
        shape=(None,))
    bipolar_scheme.add_dataset(
        name='cathodes_index',
        neurodata_type_inc='VectorIndex',
        doc='Indices for the cathode table',
        dims=('num_electrode_grp',),
        shape=(None,))

    new_data_types = [ecephys_ext, bipolar_scheme]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Build and export the spec for the TEMPO voltage-imaging extension."""
    # NOTE(review): `name` refers to a module-level variable defined outside
    # this function — confirm it is set before main() runs.
    ns_builder = NWBNamespaceBuilder(
        doc='nwb extention for voltage imaging technique called TEMPO',
        name=name,
        version='0.1.0',
        author=list(map(str.strip, 'Saksham Sharda'.split(','))),
        contact=list(map(str.strip, '*****@*****.**'.split(','))))

    ns_builder.include_type('VectorData', namespace='hdmf-common')
    ns_builder.include_type('DynamicTable', namespace='hdmf-common')
    ns_builder.include_type('Subject', namespace='core')
    ns_builder.include_type('NWBDataInterface', namespace='core')
    ns_builder.include_type('NWBContainer', namespace='core')
    ns_builder.include_type('Device', namespace='core')

    # VectorData subtype carrying unit/conversion/resolution metadata,
    # mirroring TimeSeries.data
    measurement = NWBDatasetSpec(
        'Flexible vectordataset with a custom unit/conversion/resolution'
        ' field similar to timeseries.data',
        attributes=[
            NWBAttributeSpec(
                'unit',
                'The base unit of measure used to store data. This should be in the SI unit.'
                'COMMENT: This is the SI unit (when appropriate) of the stored data, such as '
                'Volts. If the actual data is stored in millivolts, the field ''conversion'' '
                'below describes how to convert the data to the specified SI unit.',
                'text'),
            NWBAttributeSpec(
                'conversion',
                'Scalar to multiply each element in '
                'data to convert it to the specified unit',
                'float32', required=False, default_value=1.0),
            # fix: this doc string was split by a stray line break in the
            # source; rejoined here
            NWBAttributeSpec(
                'resolution',
                'Smallest meaningful difference between values in data, stored in the specified '
                'by unit. COMMENT: E.g., the change in value of the least significant bit, or '
                'a larger number if signal noise is known to be present. If unknown, use -1.0',
                'float32', required=False, default_value=0.0)
        ],
        neurodata_type_def='Measurement',
        neurodata_type_inc='VectorData',
    )

    # Typedef for laserline
    laserline_device = NWBGroupSpec(
        neurodata_type_def='LaserLine',
        neurodata_type_inc='Device',
        doc='description of laserline device, part for a TEMPO device',
        attributes=[
            NWBAttributeSpec('reference', 'reference of the laserline module',
                             dtype='text', required=False)
        ],
        quantity='*')
    laserline_device.add_dataset(
        name='analog_modulation_frequency',
        neurodata_type_inc=measurement,
        doc='analog_modulation_frequency of the laserline module',
        shape=(1,),
        dtype='text',
        quantity='?')
    laserline_device.add_dataset(
        name='power',
        neurodata_type_inc=measurement,
        doc='power of the laserline module',
        shape=(1,),
        dtype='float',
        quantity='?')
    laserline_devices = NWBGroupSpec(
        neurodata_type_def='LaserLineDevices',
        neurodata_type_inc='NWBDataInterface',
        name='laserline_devices',
        doc='A container for dynamic addition of LaserLine devices',
        quantity='?',
        groups=[laserline_device])

    # Typedef for PhotoDetector
    photodetector_device = NWBGroupSpec(
        neurodata_type_def='PhotoDetector',
        neurodata_type_inc='Device',
        doc='description of photodetector device, part for a TEMPO device',
        attributes=[
            NWBAttributeSpec('reference', 'reference of the photodetector module',
                             dtype='text', required=False)
        ],
        quantity='*')
    photodetector_device.add_dataset(
        name='gain',
        neurodata_type_inc=measurement,
        doc='gain of the photodetector module',
        shape=(1,),
        dtype='float',
        quantity='?')
    photodetector_device.add_dataset(
        name='bandwidth',
        neurodata_type_inc=measurement,
        doc='bandwidth metadata of the photodetector module',
        shape=(1,),
        dtype='float',
        quantity='?')
    photodetector_devices = NWBGroupSpec(
        neurodata_type_def='PhotoDetectorDevices',
        neurodata_type_inc='NWBDataInterface',
        name='photodetector_devices',
        doc='A container for dynamic addition of PhotoDetector devices',
        quantity='?',
        groups=[photodetector_device])

    # Typedef for LockInAmplifier
    lockinamp_device = NWBGroupSpec(
        neurodata_type_def='LockInAmplifier',
        neurodata_type_inc='DynamicTable',
        doc='description of lock_in_amp device, part for a TEMPO device',
        attributes=[
            # NOTE(review): dtype is 'float' but the default is the int -1;
            # consider -1.0 for consistency — kept as-is to avoid changing
            # the emitted spec.
            NWBAttributeSpec('demodulation_filter_order',
                             'demodulation_filter_order of the lockinamp_device module',
                             dtype='float', required=False, default_value=-1),
            NWBAttributeSpec('reference', 'reference of the lockinamp_device module',
                             dtype='text', required=False)
        ],
        quantity='*')
    lockinamp_device.add_dataset(
        name='demod_bandwidth',
        neurodata_type_inc=measurement,
        doc='demod_bandwidth of lock_in_amp',
        shape=(1,),
        dtype='float',
        quantity='?')
    lockinamp_device.add_dataset(
        name='channel_name',
        neurodata_type_inc='VectorData',
        doc='name of the channel of lock_in_amp',
        dims=('no_of_channels',),
        shape=(None,),
        dtype='text',
        quantity='?')
    lockinamp_device.add_dataset(
        name='offset',
        neurodata_type_inc=measurement,
        doc='offset for channel of lock_in_amp',
        dims=('no_of_channels',),
        shape=(None,),
        dtype='float',
        quantity='?')
    lockinamp_device.add_dataset(
        name='gain',
        neurodata_type_inc='VectorData',
        doc='gain for channel of lock_in_amp',
        dims=('no_of_channels',),
        shape=(None,),
        dtype='float',
        quantity='?')
    lockinamp_devices = NWBGroupSpec(
        neurodata_type_def='LockInAmplifierDevices',
        neurodata_type_inc='NWBDataInterface',
        name='lockinamp_devices',
        doc='A container for dynamic addition of LockInAmplifier devices',
        quantity='?',
        groups=[lockinamp_device])

    # top-level TEMPO device aggregating the three module containers
    tempo_device = NWBGroupSpec(
        neurodata_type_def='TEMPO',
        neurodata_type_inc='Device',
        doc='datatype for a TEMPO device',
        attributes=[NWBAttributeSpec(
            name='no_of_modules',
            doc='the number of electronic modules with this acquisition system',
            dtype='int',
            required=False,
            default_value=3)],
        groups=[laserline_devices, photodetector_devices, lockinamp_devices])

    # surgical meta-data specification:
    surgery = NWBGroupSpec(
        neurodata_type_def='Surgery',
        neurodata_type_inc='Subject',
        doc='Surgery related meta-data of subject',
        name='surgery_data',
        attributes=[
            NWBAttributeSpec(name='surgery_date', doc='date of surgery',
                             dtype='text', required=False),
            NWBAttributeSpec(name='surgery_notes', doc='surgery notes',
                             dtype='text', required=False),
            NWBAttributeSpec(name='surgery_pharmacology', doc='pharmacology data',
                             dtype='text', required=False),
            # TODO(review): 'surgery_arget_anatomy' looks like a typo for
            # 'surgery_target_anatomy'; renaming would break files written
            # against this spec, so it is kept and only flagged here.
            NWBAttributeSpec(name='surgery_arget_anatomy',
                             doc='target anatomy of the surgery',
                             dtype='text', required=False)],
        groups=[
            NWBGroupSpec(
                name='implantation',
                doc='implantation related data',
                links=[NWBLinkSpec(name='implantation_device',
                                   doc='device implanted during surgery',
                                   target_type='Device')],
                attributes=[
                    NWBAttributeSpec(name='ophys_implant_name',
                                     doc='optical physiology implant name',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='ephys_implant_name',
                                     doc='electrophysiology implant name',
                                     dtype='text', required=False)],
                quantity='?'),
            NWBGroupSpec(
                name='virus_injection',
                doc='virus injection related data',
                attributes=[
                    NWBAttributeSpec(name='virus_injection_id',
                                     doc='id of virus injected',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='virus_injection_opsin',
                                     doc='opsin/protein used',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='virus_injection_opsin_l_r',
                                     doc='opsin/protein left/right description'
                                         'enter \'L\' or \'R\'',
                                     dtype='text', required=False,
                                     default_value='L/R'),
                    NWBAttributeSpec(name='virus_injection_scheme',
                                     doc='description of injection scheme eg.'
                                         '\'single_bolus\'',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='virus_injection_tag',
                                     doc='tag for the virus injected',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='virus_injection_coordinates_description',
                                     doc='description of coordinates'
                                         '\'AP\'/\'ML\'/\'DV\'',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='virus_injection_volume',
                                     doc='volume of virus injected in ml',
                                     dtype='float', required=False,
                                     default_value=-1.0)],
                datasets=[NWBDatasetSpec(name='virus_injection_coordinates',
                                         doc='coordinates of virus injection',
                                         dtype='text', quantity='?')],
                quantity='?'),
            NWBGroupSpec(
                name='ophys_injection',
                doc='optical physiology fluorescence injection metadata',
                attributes=[
                    NWBAttributeSpec(name='ophys_injection_date',
                                     doc='date of fluorscent protein injection',
                                     dtype='text', required=False),
                    NWBAttributeSpec(name='ophys_injection_volume',
                                     doc='volume of fluorscent protein injected',
                                     dtype='float', required=False),
                    NWBAttributeSpec(name='ophys_injection_brain_area',
                                     doc='brain area of fluorscent protein injection',
                                     dtype='text', required=False)],
                datasets=[NWBDatasetSpec(
                    name='ophys_injection_flr_protein_data',
                    doc='fluorescence protein name and concentration table',
                    neurodata_type_inc='DynamicTable')],
                quantity='?')
        ],
        quantity='?')

    subject = NWBGroupSpec(
        neurodata_type_def='SubjectComplete',
        neurodata_type_inc='Surgery',
        doc='Mouse metadata used with the TEMPO device',
        attributes=[
            NWBAttributeSpec(name='sacrificial_date',
                             doc='sacrificial date of the animal ',
                             dtype='text', required=False),
            NWBAttributeSpec(name='strain', doc='strain of the animal',
                             dtype='text', required=False)],
    )

    new_data_types = [measurement, tempo_device, surgery, subject]
    export_spec(ns_builder, new_data_types)
def main():
    """Build and export the ndx-simulation-output (v0.2.2) extension spec."""
    ns_builder = NWBNamespaceBuilder(
        doc='Holds structures for recording data from multiple compartments of multiple '
            'neurons in a single TimeSeries',
        name='ndx-simulation-output',
        version='0.2.2',
        author='Ben Dichter',
        contact='*****@*****.**')

    Compartments = NWBGroupSpec(
        default_name='compartments',
        neurodata_type_def='Compartments',
        neurodata_type_inc='DynamicTable',
        doc='table that holds information about what places are being recorded')
    Compartments.add_dataset(
        name='number',
        neurodata_type_inc='VectorData',
        dtype='int',
        doc='cell compartment ids corresponding to a each column in the data')
    Compartments.add_dataset(
        name='number_index',
        neurodata_type_inc='VectorIndex',
        doc='maps cell to compartments',
        quantity='?')
    Compartments.add_dataset(
        name='position',
        neurodata_type_inc='VectorData',
        dtype='float',
        quantity='?',
        doc='position of recording within a compartment. 0 is close to soma, 1 is other end')
    Compartments.add_dataset(
        name='position_index',
        neurodata_type_inc='VectorIndex',
        doc='indexes position',
        quantity='?')
    Compartments.add_dataset(
        name='label',
        neurodata_type_inc='VectorData',
        doc='labels for compartments',
        dtype='text',
        quantity='?')
    Compartments.add_dataset(
        name='label_index',
        neurodata_type_inc='VectorIndex',
        doc='indexes label',
        quantity='?')

    CompartmentsSeries = NWBGroupSpec(
        neurodata_type_def='CompartmentSeries',
        neurodata_type_inc='TimeSeries',
        doc='Stores continuous data from cell compartments')
    CompartmentsSeries.add_link(
        name='compartments',
        target_type='Compartments',
        quantity='?',
        doc='meta-data about compartments in this CompartmentSeries')

    SimulationMetaData = NWBGroupSpec(
        name='simulation',
        neurodata_type_def='SimulationMetaData',
        neurodata_type_inc='LabMetaData',
        doc='group that holds metadata for simulation')
    SimulationMetaData.add_group(
        name='compartments',
        neurodata_type_inc='Compartments',
        doc='table that holds information about what places are being recorded')
    # fix: this call was split across a stray line break in the source
    SimulationMetaData.add_attribute(
        name='help',
        dtype='text',
        doc='help',
        value='container for simulation meta-data that goes in /general')

    new_data_types = [Compartments, CompartmentsSeries, SimulationMetaData]

    types_to_include = ['TimeSeries', 'VectorData', 'VectorIndex', 'DynamicTable',
                        'LabMetaData']
    for ndtype in types_to_include:
        ns_builder.include_type(ndtype, namespace='core')

    export_spec(ns_builder, new_data_types)
def main():
    """Build the ndx-survey-data (v0.1.0) extension spec and export it."""
    # these arguments were auto-generated from your cookiecutter inputs
    ns_builder = NWBNamespaceBuilder(
        doc='NWB extension for survey/behavioral data',
        name='ndx-survey-data',
        version='0.1.0',
        author=list(map(str.strip, 'Ben Dichter, Armin Najarpour Foroushani'.split(','))),
        contact=list(map(str.strip, '*****@*****.**'.split(',')))
    )

    for type_name in ('DynamicTable', 'VectorData'):
        ns_builder.include_type(type_name, namespace='core')

    survey_data = NWBGroupSpec(
        doc='Table that holds information about the survey/behavior',
        neurodata_type_def='SurveyTable',
        neurodata_type_inc='DynamicTable',
        default_name='survey_data'
    )

    question_response = NWBDatasetSpec(
        doc='Column that holds information about a question',
        neurodata_type_def='QuestionResponse',
        neurodata_type_inc='VectorData',
        default_name='question_response',
        attributes=[NWBAttributeSpec(name='options',
                                     doc='Response Options',
                                     dtype='text')]
    )

    # Every survey column shares the same layout; only name and doc differ.
    # Define the (name, doc) pairs once and add the columns in a loop — the
    # original repeated the identical 6-line add_dataset call 23 times.
    question_columns = [
        ('nrs_pain_intensity_rating', 'NRS Pain Intensity Rating'),
        ('nrs_pain_relief_rating', 'NRS Pain Relief Rating'),
        ('nrs_relative_pain_intensity_rating', 'NRS Relative Pain Intensity Rating'),
        ('nrs_pain_unpleasantness', 'NRS Pain Unpleasantness'),
        ('vas_pain_intensity_rating', 'VAS Pain Intensity Rating'),
        ('vas_pain_relief_rating', 'VAS Pain Relief Rating'),
        ('vas_relative_pain_intensity_rating', 'VAS Relative Pain Intensity Rating'),
        ('vas_pain_unpleasantness', 'VAS Pain Unpleasantness'),
        ('throbbing', 'Throbbing'),
        ('shooting', 'Shooting'),
        ('stabbing', 'Stabbing'),
        ('sharp', 'Sharp'),
        ('cramping', 'Cramping'),
        ('gnawing', 'Gnawing'),
        ('hot_burning', 'Hot-burning'),
        ('aching', 'Aching'),
        ('heavy', 'Heavy'),
        ('tender', 'Tender'),
        ('splitting', 'Splitting'),
        ('tiring_exhausting', 'Tiring-Exhausting'),
        ('sickening', 'Sickening'),
        ('fearful', 'Fearful'),
        ('cruel_punishing', 'Cruel-Punishing'),
    ]
    for col_name, col_doc in question_columns:
        survey_data.add_dataset(
            name=col_name,
            neurodata_type_inc=question_response,
            doc=col_doc,
            dims=('num_samples',),
            shape=(None,),
            dtype='text'
        )

    new_data_types = [survey_data, question_response]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Build the ndx-simulation-output (v0.2.6) spec and export it to ../../spec."""
    # these arguments were auto-generated from your cookiecutter inputs
    ns_builder = NWBNamespaceBuilder(
        doc='Data types for recording data from multiple compartments of multiple '
            'neurons in a single TimeSeries.',
        name='ndx-simulation-output',
        version='0.2.6',
        author='Ben Dichter',
        contact='*****@*****.**')

    types_to_include = ['TimeSeries', 'VectorData', 'VectorIndex', 'DynamicTable',
                        'LabMetaData']
    for ndtype in types_to_include:
        ns_builder.include_type(ndtype, namespace='core')

    Compartments = NWBGroupSpec(
        default_name='compartments',
        neurodata_type_def='Compartments',
        neurodata_type_inc='DynamicTable',
        doc='Table that holds information about what places are being recorded.')
    Compartments.add_dataset(
        name='number',
        neurodata_type_inc='VectorData',
        dtype='int',
        doc='Cell compartment ids corresponding to a each column in the data.')
    Compartments.add_dataset(
        name='number_index',
        neurodata_type_inc='VectorIndex',
        doc='Index that maps cell to compartments.',
        quantity='?')
    # fix: this doc string was split by a stray line break in the source;
    # rejoined here
    Compartments.add_dataset(
        name='position',
        neurodata_type_inc='VectorData',
        dtype='float',
        quantity='?',
        doc='Position of recording within a compartment. 0 is close to soma, 1 is other end.')
    Compartments.add_dataset(
        name='position_index',
        neurodata_type_inc='VectorIndex',
        doc='Index for position.',
        quantity='?')
    Compartments.add_dataset(
        name='label',
        neurodata_type_inc='VectorData',
        doc='Labels for compartments.',
        dtype='text',
        quantity='?')
    Compartments.add_dataset(
        name='label_index',
        neurodata_type_inc='VectorIndex',
        doc='indexes label',
        quantity='?')

    CompartmentsSeries = NWBGroupSpec(
        neurodata_type_def='CompartmentSeries',
        neurodata_type_inc='TimeSeries',
        doc='Stores continuous data from cell compartments')
    CompartmentsSeries.add_link(
        name='compartments',
        target_type='Compartments',
        quantity='?',
        doc='Metadata about compartments in this CompartmentSeries.')

    SimulationMetaData = NWBGroupSpec(
        name='simulation',
        neurodata_type_def='SimulationMetaData',
        neurodata_type_inc='LabMetaData',
        doc='Group that holds metadata for simulations.')
    SimulationMetaData.add_group(
        name='compartments',
        neurodata_type_inc='Compartments',
        doc='Table that holds information about what places are being recorded.')

    new_data_types = [Compartments, CompartmentsSeries, SimulationMetaData]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)
def main():
    """Build the ndx-electrical-stim extension spec and export it to ../../spec."""
    # these arguments were auto-generated from your cookiecutter inputs
    ns_builder = NWBNamespaceBuilder(
        doc='stores electrical stimulation waveforms',
        name='ndx-electrical-stim',
        version='0.1.0',
        author=list(map(str.strip, 'Jessie R. Liu'.split(','))),
        contact=list(map(str.strip, '*****@*****.**'.split(','))))

    # core / hdmf-common types the new types build on
    ns_builder.include_type('TimeSeries', namespace='core')
    ns_builder.include_type('TimeIntervals', namespace='core')
    ns_builder.include_type('DynamicTableRegion', namespace='hdmf-common')
    ns_builder.include_type('VectorData', namespace='hdmf-common')

    # see https://pynwb.readthedocs.io/en/latest/extensions.html#extending-nwb
    # for more information
    stim_series = NWBGroupSpec(
        neurodata_type_def='StimSeries',
        neurodata_type_inc='TimeSeries',
        doc=('An extension of TimeSeries to include stimulation waveforms '
             'used during electrical stimulation.'),
    )
    stim_series.add_dataset(
        name='bipolar_electrodes',
        neurodata_type_inc='DynamicTableRegion',
        doc='DynamicTableRegion pointer to the '
            'bipolar electrode pairs corresponding to the '
            'stimulation waveforms.')

    stim_table = NWBGroupSpec(
        neurodata_type_def='StimTable',
        neurodata_type_inc='TimeIntervals',
        doc=('An extension of TimeIntervals to hold parameters used for '
             'various stimulation events.'),
    )
    stim_table.add_dataset(
        name='bipolar_pair',
        neurodata_type_inc='DynamicTableRegion',
        doc='DynamicTableRegion pointer to the '
            'bipolar electrode pair used for this '
            'stimulation event.')
    stim_table.add_dataset(
        name='frequency',
        neurodata_type_inc='VectorData',
        doc='Frequency of stimulation waveform, in Hz.')
    stim_table.add_dataset(
        name='amplitude',
        neurodata_type_inc='VectorData',
        doc='Amplitude of stimulation waveform, in Amps.')
    # fix: this call was split across a stray line break in the source
    stim_table.add_dataset(
        name='pulse_width',
        neurodata_type_inc='VectorData',
        doc='Pulse width of stimulation waveform, '
            'in seconds/phase')

    new_data_types = [stim_series, stim_table]

    # export the spec to yaml files in the spec folder
    output_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', '..', 'spec'))
    export_spec(ns_builder, new_data_types, output_dir)