def test_load_namespace_with_reftype_attribute(self):
    ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', self.prefix, version='0.1.0')
    test_ds_ext = NWBDatasetSpec(
        doc='test dataset to add an attr',
        name='test_data',
        shape=(None,),
        attributes=[NWBAttributeSpec(name='target_ds',
                                     doc='the target the dataset applies to',
                                     dtype=RefSpec('TimeSeries', 'object'))],
        neurodata_type_def='my_new_type')
    ns_builder.add_spec(self.ext_source, test_ds_ext)
    ns_builder.export(self.ns_path, outdir=self.tempdir)
    get_type_map(extensions=os.path.join(self.tempdir, self.ns_path))
def run_integration_tests(verbose=True):
    pynwb_test_result = run_test_suite("tests/integration/hdf5", "integration tests", verbose=verbose)
    test_cases = pynwb_test_result.get_all_cases_run()

    import pynwb
    type_map = pynwb.get_type_map()

    # Map each tested container class to the names of the test methods that cover it
    tested_containers = {}
    for test_case in test_cases:
        if not hasattr(test_case, 'container'):
            continue
        container_class = test_case.container.__class__
        if container_class not in tested_containers:
            tested_containers[container_class] = [test_case._testMethodName]
        else:
            tested_containers[container_class].append(test_case._testMethodName)

    # Report core container classes that have no integration test at all
    count_missing = 0
    for container_class in type_map.get_container_classes('core'):
        if container_class not in tested_containers:
            count_missing += 1
            if verbose > 1:
                logging.info('%s missing test case; should define in %s'
                             % (container_class, inspect.getfile(container_class)))

    if count_missing > 0:
        logging.info('%d classes missing integration tests in ui_write' % count_missing)
    else:
        logging.info('all classes have integration tests')
def test_load_namespace_with_reftype_attribute_check_autoclass_const(self):
    ns_builder = NWBNamespaceBuilder('Extension for use in my Lab', self.prefix)
    test_ds_ext = NWBDatasetSpec(
        doc='test dataset to add an attr',
        name='test_data',
        shape=(None,),
        attributes=[NWBAttributeSpec(name='target_ds',
                                     doc='the target the dataset applies to',
                                     dtype=RefSpec('TimeSeries', 'object'))],
        neurodata_type_def='my_new_type')
    ns_builder.add_spec(self.ext_source, test_ds_ext)
    ns_builder.export(self.ns_path, outdir=self.tempdir)
    type_map = get_type_map(extensions=os.path.join(self.tempdir, self.ns_path))
    my_new_type = type_map.get_container_cls(self.prefix, 'my_new_type')
    docval = None
    for tmp in get_docval(my_new_type.__init__):
        if tmp['name'] == 'target_ds':
            docval = tmp
            break
    self.assertIsNotNone(docval)
    self.assertEqual(docval['type'], TimeSeries)
def test_catch_dup_name(self):
    ns_builder1 = NWBNamespaceBuilder('Extension for use in my Lab', "pynwb_test_extension1")
    ext1 = NWBGroupSpec('A custom ElectricalSeries for my lab',
                        attributes=[NWBAttributeSpec(name='trode_id',
                                                     doc='the tetrode id',
                                                     dtype='int')],
                        neurodata_type_inc='ElectricalSeries',
                        neurodata_type_def='TetrodeSeries')
    ns_builder1.add_spec(self.ext_source1, ext1)
    ns_builder1.export(self.ns_path1, outdir=self.tempdir)
    ns_builder2 = NWBNamespaceBuilder('Extension for use in my Lab', "pynwb_test_extension1")
    ext2 = NWBGroupSpec('A custom ElectricalSeries for my lab',
                        attributes=[NWBAttributeSpec(name='trode_id',
                                                     doc='the tetrode id',
                                                     dtype='int')],
                        neurodata_type_inc='ElectricalSeries',
                        neurodata_type_def='TetrodeSeries')
    ns_builder2.add_spec(self.ext_source2, ext2)
    ns_builder2.export(self.ns_path2, outdir=self.tempdir)
    type_map = get_type_map(extensions=os.path.join(self.tempdir, self.ns_path1))
    # loading a second namespace with the same name should emit a warning
    with self.assertWarnsRegex(UserWarning,
                               r"ignoring namespace '\S+' because it already exists"):
        type_map.load_namespaces(os.path.join(self.tempdir, self.ns_path2))
def test_catch_dup_name(self):
    ns_builder1 = NWBNamespaceBuilder('Extension for use in my Lab', "pynwb_test_extension1",
                                      version='0.1.0')
    ext1 = NWBGroupSpec('A custom ElectricalSeries for my lab',
                        attributes=[NWBAttributeSpec(name='trode_id',
                                                     doc='the tetrode id',
                                                     dtype='int')],
                        neurodata_type_inc='ElectricalSeries',
                        neurodata_type_def='TetrodeSeries')
    ns_builder1.add_spec(self.ext_source1, ext1)
    ns_builder1.export(self.ns_path1, outdir=self.tempdir)
    ns_builder2 = NWBNamespaceBuilder('Extension for use in my Lab', "pynwb_test_extension1",
                                      version='0.1.0')
    ext2 = NWBGroupSpec('A custom ElectricalSeries for my lab',
                        attributes=[NWBAttributeSpec(name='trode_id',
                                                     doc='the tetrode id',
                                                     dtype='int')],
                        neurodata_type_inc='ElectricalSeries',
                        neurodata_type_def='TetrodeSeries')
    ns_builder2.add_spec(self.ext_source2, ext2)
    ns_builder2.export(self.ns_path2, outdir=self.tempdir)
    type_map = get_type_map(extensions=os.path.join(self.tempdir, self.ns_path1))
    # loading the same namespace name with a matching version should succeed without error
    type_map.load_namespaces(os.path.join(self.tempdir, self.ns_path2))
def run_integration_tests(verbose=True):
    pynwb_test_result = run_test_suite("tests/integration", "integration tests", verbose=verbose)
    test_cases = pynwb_test_result.get_all_cases_run()

    import pynwb
    type_map = pynwb.get_type_map()

    # Map each tested container class to the test methods that cover it, and
    # collect the test methods each container class is required to have
    tested_containers = {}
    required_tests = {}
    for test_case in test_cases:
        if not hasattr(test_case, 'container'):
            continue
        container_class = test_case.container.__class__
        if container_class not in tested_containers:
            tested_containers[container_class] = [test_case._testMethodName]
        else:
            tested_containers[container_class].append(test_case._testMethodName)
        if container_class not in required_tests:
            required_tests[container_class] = list(test_case.required_tests)
        else:
            required_tests[container_class].extend(test_case.required_tests)

    # Report core container classes with no tests at all or with missing required test methods
    count_missing = 0
    for container_class in type_map.get_container_classes('core'):
        if container_class not in tested_containers:
            count_missing += 1
            if verbose > 1:
                logging.info('%s missing test case; should define in %s'
                             % (container_class, inspect.getfile(container_class)))
            continue
        test_methods = tested_containers[container_class]
        required = required_tests[container_class]
        methods_missing = set(required) - set(test_methods)
        if methods_missing:
            count_missing += 1
            if verbose > 1:
                logging.info('%s missing test method(s) "%s"; should define in %s'
                             % (container_class, ', '.join(methods_missing),
                                inspect.getfile(container_class)))

    if count_missing > 0:
        logging.info('%d classes missing integration tests in ui_write' % count_missing)
    else:
        logging.info('all classes have integration tests')
def get_container(
    container_name: typing.Optional[str] = None
) -> typing.Union[typing.List[NWBContainer], NWBContainer]:
    """
    Get and list PyNWB containers by name.

    If called with no arguments, returns all container classes. Otherwise,
    returns the container named ``container_name``, e.g. get
    ``pynwb.file.NWBFile`` by calling with ``'NWBFile'``.

    Args:
        container_name (str, None): if None, return all containers.
            Otherwise return the container by name.

    Returns:
        List of containers, or the container itself.
    """
    if container_name is None:
        # return all containers
        return get_type_map().get_container_classes()
    else:
        return get_type_map().get_container_cls('core', container_name)
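# A minimal usage sketch for get_container (hypothetical calls; assumes the
# function above is importable and the 'core' namespace is loaded, as it is
# by default in pynwb):
#
#     all_cls = get_container()                  # every registered container class
#     NWBFile = get_container('NWBFile')         # resolves to pynwb.file.NWBFile
#     TimeSeries = get_container('TimeSeries')   # resolves to pynwb.base.TimeSeries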
def setUp(self):
    type_map = get_type_map()
    self.manager = BuildManager(type_map)
    self.path = "test_pynwb_io_hdf5.h5"
    self.start_time = datetime(1970, 1, 1, 12, 0, 0)
    self.create_date = datetime(2017, 4, 15, 12, 0, 0)
    self.ts_builder = GroupBuilder(
        'test_timeseries',
        attributes={'ancestry': 'TimeSeries',
                    'source': 'example_source',
                    'neurodata_type': 'TimeSeries',
                    'help': 'General purpose TimeSeries'},
        datasets={'data': DatasetBuilder('data', list(range(100, 200, 10)),
                                         attributes={'unit': 'SIunit',
                                                     'conversion': 1.0,
                                                     'resolution': 0.1}),
                  'timestamps': DatasetBuilder('timestamps', list(range(10)),
                                               attributes={'unit': 'Seconds', 'interval': 1})})
    self.ts = TimeSeries('test_timeseries', 'example_source', list(range(100, 200, 10)),
                         unit='SIunit', resolution=0.1, timestamps=list(range(10)))
    self.manager.prebuilt(self.ts, self.ts_builder)
    self.builder = GroupBuilder(
        'root',
        groups={'acquisition': GroupBuilder(
                    'acquisition',
                    groups={'timeseries': GroupBuilder('timeseries',
                                                       groups={'test_timeseries': self.ts_builder}),
                            'images': GroupBuilder('images')}),
                'analysis': GroupBuilder('analysis'),
                'epochs': GroupBuilder('epochs'),
                'general': GroupBuilder('general'),
                'processing': GroupBuilder('processing'),
                'stimulus': GroupBuilder(
                    'stimulus',
                    groups={'presentation': GroupBuilder('presentation'),
                            'templates': GroupBuilder('templates')})},
        datasets={'file_create_date': DatasetBuilder('file_create_date', [str(self.create_date)]),
                  'identifier': DatasetBuilder('identifier', 'TEST123'),
                  'session_description': DatasetBuilder('session_description', 'a test NWB File'),
                  'nwb_version': DatasetBuilder('nwb_version', '1.0.6'),
                  'session_start_time': DatasetBuilder('session_start_time', str(self.start_time))},
        attributes={'neurodata_type': 'NWBFile'})
def __init__(self, **kwargs):
    path, mode, manager, extensions, load_namespaces, file_obj, comm = popargs(
        'path', 'mode', 'manager', 'extensions', 'load_namespaces', 'file', 'comm', kwargs)

    # root group
    self.__rgroup = file_obj
    # derive a filename from the chunk store if one is available; otherwise
    # fall back to the name of the store class
    filename = None
    chunk_store = getattr(file_obj, 'chunk_store', None)
    if chunk_store is not None:
        try:
            filename = getattr(chunk_store.source, 'path', None)
            if filename is None:
                filename = chunk_store.source.name
        except Exception:
            filename = None
    if filename is None:
        filename = f'{type(file_obj.store).__name__}'
    self.__rgroup.filename = filename

    file_obj = self.__set_rgroup(file_obj)

    self.__built = dict()  # keep track of each builder for each dataset/group/link for each file
    self.__read = dict()   # keep track of which files have been read; key is the filename, value is the builder
    self.__file = file_obj

    if load_namespaces:
        if manager is not None:
            warn("loading namespaces from file - ignoring 'manager'")
        if extensions is not None:
            warn("loading namespaces from file - ignoring 'extensions' argument")
        # namespaces are not loaded when creating an NWBZARRHDF5IO object in write mode
        if 'w' in mode or mode == 'x':
            raise ValueError("cannot load namespaces from file when writing to it")
        tm = get_type_map()
        self.load_namespaces(tm, path, file=file_obj)
        manager = BuildManager(tm)

        # XXX: Leaving this here in case we want to revert to this strategy for
        # loading cached namespaces
        # ns_catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
        # super(NWBZARRHDF5IO, self).load_namespaces(ns_catalog, path)
        # tm = TypeMap(ns_catalog)
        # tm.copy_mappers(get_type_map())
    else:
        if manager is not None and extensions is not None:
            raise ValueError("'manager' and 'extensions' cannot be specified together")
        elif extensions is not None:
            manager = get_manager(extensions=extensions)
        elif manager is None:
            manager = get_manager()

    self.logger = logging.getLogger('%s.%s' % (self.__class__.__module__, self.__class__.__qualname__))

    if file_obj is not None:
        if path is None:
            path = file_obj.filename
        elif os.path.abspath(file_obj.filename) != os.path.abspath(path):
            msg = 'You argued %s as this object\'s path, ' % path
            msg += 'but supplied a file with filename: %s' % file_obj.filename
            raise ValueError(msg)
    elif path is None:
        raise TypeError("Must supply either 'path' or 'file' arg to HDF5IO.")

    if file_obj is None and not os.path.exists(path) and (mode == 'r' or mode == 'r+'):
        msg = "Unable to open file %s in '%s' mode. File does not exist." % (path, mode)
        raise UnsupportedOperation(msg)

    if file_obj is None and os.path.exists(path) and (mode == 'w-' or mode == 'x'):
        msg = "Unable to open file %s in '%s' mode. File already exists." % (path, mode)
        raise UnsupportedOperation(msg)

    if manager is None:
        manager = BuildManager(TypeMap(NamespaceCatalog()))
    elif isinstance(manager, TypeMap):
        manager = BuildManager(manager)

    # TODO
    # self._HDF5IO__comm = comm
    self._HDF5IO__mode = mode
    self._HDF5IO__path = path
    self._HDF5IO__file = file_obj
    super(_HDF5IO, self).__init__(manager, source=path)
    self._HDF5IO__ref_queue = deque()  # a queue of the references that need to be added
    self._HDF5IO__dci_queue = deque()  # a queue of DataChunkIterators that need to be exhausted
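# A minimal usage sketch (hypothetical; assumes this __init__ belongs to the
# NWBZARRHDF5IO class referenced in the comments above, and that 'example.nwb'
# exists and contains cached namespaces):
#
#     with NWBZARRHDF5IO(path='example.nwb', mode='r', load_namespaces=True) as io:
#         nwbfile = io.read()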
def test_get_class(self):
    self.test_export()
    type_map = get_type_map(extensions=os.path.join(self.tempdir, self.ns_path))
    type_map.get_container_cls(self.prefix, 'TetrodeSeries')
def test_load_namespace(self):
    self.test_export()
    get_type_map(extensions=os.path.join(self.tempdir, self.ns_path))
# Abbreviate the documentation of the main object for which a table is rendered in the table.
# This is commonly set to True, as the doc of the main object is already rendered as the main
# intro for the section describing the object
spec_appreviate_main_object_doc_in_tables = True

# Show a title for the tables
spec_show_title_for_tables = True

# Char to be used as prefix to indicate the depth of an object in the specification hierarchy
spec_table_depth_char = '.'  # '→' '.'

# Add a LaTeX clearpage after each main section describing a neurodata_type. This helps in LaTeX
# to keep the ordering of figures, tables, and code blocks consistent, in particular when the
# hierarchy_plots are included
spec_add_latex_clearpage_after_ndt_sections = True

# Resolve includes to always show the full list of objects that are part of a type (True)
# or to show only the parts that are actually new to the current type while only linking to base types
spec_resolve_type_inc = False

# Default type map to be used. This is the type map where dependent namespaces are stored. In the
# case of NWB this is the type map provided by pynwb.
import pynwb  # noqa: E402
spec_default_type_map = pynwb.get_type_map()

# Default specification classes for groups, datasets, and namespaces. In the case of NWB these are
# the NWB-specific spec classes. In the general case these are the spec classes from HDMF
spec_group_spec_cls = pynwb.spec.NWBGroupSpec
spec_dataset_spec_cls = pynwb.spec.NWBDatasetSpec
spec_namespace_spec_cls = pynwb.spec.NWBNamespace
pynwb_test_result = unittest.TextTestRunner(verbosity=args.verbosity).run(
    unittest.TestLoader().discover("tests/unit/pynwb_tests"))
TOTAL += pynwb_test_result.testsRun
FAILURES += len(pynwb_test_result.failures)
ERRORS += len(pynwb_test_result.errors)

# Run integration tests
if flags['integration'] in args.suites:
    logging.info('running integration tests')
    integration_test_result = unittest.TextTestRunner(verbosity=args.verbosity).run(
        unittest.TestLoader().discover("tests/integration"))
    TOTAL += integration_test_result.testsRun
    FAILURES += len(integration_test_result.failures)
    ERRORS += len(integration_test_result.errors)

    type_map = pynwb.get_type_map()

    # Load the tests/integration package to access its registry of container tests.
    # Note: the 'imp' module is deprecated; importlib is the modern replacement.
    import imp
    name = 'integration'
    imp_result = imp.find_module(name, ['tests'])
    mod = imp.load_module(name, imp_result[0], imp_result[1], imp_result[2])
    d = mod.ui_write.base.container_tests

    # Report core container classes that have no registered integration test
    MISSING_INT = list()
    for cls in type_map.get_container_classes('core'):
        if cls not in d:
            MISSING_INT.append(cls)
    if len(MISSING_INT) > 0:
        logging.info('%d classes missing integration tests in ui_write' % len(MISSING_INT))
    else:
        logging.info('all classes have integration tests')