def get_metadata(
    self,
    digest: Optional[Digest] = None,
    ignore_errors: bool = True,
) -> BareAsset:
    """Combine BIDS-derived and NWB-derived metadata for this asset.

    Both parent extractors are run; NWB fields (with ``None`` values
    dropped) overwrite BIDS fields of the same name in the merged result.
    """
    combined = BIDSAsset.get_metadata(self).dict()
    combined.update(
        NWBAsset.get_metadata(self, digest, ignore_errors).dict(exclude_none=True)
    )
    return BareAsset(**combined)
def get_metadata(
    self,
    digest: Optional[Digest] = None,
    ignore_errors: bool = True,
) -> BareAsset:
    """Produce asset metadata from the BIDS dataset description.

    Common fields (size, digest, timestamps, etc.) are layered on top of
    what the dataset description provides, and the asset's path is set.
    """
    metadata = self.bids_dataset_description.get_asset_metadata(self)
    # Extraction is effectively instantaneous, so a single timestamp
    # serves as both start and end time.
    now = datetime.now().astimezone()
    add_common_metadata(metadata, self.filepath, now, now, digest)
    metadata["path"] = self.path
    return BareAsset(**metadata)
def validate_dandi_nwb(filepath, schema_version=None, devel_debug=False):
    """Provide validation of .nwb file regarding requirements we impose"""
    if schema_version is None:
        # No schema validation requested: just make sure that we have
        # some basic metadata fields we require.
        try:
            meta = get_metadata(filepath)
        except Exception as e:
            if devel_debug:
                raise
            lgr.warning(
                "Failed to read metadata in %s: %s",
                filepath,
                e,
                extra={"validating": True},
            )
            return [f"Failed to read metadata: {e}"]
        return _check_required_fields(meta, _required_nwb_metadata_fields)

    # Full schema validation: build an asset model and validate it.
    from dandischema.models import BareAsset, get_schema_version
    from pydantic import ValidationError

    from .metadata import nwb2asset

    current_version = get_schema_version()
    if schema_version != current_version:
        raise ValueError(
            f"Unsupported schema version: {schema_version}; expected {current_version}"
        )
    try:
        # A placeholder digest is sufficient; only metadata fields are checked.
        asset = nwb2asset(filepath, digest=32 * "d" + "-1", digest_type="dandi_etag")
        BareAsset(**asset.dict())
    except ValidationError as e:
        if devel_debug:
            raise
        lgr.warning(
            "Validation error for %s: %s", filepath, e, extra={"validating": True}
        )
        return [str(e)]
    except Exception as e:
        if devel_debug:
            raise
        lgr.warning(
            "Unexpected validation error for %s: %s",
            filepath,
            e,
            extra={"validating": True},
        )
        return [f"Failed to read metadata: {e}"]
    return []
def process_ndtypes(asset: models.BareAsset, nd_types: Iterable[str]) -> models.BareAsset:
    """Populate *asset* approach/technique/variable metadata from neurodata types.

    Each entry of *nd_types* is reduced to its first whitespace-separated
    token and looked up in ``neurodata_typemap``; recognized types
    contribute their approach and technique (when defined) and are
    recorded as measured variables.  Returns the mutated *asset*.
    """
    approach = set()
    technique = set()
    variables = set()
    for nd_type in nd_types:
        parts = nd_type.split()
        if not parts:
            # Guard against empty/whitespace-only entries, which would
            # otherwise raise IndexError on the [0] index below.
            continue
        name = parts[0]
        # Single lookup instead of repeated indexing into the typemap
        entry = neurodata_typemap.get(name)
        if entry is None:
            continue
        if entry["approach"]:
            approach.add(entry["approach"])
        if entry["technique"]:
            technique.add(entry["technique"])
        variables.add(name)
    asset.approach = [models.ApproachType(name=val) for val in approach]
    asset.measurementTechnique = [
        models.MeasurementTechniqueType(name=val) for val in technique
    ]
    asset.variableMeasured = [
        models.PropertyValue(value=val) for val in variables
    ]
    return asset
def get_validation_errors(
    self,
    schema_version: Optional[str] = None,
    devel_debug: bool = False,
) -> list[str]:
    """Validate this asset's metadata against the dandischema model.

    Returns a list of error strings (empty when validation succeeds or
    no *schema_version* is given).  Raises ``ValueError`` if
    *schema_version* does not match the installed schema version;
    re-raises underlying errors when *devel_debug* is true.
    """
    if schema_version is None:
        # TODO: Do something else?
        return []
    current_version = get_schema_version()
    if schema_version != current_version:
        raise ValueError(
            f"Unsupported schema version: {schema_version}; expected {current_version}"
        )
    try:
        metadata = self.get_metadata(digest=DUMMY_DIGEST)
        BareAsset(**metadata.dict())
    except ValidationError as e:
        if devel_debug:
            raise
        lgr.warning(
            "Validation error for %s: %s",
            self.filepath,
            e,
            extra={"validating": True},
        )
        return [str(e)]
    except Exception as e:
        if devel_debug:
            raise
        lgr.warning(
            "Unexpected validation error for %s: %s",
            self.filepath,
            e,
            extra={"validating": True},
        )
        return [f"Failed to read metadata: {e}"]
    return []