def validate(cls, obj, raise_errors=False):
    r"""Validate an object to check if it could be of this type.

    Args:
        obj (object): Object to validate.
        raise_errors (bool, optional): If True, errors will be raised
            when the object fails to be validated. Defaults to False.

    Returns:
        bool: True if the object could be of this type, False otherwise.

    Raises:
        ValueError: If raise_errors is True and the object's dtype does
            not match any of the accepted numpy type names.

    """
    # Unwrap a 0-d numpy array into its scalar element before validation
    if isinstance(obj, np.ndarray) and (obj.ndim == 0):
        obj = obj.reshape((1, ))[0]
    if super(ScalarMetaschemaType, cls).validate(
            units.get_data(obj), raise_errors=raise_errors):
        dtype = data2dtype(obj)
        if cls.is_fixed and ('subtype' in cls.fixed_properties):
            # Fixed subtype: only the one corresponding numpy type prefix
            # is acceptable
            type_list = [
                constants.VALID_TYPES[cls.fixed_properties['subtype']]]
        else:
            type_list = constants.NUMPY_TYPES
        # dtype names like 'float64' start with the base type name
        # (e.g. 'float')
        if dtype.name.startswith(tuple(type_list)):
            return True
        else:
            if raise_errors:
                # Fixed typos in the original message
                # ("dosn't corresponding")
                raise ValueError(
                    ("dtype %s doesn't correspond with any "
                     + "of the accepted types: %s")
                    % (str(dtype), str(type_list)))
    return False
def from_array(cls, arr, unit_str=None, dtype=None, typedef=None):
    r"""Get object representation of the data.

    Args:
        arr (np.ndarray): Numpy array.
        unit_str (str, optional): Units that should be added to returned
            object.
        dtype (np.dtype, optional): Numpy data type that should be
            maintained as a base class when adding units. Defaults to
            None and is determined from the object.
        typedef (dict, optional): Type definition that should be used to
            decode the object. Defaults to None.

    Returns:
        object: Object representation of the data in the input array.

    """
    # Removed long-dead commented-out dtype<->typedef conversion code.
    # Scalar types extract the single element; array types keep the
    # full array (0-d arrays are passed through unchanged).
    if (cls.name not in ['1darray', 'ndarray']) and (arr.ndim > 0):
        out = arr[0]
    else:
        out = arr
    if typedef is not None:
        # Cast numpy type to native python type if they are equivalent
        out = cls.as_python_type(out, typedef)
    if unit_str is not None:
        if dtype is None:
            dtype = data2dtype(out)
        out = units.add_units(out, unit_str, dtype=dtype)
    return out
def func_deserialize(self, msg):
    r"""Deserialize a message.

    Args:
        msg: Message to be deserialized.

    Returns:
        obj: Deserialized message.

    """
    # A format string is required to interpret the message contents.
    if self.format_str is None:
        raise RuntimeError("Format string is not defined.")
    if not self.as_array:
        # Scalar mode: parse fields individually and attach per-field
        # units when they are available.
        fields = list(serialize.process_message(msg, self.format_str))
        field_units = self.get_field_units()
        if field_units is not None:
            fields = [units.add_units(val, ustr, dtype=data2dtype(val))
                      for val, ustr in zip(fields, field_units)]
        return fields
    # Array mode: decode the full table, then coerce/transform it to the
    # configured datatype.
    table = serialize.table_to_array(
        msg, self.format_str, use_astropy=self.use_astropy,
        names=self.get_field_names(as_bytes=True))
    table = self.datatype.coerce_type(table, typedef=self.typedef)
    return self.datatype.transform_type(table, typedef=self.typedef)
def encode(cls, instance, typedef=None):
    r"""Encoder for the 'subtype' scalar property."""
    dtype = data2dtype(instance)
    dtype_name = dtype.name
    # dtype names (e.g. 'float64') begin with the base type prefix;
    # return the first matching subtype key.
    for subtype, prefix in constants.VALID_TYPES.items():
        if dtype_name.startswith(prefix):
            return subtype
    raise MetaschemaTypeError(
        'Cannot find subtype string for dtype %s' % dtype)
def to_array(cls, obj):
    r"""Get np.array representation of the data.

    Args:
        obj (object): Object to get array for.

    Returns:
        np.ndarray: Array representation of object.

    """
    # Strip units before inspecting the underlying data.
    bare = units.get_data(obj)
    if isinstance(bare, np.ndarray):
        return bare
    # Wrap a bare scalar in a 1-element array of the matching dtype.
    return np.array([bare], dtype=data2dtype(bare))
def encode(cls, instance, typedef=None):
    r"""Encoder for the 'precision' scalar property."""
    # dtype.itemsize is in bytes; precision is reported in bits.
    return data2dtype(instance).itemsize * 8
def test_data2dtype_errors():
    r"""Check that error is raised for list, dict, & tuple objects."""
    # The docstring promises coverage of list, dict, and tuple, but only
    # the empty list was exercised; test all three container types.
    for obj in ([], {}, ()):
        with pytest.raises(metaschema.MetaschemaTypeError):
            metaschema.data2dtype(obj)