def _dump(py_obj, h_group, call_id=None, **kwargs):
    """Recursively dump a python object into a group within an HDF5 file.

    Called recursively by the main dump() function for nested iterables.

    Args:
        py_obj: python object to dump.
        h_group (h5.File.group): group to dump data into.
        call_id (int): index identifying the object's relative location
            within its parent iterable.
        **kwargs: forwarded to the dataset/group creation helpers.
    """
    # Ensure a loader for this object's type has been registered before dispatch.
    load_loader(py_obj)

    # ndarray-like objects and dicts are written directly as datasets.
    if check_is_ndarray_like(py_obj) or isinstance(py_obj, dict):
        create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
        return

    if check_is_iterable(py_obj):
        # item_type is falsy when the iterable mixes element types.
        item_type = check_iterable_item_type(py_obj)
        if item_type and item_type in dumpable_dtypes:
            # Homogeneous iterable of a dumpable scalar type (e.g. list of
            # ints): store the whole thing as a single dataset.
            create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
        else:
            # Mixed types, or homogeneous non-scalar elements (e.g. list of
            # lists): create a subgroup and recurse per element.
            h_subgroup = create_hkl_group(py_obj, h_group, call_id)
            for index, element in enumerate(py_obj):
                _dump(element, h_subgroup, call_id=index, **kwargs)
        return

    # Non-iterable leaf value: store it as its own dataset.
    create_hkl_dataset(py_obj, h_group, call_id, **kwargs)
def test_check_is_iterable():
    """check_is_iterable returns exactly True for a list, exactly False for an int."""
    cases = [
        ([1, 2, 3], True),
        (1, False),
    ]
    for value, expected in cases:
        # Identity check: the helper must return a real bool, not just a truthy value.
        assert check_is_iterable(value) is expected
def test_check_is_iterable():
    """A list counts as iterable; a bare int does not."""
    iterable_value = [1, 2, 3]
    scalar_value = 1
    assert check_is_iterable(iterable_value)
    assert not check_is_iterable(scalar_value)
def test_check_is_iterable():
    """Exact-bool contract: True for an iterable, False for a scalar."""
    # `is` (not `==`) pins the return value to the bool singletons.
    assert check_is_iterable(list(range(1, 4))) is True
    assert check_is_iterable(1) is False
def test_is_iterable():
    """helpers.check_is_iterable is truthy for a list and falsy for an int."""
    assert helpers.check_is_iterable([1, 2, 3])
    assert not helpers.check_is_iterable(1)