def save(name, u_kn, N_k, s_n=None, least_significant_digit=None):
    """Create an HDF5 dump of an existing MBAR job for later use / testing.

    Parameters
    ----------
    name : str
        Name of dataset
    u_kn : np.ndarray, dtype='float', shape=(n_states, n_samples)
        Reduced potential energies
    N_k : np.ndarray, dtype='int', shape=(n_states)
        Number of samples taken from each state
    s_n : np.ndarray, optional, default=None, dtype=int, shape=(n_samples)
        The state of origin of each sample.  If None, guess the state origins.
    least_significant_digit : int, optional, default=None
        If not None, perform lossy compression using
        tables.Filters(least_significant_digit=least_significant_digit)

    Notes
    -----
    The output HDF5 files should be readable by the helper functions in
    pymbar_datasets.py.
    """
    import tables

    (n_states, n_samples) = u_kn.shape

    u_kn = ensure_type(u_kn, 'float', 2, "u_kn or Q_kn", shape=(n_states, n_samples))
    N_k = ensure_type(N_k, 'int64', 1, "N_k", shape=(n_states,))

    if s_n is None:
        s_n = get_sn(N_k)

    s_n = ensure_type(s_n, 'int64', 1, "s_n", shape=(n_samples,))

    hdf_filename = os.path.join("./", "%s.h5" % name)
    f = tables.open_file(hdf_filename, 'a')
    f.create_carray("/", "u_kn", tables.Float64Atom(), obj=u_kn,
                    filters=tables.Filters(complevel=9, complib="zlib",
                                           least_significant_digit=least_significant_digit))
    f.create_carray("/", "N_k", tables.Int64Atom(), obj=N_k,
                    filters=tables.Filters(complevel=9, complib="zlib"))
    f.create_carray("/", "s_n", tables.Int64Atom(), obj=s_n,
                    filters=tables.Filters(complevel=9, complib="zlib"))
    f.close()
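# A minimal read-back sketch for the file written by save() above. The node
# names (u_kn, N_k, s_n) match the create_carray calls; the actual loaders the
# docstring refers to live in pymbar_datasets.py, so this is only illustrative.
import tables

def load(name):
    with tables.open_file("./%s.h5" % name, 'r') as f:
        u_kn = f.root.u_kn[:]
        N_k = f.root.N_k[:]
        s_n = f.root.s_n[:]
    return u_kn, N_k, s_n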
def create_mdvlarray(self, where, name, atom=None, title="", filters=None,
                     expectedrows=None, chunkshape=None, byteorder=None,
                     createparents=False, obj=None):
    """Function to create a multi dimensional VLArray"""
    pnode = self._get_or_create_path(where, createparents)
    tb.file._checkfilters(filters)
    # Companion array named "<name>_shape" to record per-entry shapes.
    sharray = tb.VLArray(pnode, name + "_shape", tb.Int64Atom(),
                         expectedrows=expectedrows)
    return MDVLarray(pnode, name, atom, title=title, filters=filters,
                     expectedrows=expectedrows, chunkshape=chunkshape,
                     byteorder=byteorder)
def write_audio16(self, topic_group, data):
    # Fix NaN possibilities with the first value that is good.
    # Currently not supported...
    '''
    if np.any(np.isnan(data['data'])):
        replace_idx = np.where(np.all(np.isnan(data['data']), axis=1))[0]
        good_idx = np.where(np.all(np.logical_not(np.isnan(data['data'])), axis=1))[0][0]
        data['data'][replace_idx] = data['data'][good_idx]
        data['time'][replace_idx] = data['time'][good_idx]
    '''
    converted_arr = []
    for seg in data['data']:
        if isinstance(seg, int):
            converted_arr.append(np.array([seg]))
        else:
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
            converted_arr.append(np.frombuffer(seg, dtype=np.uint8))
    data['raw_audio'] = converted_arr
    #data['raw_audio'] = np.fromstring(''.join(data['data']), dtype=np.uint8)

    # Pull out left and right audio
    # Warning: this might be flipped... (right/left)
    # NOTE: Not needed currently for mono channel (Kinect and Mic). Later make a flag.
    #data['right_audio'], data['left_audio'] = raw_audio[0::2], raw_audio[1::2]
    #self.pytable_writer_helper(topic_group, ['left_audio', 'right_audio'], tables.Int64Atom(), data)
    self.pytable_writer_helper(topic_group, ['time'], tables.Int64Atom(), data)
    self.pytable_extend_writer_helper(topic_group, ['raw_audio'], tables.UInt8Atom(), data)
def create_tree_edges_distributions_storage(self):
    fileh = tables.open_file(self.__hdf5_storage, 'w')
    atom = tables.Int64Atom()
    edge_distances_distributions = fileh.create_carray(
        fileh.root, 'edge_distances_distributions', atom,
        (self.__max_ref_dist, self.__max_overall_dist + 1), '',
        filters=None)
    fileh.close()
def createOutputStorage(filename):
    file = tables.open_file(filename, mode='w')
    datetimeAtom = tables.Int64Atom()
    datetimeArray = file.create_earray(file.root, 'datetime', datetimeAtom, (0,))
    dataAtom = tables.Float64Atom()
    dataArray = file.create_earray(file.root, 'data', dataAtom,
                                   (0, M * N + P * 2))
    return file, datetimeArray, dataArray
def create_earray(db, name, element_shape, type='f'):
    if type == 'f' or type == 'float32':
        atom = tables.Float32Atom()
    elif type == 'i' or type == 'int64':  # was 'float64', which mismatched the Int64Atom
        atom = tables.Int64Atom()
    else:
        raise Exception("unknown array type; choose one of: 'i', 'f'")
    return db.create_earray(db.root, name, atom,
                            shape=(0,) + tuple(element_shape),
                            filters=tables.Filters(9))
def test_table():
    ndim = 60000
    h5file = tb.open_file('test.h5', mode='w', title="Test Array")
    root = h5file.root
    # Float64Atom would work here as well for real-valued data.
    x = h5file.create_carray(root, 'x', tb.Int64Atom(), shape=(ndim, ndim))
    # Now put in some data (randint replaces the deprecated random_integers;
    # the upper bound is exclusive, hence 101 for values in [0, 100]).
    x[:100, :100] = np.random.randint(0, 101, size=(100, 100))
    #print(x[1:3, 1:10])
    h5file.close()
def repeat_expt(smplr, n_expts, n_labels, output_file=None):
    """
    Parameters
    ----------
    smplr : sub-class of PassiveSampler
        sampler must have a sample_distinct method, reset method and ...

    n_expts : int
        number of expts to run

    n_labels : int
        number of labels to query from the oracle in each expt
    """
    FILTERS = tables.Filters(complib='zlib', complevel=5)

    max_iter = smplr._max_iter
    n_class = smplr._n_class
    if max_iter < n_labels:
        raise ValueError(
            "Cannot query {} labels. Sampler ".format(n_labels) +
            "instance supports only {} iterations".format(max_iter))

    if output_file is None:
        # Use current date/time as filename
        output_file = 'expt_' + time.strftime("%d-%m-%Y_%H:%M:%S") + '.h5'
    logging.info("Writing output to {}".format(output_file))

    f = tables.open_file(output_file, mode='w', filters=FILTERS)
    float_atom = tables.Float64Atom()
    bool_atom = tables.BoolAtom()
    int_atom = tables.Int64Atom()

    array_F = f.create_carray(f.root, 'F_measure', float_atom,
                              (n_expts, n_labels, n_class))
    array_s = f.create_carray(f.root, 'n_iterations', int_atom, (n_expts, 1))
    array_t = f.create_carray(f.root, 'CPU_time', float_atom, (n_expts, 1))

    logging.info("Starting {} experiments".format(n_expts))
    for i in range(n_expts):
        if i % np.ceil(n_expts / 10).astype(int) == 0:
            logging.info("Completed {} of {} experiments".format(i, n_expts))
        ti = time.process_time()
        smplr.reset()
        smplr.sample_distinct(n_labels)
        tf = time.process_time()
        if hasattr(smplr, 'queried_oracle_'):
            array_F[i, :, :] = smplr.estimate_[smplr.queried_oracle_]
        else:
            array_F[i, :, :] = smplr.estimate_
        array_s[i] = smplr.t_
        array_t[i] = tf - ti

    f.close()
    logging.info("Completed all experiments")
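# A hedged read-back sketch for the arrays written by repeat_expt. The node
# layout (F_measure, n_iterations, CPU_time) matches the create_carray calls
# above; the aggregation shown is illustrative, not part of the original API.
import numpy as np
import tables

def summarize_expt(output_file):
    with tables.open_file(output_file, mode='r') as f:
        F = f.root.F_measure[:]   # shape (n_expts, n_labels, n_class)
        cpu = f.root.CPU_time[:]  # shape (n_expts, 1)
    # Mean F-measure per label budget, averaged over experiments.
    return F.mean(axis=0), cpu.mean()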
def create_fold_data():
    while True:
        fold = q.get()
        print(fold)
        files_d = {}
        files_d['val'] = files[fold * 3:(fold + 1) * 3]
        files_d['train'] = files[:fold * 3]
        files_d['train'] += files[(fold + 1) * 3:]
        for set_name in ['train', 'val']:
            print('{}\t{}'.format(fold, set_name))
            n = len(files_d[set_name])
            f = tables.open_file(dst.format(set_name, fold), 'w')
            data = f.create_earray(f.root, 'data', tables.Float32Atom(),
                                   (0, features), expectedrows=n * 5 * 7476)
            targets = f.create_earray(f.root, 'targets', tables.Int64Atom(),
                                      (0,), expectedrows=n * 5 * 7476)
            for filename in files_d[set_name]:
                print('{}\t{}'.format(fold, filename))
                t = open(target_dst.format(filename[:12]))
                targets_csv = csv.reader(t)
                targets_single = []
                for row in targets_csv:
                    targets_single += [row[1]]
                t.close()
                targets_single = targets_single[:7476]
                for i in range(5):
                    targets.append(np.array(targets_single))
                for d in ['', '_-1', '_1', '_-2', '_2']:
                    print('{}\t{}'.format(fold, d))
                    mat = sio.loadmat(path.format(d) + filename)['dataFull']
                    data.append(mat[:7476])
            f.close()
        q.task_done()
def write_bluetooth(self, topic_group, data):
    str_fields = ['mac_addr', 'dev_name']
    self.pytable_writer_helper(topic_group, str_fields,
                               tables.StringAtom(itemsize=20), data)
    self.pytable_writer_helper(topic_group, ['is_present'], tables.BoolAtom(), data)
    self.pytable_writer_helper(topic_group, ['rssi'], tables.Int64Atom(), data)
    self.pytable_writer_helper(topic_group, ['time'], tables.Float64Atom(), data)
def BM_G(file, n, ell, N):
    # Build the block-diagonal matrix G = I_n tensor g, where
    # g = (1, 2, 4, ..., 2^(ell-1)) -- the "gadget" matrix of GSW-style schemes.
    h5file = tb.open_file(file, mode='w', title="Test Array")
    root = h5file.root
    x = h5file.create_carray(root, 'x', tb.Int64Atom(), shape=(n, N))
    g = [2 ** z for z in np.arange(ell)]
    for i in range(0, n):
        x[i, ell * i:ell * (i + 1)] = g
    #print('G: ' + str(x[0, 0:ell]))
    h5file.close()
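# Hedged sketch: the BM_mult3 snippet further down calls an external
# Ginv(...) that is not shown in this collection. Under the usual
# gadget-matrix convention matching BM_G above, G^{-1} is bitwise
# decomposition; a minimal vector-valued stand-in could look like this
# (the matrix version would decompose column by column):
import numpy as np

def bit_decompose(v, ell):
    """Decompose each entry of the integer vector v into ell bits (LSB first),
    so that BM_G's G satisfies G @ bit_decompose(v, ell) == v for v < 2**ell."""
    v = np.asarray(v, dtype=np.int64)
    return np.concatenate([(x >> np.arange(ell)) & 1 for x in v])

# e.g. with n=2, ell=3, N=6: G @ bit_decompose([5, 3], 3) == [5, 3]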
def write_image(self, topic_group, data):
    # Note: you need to load and reshape (data.reshape(480, 640, 3))
    self.pytable_writer_helper(topic_group, ['data'], tables.UInt8Atom(), data)
    self.pytable_writer_helper(topic_group,
                               ['width', 'height', 'step', 'is_bigendian'],
                               tables.Int64Atom(), data)
    self.pytable_writer_helper(topic_group, ['encoding'],
                               tables.StringAtom(itemsize=15), data)
    self.pytable_writer_helper(topic_group, ['time'], tables.Float64Atom(), data)
def assign_array(db, name, a, verbose=1):
    if a.dtype == dtype('int32'):
        atom = tables.Int32Atom()
    elif a.dtype == dtype('int64'):
        atom = tables.Int64Atom()
    elif a.dtype == dtype('f') or a.dtype == dtype('d'):
        # Note: doubles are downcast to float32 on write.
        atom = tables.Float32Atom()
    else:
        raise Exception('unknown array type: %s' % a.dtype)
    if verbose:
        print("[writing", name, a.shape, atom, "]")
    node = db.create_earray(db.root, name, atom,
                            shape=[0] + list(a.shape[1:]),
                            filters=tables.Filters(9))
    node.append(a)
def createOutputStorage(self, filename):
    file = tables.open_file(filename, mode='w')
    datetimeAtom = tables.Int64Atom()
    datetimeArray = file.create_earray(file.root, 'datetime', datetimeAtom, (0,))
    dataAtom = tables.Float64Atom()
    # M * N = size of table, P = size of bar's high and low level
    dataArray = file.create_earray(file.root, 'data', dataAtom,
                                   (0, self.M * self.N + self.P * 2))
    return file, datetimeArray, dataArray
def setUp(self):
    """setUp() is called before every test and just creates a temporary work
    space for reading/writing files."""
    fid, self.filename1 = tempfile.mkstemp()
    fid, self.filename2 = tempfile.mkstemp()
    self.data = np.arange(10000, dtype=np.int64)

    # Write data to an HDF5 file as a compressed CArray.
    hdfFile = tables.open_file(self.filename1, 'a')
    # The filter is the same used to save MSMB2 data.
    hdfFile.create_carray("/", "arr_0", tables.Int64Atom(),
                          self.data.shape, filters=io.COMPRESSION)
    hdfFile.root.arr_0[:] = self.data[:]
    hdfFile.flush()
    hdfFile.close()
def _create_table(self, name, example):
    """
    Create a new table within the HDF file, where the table's shape and its
    datatype are determined by *example*.
    """
    type_map = {
        np.dtype(np.float64): tables.Float64Atom(),
        np.dtype(np.float32): tables.Float32Atom(),
        np.dtype(int): tables.Int64Atom(),  # np.int is removed in modern numpy
        np.dtype(np.int8): tables.Int8Atom(),
        np.dtype(np.uint8): tables.UInt8Atom(),
        np.dtype(np.int16): tables.Int16Atom(),
        np.dtype(np.uint16): tables.UInt16Atom(),
        np.dtype(np.int32): tables.Int32Atom(),
        np.dtype(np.uint32): tables.UInt32Atom(),
        np.dtype(bool): tables.BoolAtom(),
    }

    try:
        if type(example) == np.ndarray:
            h5type = type_map[example.dtype]
        elif type(example) == str:
            h5type = tables.VLStringAtom()
    except KeyError:
        raise TypeError("Could not create table %s because of unknown dtype '%s'"
                        % (name, example.dtype))

    if type(example) == np.ndarray:
        h5dim = (0,) + example.shape
        h5 = self.h5
        filters = tables.Filters(complevel=self.compression_level,
                                 complib='zlib', shuffle=True)
        self.tables[name] = h5.create_earray(h5.root, name, h5type, h5dim,
                                             filters=filters)
    elif type(example) == str:
        h5 = self.h5
        filters = tables.Filters(complevel=self.compression_level,
                                 complib='zlib', shuffle=True)
        self.tables[name] = h5.create_vlarray(h5.root, name, h5type,
                                              filters=filters)
    self.types[name] = type(example)
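# Side note (an alternative, not part of the class above): PyTables can derive
# the atom directly from a numpy dtype, which avoids hand-maintained maps like
# type_map when no special-casing is needed.
import numpy as np
import tables

assert isinstance(tables.Atom.from_dtype(np.dtype(np.int64)), tables.Int64Atom)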
def _create_table_list(self, name, example):
    """
    Create a new table within the HDF file, where the table's shape and its
    datatype are determined by *example*.

    The modified version for creating a table with appendList.
    """
    type_map = {
        np.dtype(np.float64): tables.Float64Atom(),
        np.dtype(np.float32): tables.Float32Atom(),
        np.dtype(int): tables.Int64Atom(),  # np.int is removed in modern numpy
        np.dtype(np.int8): tables.Int8Atom(),
        np.dtype(np.uint8): tables.UInt8Atom(),
        np.dtype(np.int16): tables.Int16Atom(),
        np.dtype(np.uint16): tables.UInt16Atom(),
        np.dtype(np.int32): tables.Int32Atom(),
        np.dtype(np.uint32): tables.UInt32Atom(),
        np.dtype(bool): tables.BoolAtom(),
    }

    try:
        if type(example) == np.ndarray:
            h5type = type_map[example.dtype]
        elif type(example) == list and type(example[0]) == str:
            h5type = tables.VLStringAtom()
    except KeyError:
        raise TypeError("Don't know how to handle dtype '%s'" % example.dtype)

    if type(example) == np.ndarray:
        h5dim = (0,) + example.shape[1:]
        h5 = self.h5
        filters = tables.Filters(complevel=self.compression_level,
                                 complib='zlib', shuffle=True)
        self.tables[name] = h5.create_earray(h5.root, name, h5type, h5dim,
                                             filters=filters)
    elif type(example) == list and type(example[0]) == str:
        h5 = self.h5
        filters = tables.Filters(complevel=self.compression_level,
                                 complib='zlib', shuffle=True)
        self.tables[name] = h5.create_vlarray(h5.root, name, h5type,
                                              filters=filters)
    self.types[name] = type(example)
def saveDataset(self, X, ACTION, Y, POSSIBLE_ACTIONS):
    f = tables.open_file(self.dataFilePath, mode='a')
    # Is this the first time?
    if "/X" not in f:
        atom = tables.Int64Atom()
        atomFloat = tables.Float64Atom()
        f.create_earray(f.root, 'X', atomFloat, (0, X.shape[1]))
        f.create_earray(f.root, 'ACTION', atom, (0, 1))
        f.create_earray(f.root, 'Y', atomFloat, (0, 1))
        f.create_earray(f.root, 'POSSIBLE_ACTIONS', atomFloat,
                        (0, POSSIBLE_ACTIONS.shape[1]))
    f.root.X.append(X)
    f.root.ACTION.append(ACTION.reshape(-1, 1))
    f.root.Y.append(Y.reshape(-1, 1))
    f.root.POSSIBLE_ACTIONS.append(POSSIBLE_ACTIONS)
    f.close()
def sparse_save(matrix, filename, dtype=np.dtype(np.float64)):
    print("SAVE SPARSE")
    print(matrix.shape)
    atom = tb.Atom.from_dtype(dtype)
    f = tb.open_file(filename, 'w')

    print("saving data")
    filters = tb.Filters(complevel=5, complib='blosc')
    out = f.create_carray(f.root, 'data', atom, shape=matrix.data.shape,
                          filters=filters)
    out[:] = matrix.data

    print("saving indices")
    out = f.create_carray(f.root, 'indices', tb.Int64Atom(),
                          shape=matrix.indices.shape, filters=filters)
    out[:] = matrix.indices

    print("saving indptr")
    out = f.create_carray(f.root, 'indptr', tb.Int64Atom(),
                          shape=matrix.indptr.shape, filters=filters)
    out[:] = matrix.indptr

    print("saving done")
    f.close()
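# A minimal load counterpart for sparse_save, assuming the matrix was in CSR
# form (data/indices/indptr are the CSR components). sparse_save does not
# store the shape, so the column count is inferred from the indices, which
# undercounts trailing all-zero columns; pass shape explicitly if known.
import scipy.sparse as sparse
import tables as tb

def sparse_load(filename, shape=None):
    with tb.open_file(filename, 'r') as f:
        data = f.root.data[:]
        indices = f.root.indices[:]
        indptr = f.root.indptr[:]
    if shape is None:
        shape = (len(indptr) - 1, indices.max() + 1 if len(indices) else 0)
    return sparse.csr_matrix((data, indices, indptr), shape=shape)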
def BM_PlainEnc(params, m, key, fileC):
    # Create the ciphertext file.
    Cfile = tb.open_file(fileC, mode='w', title="Test Array")
    root = Cfile.root
    x = Cfile.create_carray(root, 'x', tb.Int64Atom(),
                            shape=(params.n, params.N))
    Gfile = tb.open_file(params.G)
    for i in range(0, params.n):
        x1 = Gfile.root.x[i, :]
        tmp = (m * x1) % params.q
        x[i, :] = tmp
        #print(str(i) + ':' + str(x[i, :]))
    #print(str(x[params.n - 1, (params.N - params.ell):params.N]))
    Gfile.close()
    Cfile.close()
def add_timestamps(self, name, clk_p, max_rates, bg_rate, num_particles,
                   bg_particle, populations=None, overwrite=False,
                   chunksize=2**16, comp_filter=default_compression):
    if name in self.h5file.root.timestamps:
        if overwrite:
            self.h5file.remove_node('/timestamps', name=name)
            self.h5file.remove_node('/timestamps', name=name + '_par')
        else:
            msg = 'Timestamp array already exists (%s)' % name
            raise ExistingArrayError(msg)

    times_array = self.h5file.create_earray(
        '/timestamps', name, atom=tables.Int64Atom(),
        shape=(0,), chunkshape=(chunksize,),
        filters=comp_filter,
        title='Simulated photon timestamps')
    times_array.set_attr('clk_p', clk_p)
    times_array.set_attr('max_rates', max_rates)
    times_array.set_attr('bg_rate', bg_rate)
    times_array.set_attr('populations', populations)
    times_array.set_attr('PyBroMo', __version__)
    times_array.set_attr('creation_time', current_time())

    particles_array = self.h5file.create_earray(
        '/timestamps', name + '_par', atom=tables.UInt8Atom(),
        shape=(0,), chunkshape=(chunksize,),
        filters=comp_filter,
        title='Particle number for each timestamp')
    particles_array.set_attr('num_particles', num_particles)
    particles_array.set_attr('bg_particle', bg_particle)
    particles_array.set_attr('PyBroMo', __version__)
    particles_array.set_attr('creation_time', current_time())
    return times_array, particles_array
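# Hedged read-back sketch for the pair of arrays created by add_timestamps.
# Node names and attributes follow the create_earray/set_attr calls above;
# the surrounding object (self.h5file) is replaced by a plain file handle.
import tables

def read_timestamps(h5_path, name):
    with tables.open_file(h5_path, 'r') as h5:
        times = h5.get_node('/timestamps', name)
        particles = h5.get_node('/timestamps', name + '_par')
        clk_p = times.get_attr('clk_p')  # clock period, to convert to seconds
        return times[:] * clk_p, particles[:]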
def _ensure_heirarchy(self):
    r = self.root
    db = self.db
    filters = tb.Filters(complib=b'zlib', complevel=1)
    if 'replays' not in r:
        db.create_vlarray(r, 'replays', atom=tb.VLStringAtom())
    if 'metadata' not in r:
        db.create_table(r, 'metadata', filters=filters,
                        description=METADATA_DESC)
    if 'actions' not in r:
        db.create_earray(r, 'actions', atom=tb.Int64Atom(),
                         shape=(0, 2, NSTEPS), filters=filters)
def _create_column_from_dtype(self, h5: tb.File, table_path: str,
                              col_name: str, col_dtype: str, shape: tuple):
    colpath = self._path(table_path, col_name)
    if re.match(r'[nf]', col_dtype):
        self._create_column(h5, colpath, atom=tb.Float64Atom(), shape=shape)
    elif re.match(r'i', col_dtype):
        self._create_column(h5, colpath, atom=tb.Int64Atom(), shape=shape)
    elif re.match(r'[osc](\d+)', col_dtype):
        m = re.match(r'[osc](\d+)', col_dtype)
        size = int(m.group(1))
        self._create_column(h5, colpath, atom=tb.StringAtom(size), shape=shape)
    else:
        raise Exception(f'Unrecognized col_dtype: {col_dtype}')
def test_load_save_hdf5(self):
    with tempfile.NamedTemporaryFile() as tmp_file:
        make_hdf5(tmp_file.name, self.test_data.shape, tables.Int64Atom())
        save_hdf5(tmp_file.name, self.test_data, 0)
        self.assertTrue(
            shape_check_hdf5(tmp_file.name, (1,) + self.test_data.shape))
        load_data = load_hdf5(tmp_file.name, 0)
        self.assertTrue(np.allclose(self.test_data, load_data))
        new_test_data = self.test_data * 20
        save_hdf5(tmp_file.name, new_test_data, 0, mode='r+')
        load_data = load_hdf5(tmp_file.name, 0)
        self.assertTrue(np.allclose(new_test_data, load_data))
def _create_table(self, name, example, parent=None):
    """
    Create a new table within the HDF file, where the table's shape and its
    datatype are determined by *example*.
    """
    h5 = self.h5
    filters = tables.Filters(complevel=self.compression_level,
                             complib='zlib', shuffle=True)
    if parent is None:
        parent = h5.root

    if type(example) == str:
        h5type = tables.VLStringAtom()
        h5.create_vlarray(parent, name, h5type, filters=filters)
        return
    if type(example) == dict:
        self.h5.create_group(parent, name)
        return

    # If we get here then we're dealing with numpy arrays.
    example = np.asarray(example)

    # MODIFICATION: keyed on dtype name everywhere and introduced a string entry.
    type_map = {
        np.dtype(np.float64).name: tables.Float64Atom(),
        np.dtype(np.float32).name: tables.Float32Atom(),
        np.dtype(int).name: tables.Int64Atom(),  # np.int is removed in modern numpy
        np.dtype(np.int8).name: tables.Int8Atom(),
        np.dtype(np.uint8).name: tables.UInt8Atom(),
        np.dtype(np.int16).name: tables.Int16Atom(),
        np.dtype(np.uint16).name: tables.UInt16Atom(),
        np.dtype(np.int32).name: tables.Int32Atom(),
        np.dtype(np.uint32).name: tables.UInt32Atom(),
        np.dtype(bool).name: tables.BoolAtom(),
        # Maximal string length of 128 per string - change if needed
        'string32': tables.StringAtom(128),
    }

    try:
        h5type = type_map[example.dtype.name]
        h5dim = (0,) + example.shape
        h5.create_earray(parent, name, h5type, h5dim, filters=filters)
    except KeyError:
        raise TypeError("Don't know how to handle dtype '%s'" % example.dtype)
def BM_mult3(file1, file2, file3, params, colcal):  # ignore colcal
    h5file1 = tb.open_file(file1)
    h5file2 = tb.open_file(file2)
    h5file3 = tb.open_file(file3, mode='w', title="Test Array")
    root3 = h5file3.root
    # Bug fix: the output array must be created on h5file3, not h5file2.
    x3 = h5file3.create_carray(root3, 'x', tb.Int64Atom(),
                               shape=(params.n, params.N))
    for i in range(0, params.n // CHUNK):
        j = params.N
        x1 = h5file1.root.x[CHUNK * i:CHUNK * (i + 1), :]
        # But we take two columns to avoid an np array error.
        x2 = h5file2.root.x[:, (j - params.ell):j]
        x2G = Ginv(x2, params)
        x3[CHUNK * i:CHUNK * (i + 1), (j - params.ell):j] = np.dot(x1, x2G) % params.q
    h5file1.close()
    h5file2.close()
    h5file3.close()
def BM_mult(file1, file2, file3, params):
    h5file1 = tb.open_file(file1)
    h5file2 = tb.open_file(file2)
    h5file3 = tb.open_file(file3, mode='w', title="Test Array")
    root3 = h5file3.root
    # Bug fix: the output array must be created on h5file3, not h5file2.
    x3 = h5file3.create_carray(root3, 'x', tb.Int64Atom(),
                               shape=(params.N, params.N))
    for i in range(0, params.N // CHUNK):
        for j in range(0, params.N // CHUNK):
            x1 = h5file1.root.x[CHUNK * i:CHUNK * (i + 1), :]
            x2 = h5file2.root.x[:, CHUNK * j:CHUNK * (j + 1)]
            x3[CHUNK * i:CHUNK * (i + 1), CHUNK * j:CHUNK * (j + 1)] = \
                np.dot(x1, x2) % params.q
    h5file1.close()
    h5file2.close()
    h5file3.close()
def BM_SecEnc(params, m, key, fileC):
    # randint replaces the deprecated random_integers; params.q + 1 keeps the
    # original inclusive upper bound.
    Cbar = np.random.randint(0, params.q + 1, (params.n - 1, params.N))
    e = Gau(params.N, params.var) % params.q
    b = (np.dot(np.transpose(key.sbar), Cbar)) % params.q
    b = (np.transpose(e) - b) % params.q  # 1*N array
    # Initial C = (Cbar; b^t)
    C = np.concatenate((Cbar, b), axis=0)
    # Create the ciphertext file.
    Cfile = tb.open_file(fileC, mode='w', title="Test Array")
    root = Cfile.root
    x = Cfile.create_carray(root, 'x', tb.Int64Atom(),
                            shape=(params.n, params.N))
    Gfile = tb.open_file(params.G)
    for i in range(0, params.n):
        x1 = Gfile.root.x[i, :]
        # Parenthesized: % binds tighter than +, so without parentheses only
        # m*x1 was reduced mod q rather than the whole sum.
        x[i, :] = (C[i, :] + m * x1) % params.q
    Gfile.close()
    Cfile.close()
def BM_add(file1, file2, file, params, flag_partial_add):
    h5file1 = tb.open_file(file1)
    h5file2 = tb.open_file(file2)
    h5file = tb.open_file(file, mode='w', title="Test Array")
    root = h5file.root
    x = h5file.create_carray(root, 'x', tb.Int64Atom(),
                             shape=(params.n, params.N))
    for i in range(0, params.n // CHUNK):
        if flag_partial_add == 0:
            x1 = h5file1.root.x[CHUNK * i:CHUNK * (i + 1), :]
            x2 = h5file2.root.x[CHUNK * i:CHUNK * (i + 1), :]
            # Parenthesized so the sum (not just x2) is reduced mod q.
            x[CHUNK * i:CHUNK * (i + 1), :] = (x1 + x2) % params.q
        else:
            x1 = h5file1.root.x[CHUNK * i:CHUNK * (i + 1), (params.N - params.ell):params.N]
            x2 = h5file2.root.x[CHUNK * i:CHUNK * (i + 1), (params.N - params.ell):params.N]
            x[CHUNK * i:CHUNK * (i + 1), (params.N - params.ell):params.N] = (x1 + x2) % params.q
    h5file1.close()
    h5file2.close()
    h5file.close()
def store_matrix(lil_matrix, h5_filename):
    """
    Writes a sparse matrix to an h5 file.

    Args:
        lil_matrix: sparse matrix
        h5_filename: path where a h5 file can be written. If a file with
            that name exists, it will be deleted.
    """
    if os.path.exists(h5_filename):
        os.remove(h5_filename)
    lil_matrix = sparse.lil_matrix(lil_matrix)
    filters = tables.Filters(complevel=5, complib='zlib')
    matrix_file = tables.open_file(h5_filename, mode='w', filters=filters,
                                   title='matrix')
    data_table = matrix_file.create_vlarray(matrix_file.root, 'data',
                                            tables.Float64Atom(shape=()),
                                            'data', filters=tables.Filters(1))
    for row in lil_matrix.data:
        data_table.append(row)
    rows = matrix_file.create_vlarray(matrix_file.root, 'rows',
                                      tables.Int64Atom(shape=()),
                                      "ragged array of ints",
                                      filters=filters)
    for row in lil_matrix.rows:
        rows.append(row)
    matrix_file.create_array(matrix_file.root, 'shape',
                             obj=np.array(lil_matrix.shape),
                             title='Matrix shape')
    matrix_file.close()
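# A minimal load counterpart for store_matrix, reconstructing the matrix from
# the 'data', 'rows', and 'shape' nodes written above. Sketch under the
# assumption that rows[i] holds the column indices and data[i] the values of
# row i, which is scipy's lil_matrix layout.
import scipy.sparse as sparse
import tables

def load_matrix(h5_filename):
    with tables.open_file(h5_filename, mode='r') as f:
        shape = tuple(f.root.shape[:])
        m = sparse.lil_matrix(shape)
        for i, (cols, vals) in enumerate(zip(f.root.rows, f.root.data)):
            if len(cols):
                m[i, cols] = vals
    return m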