def function(self):
    A = getattr(self, storage1 + 'A')
    B = getattr(self, storage2 + 'B')
    Rd = kernel(A, B)
    self.assert_(isinstance(Rd, blaze.Array))
    self.assert_(Rd._data.capabilities.deferred)
    p = _store(storage3 + 'Rd') if storage3 == 'dsk' else None
    try:
        Rc = blaze.eval(Rd, storage=p)
        self.assert_(isinstance(Rc, blaze.Array))
        npy_data = getattr(self, 'npy' + R)
        assert_allclose(np.array(dd_as_py(Rc._data)), npy_data)
        if storage3 == 'dsk':
            self.assert_(Rc._data.capabilities.persistent)
        else:
            self.assert_(not Rc._data.capabilities.persistent)
    finally:
        try:
            if p is not None:
                blaze.drop(p)
        except:
            pass  # show the real error...
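# Note: `storage1`, `storage2`, `storage3`, `kernel` and `R` above are free
# variables, which suggests `function` is built inside a loop and attached to
# a TestCase under a parametrized name. A minimal sketch of that pattern
# (hypothetical names and body, not the original generator):
import unittest

class TestBinaryKernels(unittest.TestCase):
    pass

def _make_test(storage1, storage2, storage3):
    def function(self):
        # the real body runs the kernel/eval/drop checks shown above
        self.assertIn(storage3, ('mem', 'dsk'))
    return function

for s1 in ('mem', 'dsk'):
    for s2 in ('mem', 'dsk'):
        for s3 in ('mem', 'dsk'):
            name = 'test_%s_%s_%s' % (s1, s2, s3)
            setattr(TestBinaryKernels, name, _make_test(s1, s2, s3))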
def test_hdf5(h5):
    import h5py
    h5.extend(data)
    drop(h5)
    with h5py.File(h5.path, mode='r') as f:
        with pytest.raises(KeyError):
            f['/test']
def test_drop(pyt):
    drop(pyt)
    with pytest.raises(tb.ClosedNodeError):
        drop(pyt)
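# A plausible `pyt` fixture consistent with the test above: it creates a
# PyTables array and yields the open node, so a second drop() hits a closed
# node. The names and layout are assumptions, not the original conftest:
import numpy as np
import pytest
import tables as tb

@pytest.fixture
def pyt(tmpdir):
    path = str(tmpdir.join('test.h5'))
    f = tb.open_file(path, mode='w')
    yield f.create_array(f.root, 'data', np.arange(10))
    if f.isopen:
        f.close()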
del describe_array

# --------------------------------------------------------------------

print_section('Persisted arrays')

def maybe_remove(persist):
    import os.path
    if os.path.exists(persist.path):
        # Remove every directory starting with rootdir
        blaze.drop(persist)

# Create an empty array on-disk
dname = 'blz://persisted.blz'
store = blaze.Storage(dname)
maybe_remove(store)
p = blaze.zeros('0, float64', storage=store)

# Feed it with some data
blaze.append(p, range(10))
print('Before re-opening:', p)

# Re-open the dataset via its URI
p2 = blaze.open(store)
print('After re-opening:', p2)

blaze.drop(dname)
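# As a sanity check (assumed, not part of the original script), the re-opened
# array should round-trip the appended data:
from blaze.datadescriptor import dd_as_py
assert dd_as_py(p2._data) == [float(i) for i in range(10)]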
'''Sample module showing how to read CSV files into blaze arrays'''

import blaze
from blaze.datadescriptor import dd_as_py
#from dynd import nd

# A CSV toy example
csv_buf = u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
"""
csv_schema = "{ f0: string; f1: string; f2: int16; f3: bool }"

# Create a temporary CSV file and open the dataset via its URI
dname = 'csv:///tmp/test.csv'
store = blaze.Storage(dname)
print("store:", store)
with open(store.path, "w") as f:
    f.write(csv_buf)
arr = blaze.open(store, csv_schema)
#print('Blaze array:', arr)  # XXX This does not work yet
#print('Blaze array:', nd.array(arr))  # XXX idem

# Convert the data to a native Python object
print('Blaze array:', dd_as_py(arr._data))

# Remove the temporary CSV file
blaze.drop(store)
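# For the three-row buffer above, dd_as_py(arr._data) should return native
# Python objects shaped by csv_schema; illustratively (not verified output):
#   [{'f0': u'k1', 'f1': u'v1', 'f2': 1, 'f3': False},
#    {'f0': u'k2', 'f1': u'v2', 'f2': 2, 'f3': True},
#    {'f0': u'k3', 'f1': u'v3', 'f2': 3, 'f3': False}]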
def test_drop(mongo):
    drop(mongo.tmp_collection)
    assert mongo.tmp_collection.count() == 0
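# A plausible `mongo` fixture matching the test above (assumes a local mongod
# and pymongo's pre-4.0 Collection.count(); names are illustrative, not the
# original conftest):
import pymongo
import pytest

@pytest.fixture
def mongo():
    client = pymongo.MongoClient('localhost', 27017)
    db = client.test_db
    db.tmp_collection.insert_many([{'x': i} for i in range(3)])
    yield db
    client.drop_database('test_db')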
        val = random.uniform(-math.pi, math.pi)
        factor = math.sqrt(reduce(add, [j * 10**i
                                        for i, j in enumerate(reversed(el))]))
        op0[el] = math.sin(val) * factor
        op1[el] = math.cos(val) * factor

    logging.info("initialization took %f seconds", (time.time() - t))

    expr = op0*op0 + op1*op1

    eval_in_mem(expr, 0, dump_result=True)
    eval_in_mem(expr, 0, dump_result=True)
    eval_in_mem(expr, iter_dims=1, dump_result=True)
    eval_in_mem(expr, iter_dims=1, chunk=3, dump_result=True)
    eval_in_mem(expr, iter_dims=1, chunk=4, dump_result=True)
    eval_in_mem(expr, iter_dims=1, chunk=5, dump_result=True)

    stor = blaze.Storage('blz://persisted.blz')
    t = time.time()
    result = blaze.eval(expr, storage=stor)
    logging.info("evaluation blz took %f seconds", time.time() - t)
    logging.debug(str(result))
    blaze.drop(stor)

    t = time.time()
    result = blaze.eval(expr)
    logging.info("evaluation hierarchical took %f seconds", time.time() - t)
    logging.debug(str(result))
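# `eval_in_mem` is defined elsewhere in this benchmark script. A minimal
# sketch of a timing wrapper consistent with the calls above (hypothetical;
# the real helper presumably evaluates the expression in `chunk`-sized
# slices along the first `iter_dims` dimensions, which is omitted here):
import logging
import time

import blaze

def eval_in_mem(expr, iter_dims, chunk=1, dump_result=False):
    t = time.time()
    result = blaze.eval(expr)  # chunked iteration not shown
    logging.info("evaluation in memory took %f seconds", time.time() - t)
    if dump_result:
        logging.debug(str(result))
    return result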
        # advance sources
        tpl = (i,)
        cf_args = [_mk_array_c_ref(t, er.read_single(tpl), sh)
                   for t, er, sh in args]
        with dst.buffered_ptr() as dst_ptr:
            cf_args.append(_mk_array_c_ref(res_ctype, dst_ptr, res_shape[1:]))
            cf(*cf_args)

    return blaze.Array(res_dd)


res = execute_datadescriptor_ooc_2(d._data, 'foo.blz')
banner("result (ooc_2)")
print(res)
blaze.drop(blaze.Storage('foo.blz'))
del res

res = execute_datadescriptor_ooc(d._data, 'bar.blz')
banner("result (ooc)")
print(res)
blaze.drop(blaze.Storage('bar.blz'))
del res

banner("Executor iterating")
ex = BlazeExecutor(d._data, 1)
shape, dtype = to_numpy(d._data.dshape)
res_dd = BLZDataDescriptor(blz.zeros((0,) + shape[1:],
                                     dtype=dtype,
                                     rootdir='baz.blz'))
f.create_array(f.root.g, 'a2', a2)
print("Created HDF5 file with the following contents:\n%s" % str(f))

print_section('opening and handling datasets in hdf5 files')

# Open a homogeneous dataset there
store = blaze.Storage(fname, format='hdf5')
a = blaze.open(store, datapath="/a1")
# Print it
print("/a1 contents:", a)
# Print the datashape
print("datashape for /a1:", a.dshape)

# Open another homogeneous dataset there
store = blaze.Storage(fname, format='hdf5')
a = blaze.open(store, datapath="/g/a2")
# Print it
print("/g/a2 contents:", a)
# Print the datashape
print("datashape for /g/a2:", a.dshape)

# Now, get a heterogeneous dataset
store = blaze.Storage(fname, format='hdf5')
t = blaze.open(store, datapath="/t1")
# Print it
print("/t1 contents:", t)
# Print the datashape
print("datashape for /t1:", t.dshape)

# Finally, get rid of the sample file
blaze.drop(store)