def test_read_write_int4(self):
    """Round-trip several 4-byte ints through write_int4/read_int4."""
    stream = BytesIO()
    for value in (0, 1, 2 ** 30):
        stream.seek(0)
        write_int4(stream, value)
        stream.seek(0)
        assert read_int4(stream) == value
def a(self):
    """Exercise IntArray: assignment, iteration, bounds, defaults, iteritems."""
    stream = BytesIO()
    for sample in ([], [0], [2, 1], range(7)):
        int_array = IntArray(file=stream, number_of_ints=10, maximum_int=10)
        for index, value in enumerate(sample):
            int_array[index] = value
        # Everything other than the blank value must be exactly the sample.
        filled = set(int_array)
        filled.discard(int_array.get_blank_value())
        assert filled == set(sample), (list(int_array), sample)
        # One past the last slot is out of range.
        assert raises(IndexError, int_array.__getitem__, 10)
        # An array rebuilt from the serialized bytes must load cleanly.
        int_array2 = IntArray(file=BytesIO(stream.getvalue()))
    int_array3 = IntArray(number_of_ints=10, maximum_int=300)
    for index in range(10):
        assert int_array3.get(index) is None
    assert int_array3[1] == int_array3.get_blank_value()
    int_array3[1] = 42
    assert int_array3.get(1) == 42
    assert len(int_array3) == 10
    # Values beyond maximum_int are rejected.
    raises(ValueError, int_array3.__setitem__, 2, 100000)
    int_array4 = IntArray(number_of_ints=10)
    assert int_array4.get(1, default=42) == 42
    assert int_array4.get(100, default=42) == 42
    assert list(iteritems(int_array4)) == []
    int_array4[3] = 4
    int_array4[8] = 9
    assert list(iteritems(int_array4)) == [(3, 4), (8, 9)]
def read_write_int8(self):
    """Round-trip several 8-byte ints through write_int8/read_int8."""
    # NOTE(review): unlike test_read_write_int4, this name lacks a 'test_'
    # prefix — confirm the test runner still discovers it.
    stream = BytesIO()
    for value in (0, 1, 2 ** 60):
        stream.seek(0)
        write_int8(stream, value)
        stream.seek(0)
        assert read_int8(stream) == value
def b(self):
    """Round-trip raw byte strings of several lengths through write/read."""
    stream = BytesIO()
    for text in ('', 'a', 'ab', 'a' * 1000):
        payload = as_bytes(text)
        stream.seek(0)
        write(stream, payload)
        stream.seek(0)
        assert read(stream, len(payload)) == payload
def d(self):
    """Round-trip length-prefixed strings through write_int4_str/read_int4_str."""
    stream = BytesIO()
    for text in ("", "a", "ab", "a" * 1000):
        payload = as_bytes(text)
        stream.seek(0)
        write_int4_str(stream, payload)
        stream.seek(0)
        assert read_int4_str(stream) == payload
def b(self):
    """Round-trip raw byte strings of several lengths through write/read."""
    stream = BytesIO()
    for text in ("", "a", "ab", "a" * 1000):
        payload = as_bytes(text)
        stream.seek(0)
        write(stream, payload)
        stream.seek(0)
        assert read(stream, len(payload)) == payload
def __init__(self, connection):
    """Prepare a reusable pickler bound to *connection* that records
    persistent references as objects are serialized."""
    self.connection = connection
    self.objects_found = []
    self.refs = set()  # filled in by _persistent_id()
    self.sio = BytesIO()
    self.pickler = Pickler(self.sio, PICKLE_PROTOCOL)
    persistent_id = method(call_if_persistent, self._persistent_id)
    self.pickler.persistent_id = persistent_id
def d(self):
    """Round-trip length-prefixed strings through write_int4_str/read_int4_str."""
    stream = BytesIO()
    for text in ('', 'a', 'ab', 'a' * 1000):
        payload = as_bytes(text)
        stream.seek(0)
        write_int4_str(stream, payload)
        stream.seek(0)
        assert read_int4_str(stream) == payload
def e(self):
    """Round-trip int8-length-prefixed strings with durus.utils.TRACE on.

    The original version restored TRACE only on the success path, so a
    failing assertion left the module-level flag enabled for every later
    test.  The flag is now saved and restored in a finally block.
    """
    stream = BytesIO()
    saved_trace = durus.utils.TRACE
    durus.utils.TRACE = True
    try:
        for text in ('', 'a', 'ab', 'a' * 1000):
            payload = as_bytes(text)
            stream.seek(0)
            write_int8_str(stream, payload)
            stream.seek(0)
            assert read_int8_str(stream) == payload
    finally:
        # Restore the global even if an assertion above fails.
        durus.utils.TRACE = saved_trace
def b(self):
    """Exercise WordArray: fill, overwrite, negative index, bounds, reload."""
    count = 1000
    stream = BytesIO()
    word_array = WordArray(file=stream, bytes_per_word=8,
                           number_of_words=count)
    for index in xrange(count):
        word_array[index] = int8_to_str(index)
    assert word_array[-1] == int8_to_str(count - 1)
    for index in xrange(count):
        assert str_to_int8(word_array[index]) == index
        word_array[index] = int8_to_str(2 * index)
        assert str_to_int8(word_array[index]) / 2 == index
    assert len(word_array) == count
    assert raises(IndexError, word_array.__getitem__, count + 1)
    # Re-open the same serialized bytes and mutate via negative index.
    stream.seek(0)
    word_array2 = WordArray(file=stream)
    word_array2[-1] = as_bytes('mmmmmmmm')
    assert word_array2[-1] == as_bytes('mmmmmmmm')
def a(self):
    """Exercise ByteArray: init bytes, item get/set, slices, set_size."""
    stream = BytesIO()
    byte_array = ByteArray(size=10000, file=stream)
    zero = as_bytes('\x00')
    assert list(byte_array) == [zero] * 10000, list(byte_array)
    for index in xrange(10000):
        assert byte_array[index] == zero
    bang = as_bytes('!')
    for index in xrange(10000):
        byte_array[index] = bang
    for index in xrange(10000):
        assert byte_array[index] == bang
    assert byte_array[0:3] == as_bytes('!!!')
    assert byte_array[47:50] == as_bytes('!!!'), repr(byte_array[47:50])
    # A fresh array sized after construction takes an explicit init byte.
    stream = BytesIO()
    byte_array2 = ByteArray(file=stream)
    byte_array2.set_size(10000, init_byte=as_bytes('\xff'))
    for index in xrange(10000):
        assert byte_array2[index] == as_bytes('\xff'), (index, byte_array2[index])
    # Re-reading with a conflicting size must fail.
    stream.seek(0)
    raises(AssertionError, ByteArray, size=20000, file=stream)
class FakeSocket (object):
    """Socket stand-in: serves reads from an in-memory buffer and echoes
    every send/write to stdout so tests can observe the traffic."""

    def __init__(self, *args):
        # All arguments are coerced to bytes and concatenated into the
        # buffer that recv() will consume.
        self.io = BytesIO(join_bytes(map(as_bytes, args)))

    def recv(self, n):
        sys.stdout.write('recv %s\n' % n)
        return self.io.read(n)

    def sendall(self, s):
        # Outgoing data is only logged, never stored.
        sys.stdout.write('sendall %r\n' % s)

    def write(self, s):
        sys.stdout.write('write %r\n' % s)
def __init__(self, *args):
    # Coerce each argument to bytes and concatenate them into one
    # readable in-memory buffer.
    self.io = BytesIO(join_bytes(map(as_bytes, args)))
def d(self):
    """A stream without the shelf prefix is rejected; a truncated shelf
    raises ShortRead."""
    bogus = BytesIO(as_bytes('nope'))
    assert raises(AssertionError, Shelf, bogus, readonly=True)
    truncated = BytesIO(as_bytes("SHELF-1\nbogus"))
    assert raises(ShortRead, Shelf, truncated, readonly=True)
class ObjectWriter(object):
    """
    Serializes objects for storage in the database.

    The client is responsible for calling the close() method to avoid
    leaking memory.  The ObjectWriter uses a Pickler internally, and
    Pickler objects do not participate in garbage collection.
    """
    def __init__(self, connection):
        # Reusable in-memory buffer and pickler; get_state() rewinds and
        # truncates the buffer on every call instead of reallocating.
        self.sio = BytesIO()
        self.pickler = Pickler(self.sio, PICKLE_PROTOCOL)
        self.pickler.persistent_id = method(call_if_persistent, self._persistent_id)
        # Objects assigned a new oid during pickling (consumed by
        # gen_new_objects()).
        self.objects_found = []
        self.refs = set() # populated by _persistent_id()
        self.connection = connection

    def close(self):
        # see ObjectWriter.__doc__
        # Explicitly break cycle involving pickler
        self.pickler.persistent_id = int
        self.pickler = None

    def _persistent_id(self, obj):
        """(PersistentBase) -> (oid:str, klass:type)
        This is called on PersistentBase instances during pickling.
        """
        if obj._p_oid is None:
            # First sighting: allocate an oid, bind to this connection,
            # and remember the object so gen_new_objects() can yield it.
            obj._p_oid = self.connection.new_oid()
            obj._p_connection = self.connection
            self.objects_found.append(obj)
        elif obj._p_connection is not self.connection:
            # Cross-connection references are not allowed.
            raise ValueError("Reference to %r has a different connection." % obj)
        self.refs.add(obj._p_oid)
        return obj._p_oid, type(obj)

    def gen_new_objects(self, obj):
        """Yield *obj* followed by every object newly discovered while it
        was pickled.  May only be called once per ObjectWriter."""
        def once(obj):
            raise RuntimeError('gen_new_objects() already called.')
        # Replace the bound method so a second call raises immediately.
        self.gen_new_objects = once
        yield obj # The modified object is also a "new" object.
        for obj in self.objects_found:
            yield obj

    def get_state(self, obj):
        """Pickle *obj* and return (data, refs) where data is the pickled
        type followed by the (possibly compressed) pickled state, and refs
        is the joined oids referenced by the state."""
        self.sio.seek(0) # recycle BytesIO instance
        self.sio.truncate()
        self.pickler.clear_memo()
        self.pickler.dump(type(obj))
        self.refs.clear()
        # Buffer position marks the boundary between type and state pickles.
        position = self.sio.tell()
        self.pickler.dump(obj.__getstate__())
        uncompressed = self.sio.getvalue()
        pickled_type = uncompressed[:position]
        pickled_state = uncompressed[position:]
        if WRITE_COMPRESSED_STATE_PICKLES:
            state = compress(pickled_state)
        else:
            state = pickled_state
        data = pickled_type + state
        # The object is not a reference to itself.
        self.refs.discard(obj._p_oid)
        return data, join_bytes(self.refs)
def c(self):
    """Constructing a WordArray over a malformed stream raises ShortRead."""
    garbage = BytesIO(as_bytes('asdfasdfadsf'))
    garbage.seek(0)
    assert raises(ShortRead, WordArray, file=garbage)
class ObjectWriter (object):
    """
    Serializes objects for storage in the database.

    The client is responsible for calling the close() method to avoid
    leaking memory.  The ObjectWriter uses a Pickler internally, and
    Pickler objects do not participate in garbage collection.
    """
    def __init__(self, connection):
        # Reusable in-memory buffer and pickler; get_state() rewinds and
        # truncates the buffer on every call instead of reallocating.
        self.sio = BytesIO()
        self.pickler = Pickler(self.sio, PICKLE_PROTOCOL)
        self.pickler.persistent_id = method(
            call_if_persistent, self._persistent_id)
        # Objects assigned a new oid during pickling (consumed by
        # gen_new_objects()).
        self.objects_found = []
        self.refs = set() # populated by _persistent_id()
        self.connection = connection

    def close(self):
        # see ObjectWriter.__doc__
        # Explicitly break cycle involving pickler
        self.pickler.persistent_id = int
        self.pickler = None

    def _persistent_id(self, obj):
        """(PersistentBase) -> (oid:str, klass:type)
        This is called on PersistentBase instances during pickling.
        """
        if obj._p_oid is None:
            # First sighting: allocate an oid, bind to this connection,
            # and remember the object so gen_new_objects() can yield it.
            obj._p_oid = self.connection.new_oid()
            obj._p_connection = self.connection
            self.objects_found.append(obj)
        elif obj._p_connection is not self.connection:
            # Cross-connection references are not allowed.
            raise ValueError(
                "Reference to %r has a different connection." % obj)
        self.refs.add(obj._p_oid)
        return obj._p_oid, type(obj)

    def gen_new_objects(self, obj):
        """Yield *obj* followed by every object newly discovered while it
        was pickled.  May only be called once per ObjectWriter."""
        def once(obj):
            raise RuntimeError('gen_new_objects() already called.')
        # Replace the bound method so a second call raises immediately.
        self.gen_new_objects = once
        yield obj # The modified object is also a "new" object.
        for obj in self.objects_found:
            yield obj

    def get_state(self, obj):
        """Pickle *obj* and return (data, refs) where data is the pickled
        type followed by the (possibly compressed) pickled state, and refs
        is the joined oids referenced by the state."""
        self.sio.seek(0) # recycle BytesIO instance
        self.sio.truncate()
        self.pickler.clear_memo()
        self.pickler.dump(type(obj))
        self.refs.clear()
        # Buffer position marks the boundary between type and state pickles.
        position = self.sio.tell()
        self.pickler.dump(obj.__getstate__())
        uncompressed = self.sio.getvalue()
        pickled_type = uncompressed[:position]
        pickled_state = uncompressed[position:]
        if WRITE_COMPRESSED_STATE_PICKLES:
            state = compress(pickled_state)
        else:
            state = pickled_state
        data = pickled_type + state
        # The object is not a reference to itself.
        self.refs.discard(obj._p_oid)
        return data, join_bytes(self.refs)
def get_state(self, data, load=True):
    """Decode a stored record: a pickled class followed by a (possibly
    compressed) pickled state.  Returns the unpickled state when *load*
    is true, otherwise the raw (decompressed) state bytes."""
    self.load_count += 1
    s = BytesIO()
    s.write(data)
    s.seek(0)
    unpickler = self._get_unpickler(s)
    # Loading the class pickle advances the stream to where the state
    # begins; the class object itself is not used here.
    klass = unpickler.load()
    position = s.tell()
    # NOTE(review): indexes one byte past the class pickle — assumes a
    # state always follows (IndexError otherwise); confirm with callers.
    if data[s.tell()] == COMPRESSED_START_BYTE:
        # This is almost certainly a compressed pickle.
        try:
            decompressed = decompress(data[position:])
        except zlib_error:
            pass # let the unpickler try anyway.
        else:
            # Overwrite the compressed tail in place, then rewind to it.
            # NOTE(review): presumably the decompressed state is at least
            # as long as the compressed bytes; if it were shorter, stale
            # compressed bytes would remain after it — verify.
            s.write(decompressed)
            s.seek(position)
    if load:
        return unpickler.load()
    else:
        return s.read()