def _makeFile(self, id='test', title='', file='', chunks=()):
    from OFS.Image import File
    from OFS.Image import Pdata
    fileobj = File(id, title, file)
    fileobj._p_jar = DummyConnection()
    if chunks:
        # Build the Pdata chain back to front so each node's
        # next pointer can be set as the node is created.
        chunks = list(chunks)
        chunks.reverse()
        head = None
        for chunk in chunks:
            pdata = Pdata(chunk)
            pdata.next = head
            head = pdata
        fileobj.data = head
    return fileobj
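# For orientation: the chain _makeFile builds can be flattened back into a
# single string by following the next pointers. Minimal sketch; the walker's
# name is illustrative, and DummyConnection is assumed to be a stub defined
# elsewhere in the test module.
def read_pdata_chain(head):
    parts = []
    while head is not None:
        parts.append(head.data)  # each Pdata node stores one chunk in .data
        head = head.next
    return ''.join(parts)

# e.g. read_pdata_chain(self._makeFile(chunks=('foo', 'bar')).data) == 'foobar'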
import transaction
from cStringIO import StringIO
from OFS.Image import Pdata
from ZPublisher.HTTPRequest import FileUpload


def _read_data(self, persistent_object, value):
    """Copied from OFS.Image._read_data with some modernisation.

    Always returns a Pdata and its size.

    - persistent_object: persistent object known to the storage; its
      _p_jar is used to store the Pdata chunks.
    - value: value to wrap into Pdata.
    """
    n = self._max_len
    if isinstance(value, (str, unicode)):
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        size = len(value)
        if size < n:
            return Pdata(value), size
        # Big string: cut it into smaller chunks.
        value = StringIO(value)
    if isinstance(value, FileUpload) and not value:
        raise ValueError('File not specified')
    if isinstance(value, Pdata):
        size = self._read_size_from_pdata(value)
        return value, size
    # Clear md5sum to force refreshing.
    self.md5sum = None
    seek = value.seek
    read = value.read
    seek(0, 2)
    size = end = value.tell()
    if size <= 2 * n:
        seek(0)
        return Pdata(read(size)), size
    # Make sure we have a _p_jar, even if we are a new object, by
    # doing a savepoint (sub-transaction commit).
    transaction.savepoint(optimistic=True)
    if persistent_object._p_jar is None:
        # Ugh
        seek(0)
        return Pdata(read(size)), size
    # Build a linked list from back to front to minimize the number of
    # database updates and to release memory as soon as possible.
    next_ = None
    while end > 0:
        pos = end - n
        if pos < n:
            pos = 0  # we always want at least n bytes
        seek(pos)
        # Create the object and assign its next pointer in the same
        # transaction, so that there is only a single database update
        # for it.
        data = Pdata(read(end - pos))
        persistent_object._p_jar.add(data)
        data.next = next_
        # Save the object so that we can release its memory.
        transaction.savepoint(optimistic=True)
        data._p_deactivate()
        # The object should now have an oid and be a ghost.
        assert data._p_oid is not None
        assert data._p_state == -1
        next_ = data
        end = pos
    return next_, size
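# _read_size_from_pdata is called above but not shown here. A plausible
# implementation (an assumption, not the original code) simply walks the
# chain and sums the chunk lengths:
def _read_size_from_pdata(self, pdata):
    size = 0
    while pdata is not None:
        size += len(pdata.data)  # hypothetical: relies only on Pdata.data
        pdata = pdata.next
    return size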
from ZODB.interfaces import IBlobStorage


def _read_data(self, file):
    n = 1 << 16
    # Make sure we have a _p_jar, even if we are a new object, by
    # doing a savepoint (sub-transaction commit).
    transaction.savepoint(optimistic=True)
    if isinstance(file, str):
        size = len(file)
        if size < n or IBlobStorage.providedBy(self._p_jar.db().storage):
            # For blobs, and for strings smaller than n, nothing has to
            # be cut up.
            return file, size
        # Big string: cut it into smaller chunks.
        file = StringIO(file)
    if isinstance(file, FileUpload) and not file:
        raise ValueError('File not specified')
    if hasattr(file, '__class__') and file.__class__ is Pdata:
        size = len(file)
        return file, size
    seek = file.seek
    read = file.read
    seek(0, 2)
    size = end = file.tell()
    if IBlobStorage.providedBy(self._p_jar.db().storage):
        seek(0)
        return read(size), size
    if size <= 2 * n:
        seek(0)
        if size < n:
            return read(size), size
        return Pdata(read(size)), size
    if self._p_jar is None:
        # Ugh
        seek(0)
        return Pdata(read(size)), size
    # Build a linked list from back to front to minimize the number of
    # database updates and to release memory as soon as possible.
    next_ = None
    while end > 0:
        pos = end - n
        if pos < n:
            pos = 0  # we always want at least n bytes
        seek(pos)
        # Create the object and assign its next pointer in the same
        # transaction, so that there is only a single database update
        # for it.
        data = Pdata(read(end - pos))
        self._p_jar.add(data)
        data.next = next_
        # Save the object so that we can release its memory.
        transaction.savepoint(optimistic=True)
        data._p_deactivate()
        # The object should now have an oid and be a ghost.
        assert data._p_oid is not None
        assert data._p_state == -1
        next_ = data
        end = pos
    return next_, size
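# To see why the loops above build the chain back to front, here is a
# self-contained sketch of the same chunking logic with a plain stand-in
# for Pdata (Chunk and chunkify are illustrative names, not original code):
class Chunk(object):
    def __init__(self, data):
        self.data = data
        self.next = None

def chunkify(payload, n=1 << 16):
    # Start from the end so each node's next pointer is known at creation
    # time; this is what lets _read_data write each Pdata exactly once.
    end = len(payload)
    head = None
    while end > 0:
        pos = end - n
        if pos < n:
            pos = 0  # always keep at least n bytes in the first chunk
        node = Chunk(payload[pos:end])
        node.next = head
        head = node
        end = pos
    return head

# Example: a 150 KiB payload with n = 64 KiB yields two chunks: the first
# holds 86 KiB (pos snapped to 0) and the second holds 64 KiB.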