def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.

    :Parameters:
     - `size` (optional): the maximum number of bytes to read
    """
    if size == 0:
        return b''

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            size = received + pos + 1

        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    if size == 0:
        return EMPTY

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks.
    max_chunk_n = math.ceil(self.length / float(self.chunk_size))
    chunk = self.__chunks.find_one({"files_id": self._id,
                                    "n": {"$gte": max_chunk_n}})
    # According to spec, ignore extra chunks if they are empty.
    if chunk is not None and len(chunk['data']):
        raise CorruptGridFile(
            "Extra chunk found: expected %i chunks but found "
            "chunk with n=%i" % (max_chunk_n, chunk['n']))

    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
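
# Illustrative usage sketch, not part of this module: exercising GridOut.read
# and GridOut.readline through the public gridfs.GridFS API. The host, port,
# database name, and payload below are placeholder assumptions, and a
# reachable mongod is required.
import gridfs
from pymongo import MongoClient

example_db = MongoClient("localhost", 27017).example_database
example_fs = gridfs.GridFS(example_db)
example_id = example_fs.put(b"first line\nsecond line\n", filename="lines.txt")

grid_out = example_fs.get(example_id)
assert grid_out.readline() == b"first line\n"   # stops after the newline
assert grid_out.read(6) == b"second"            # at most `size` bytes
assert grid_out.read() == b" line\n"            # omitted size reads the rest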
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, **kwargs):
        """Write a file to GridFS

        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.

        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 255 kb)

          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `**kwargs` (optional): file level options (see above)

        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")

        # With w=0, 'filemd5' might run before the final chunks are written.
        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError('root_collection must use '
                                     'acknowledged write_concern')

        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")

        coll = root_collection.with_options(
            read_preference=ReadPreference.PRIMARY)

        kwargs['md5'] = md5()
        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        object.__setattr__(self, "_coll", coll)
        object.__setattr__(self, "_chunks", coll.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", StringIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)

    def __create_index(self, collection, index_key, unique):
        doc = collection.find_one(projection={"_id": 1})
        if doc is None:
            try:
                index_keys = [index_spec['key'] for index_spec in
                              collection.list_indexes()]
            except OperationFailure:
                index_keys = []
            if index_key not in index_keys:
                collection.create_index(index_key.items(), unique=unique)

    def __ensure_indexes(self):
        if not object.__getattribute__(self, "_ensured_index"):
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)

    def abort(self):
        """Remove all chunks/files that may have been uploaded and close.
        """
        self._coll.chunks.delete_many({"files_id": self._file['_id']})
        self._coll.files.delete_one({"_id": self._file['_id']})
        object.__setattr__(self, "_closed", True)

    @property
    def closed(self):
        """Is this file closed?
""" return self._closed _id = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) filename = _grid_in_property("filename", "Name of this file.") name = _grid_in_property("filename", "Alias for `filename`.") content_type = _grid_in_property("contentType", "Mime-type for this file.") length = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) upload_date = _grid_in_property("uploadDate", "Date that this file was uploaded.", closed_only=True) md5 = _grid_in_property("md5", "MD5 of the contents of this file " "(generated on the server).", closed_only=True) def __getattr__(self, name): if name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name, value): # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: object.__setattr__(self, name, value) else: # All other attributes are part of the document in db.fs.files. # Store them to be sent to server on close() or if closed, send # them now. self._file[name] = value if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) def __flush_data(self, data): """Flush `data` to a chunk. """ # Ensure the index, even if there's nothing to write, so # the filemd5 command always succeeds. self.__ensure_indexes() self._file['md5'].update(data) if not data: return assert(len(data) <= self.chunk_size) chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} try: self._chunks.insert_one(chunk) except DuplicateKeyError: self._raise_file_exists(self._file['_id']) self._chunk_number += 1 self._position += len(data) def __flush_buffer(self): """Flush the buffer contents out to a chunk. """ self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = StringIO() def __flush(self): """Flush the file to the database. """ try: self.__flush_buffer() self._file['md5'] = self._file["md5"].hexdigest() self._file["length"] = self._position self._file["uploadDate"] = datetime.datetime.utcnow() return self._coll.files.insert_one(self._file) except DuplicateKeyError: self._raise_file_exists(self._id) def _raise_file_exists(self, file_id): """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) def close(self): """Flush the file and close it. A closed file cannot be written any more. Calling :meth:`close` more than once is allowed. """ if not self._closed: self.__flush() object.__setattr__(self, "_closed", True) def write(self, data): """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as :attr:`encoding` before being written. Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of :class:`str` (:class:`bytes` in python 3), a file-like object, or an instance of :class:`unicode` (:class:`str` in python 3). Unicode data is only allowed if the file has an :attr:`encoding` attribute. 
        :Parameters:
          - `data`: string of bytes or file-like object to be written
            to the file
        """
        if self._closed:
            raise ValueError("cannot write to a closed file")

        try:
            # file-like
            read = data.read
        except AttributeError:
            # string
            if not isinstance(data, (text_type, bytes)):
                raise TypeError("can only write strings or file-like "
                                "objects")
            if isinstance(data, text_type):
                try:
                    data = data.encode(self.encoding)
                except AttributeError:
                    raise TypeError("must specify an encoding for file in "
                                    "order to write %s" % (
                                        text_type.__name__,))
            read = StringIO(data).read

        if self._buffer.tell() > 0:
            # Make sure to flush only when _buffer is complete
            space = self.chunk_size - self._buffer.tell()
            if space:
                try:
                    to_write = read(space)
                except:
                    self.abort()
                    raise
                self._buffer.write(to_write)
                if len(to_write) < space:
                    return  # EOF or incomplete
            self.__flush_buffer()
        to_write = read(self.chunk_size)
        while to_write and len(to_write) == self.chunk_size:
            self.__flush_data(to_write)
            to_write = read(self.chunk_size)
        self._buffer.write(to_write)

    def writelines(self, sequence):
        """Write a sequence of strings to the file.

        Does not add separators.
        """
        for line in sequence:
            self.write(line)

    def __enter__(self):
        """Support for the context manager protocol.
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Support for the context manager protocol.

        Close the file and allow exceptions to propagate.
        """
        self.close()

        # propagate exceptions
        return False
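
# Illustrative usage sketch, not part of this module: writing a file with
# GridIn through the public gridfs.GridFS API. GridFS.new_file returns a
# GridIn instance, and leaving the `with` block calls close(), which performs
# the final flush. The database name and file options are placeholder
# assumptions, and a reachable mongod is required.
import gridfs
from pymongo import MongoClient

example_db = MongoClient("localhost", 27017).example_database
example_fs = gridfs.GridFS(example_db)

with example_fs.new_file(filename="greeting.txt", encoding="utf-8") as grid_in:
    grid_in.write(b"hello ")           # raw bytes are buffered as-is
    grid_in.write(u"world")            # text is encoded with the file's encoding
    grid_in.writelines([b"\n", b"bye\n"])

# After close() the file document is committed and `length` becomes readable.
assert grid_in.length == len(b"hello world\nbye\n")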
def _do_batched_write_command(namespace, operation, command,
                              docs, check_keys, opts, ctx):
    """Execute a batch of insert, update, or delete commands.
    """
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    ordered = command.get('ordered', True)

    buf = StringIO()
    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []

    def send_message():
        """Finalize and send the current OP_QUERY message.
        """
        # Close list and command documents
        buf.write(_ZERO_16)

        # Write document lengths and request id
        length = buf.tell()
        buf.seek(list_start)
        buf.write(struct.pack('<i', length - list_start - 1))
        buf.seek(command_start)
        buf.write(struct.pack('<i', length - command_start))
        buf.seek(4)
        request_id = _randint()
        buf.write(struct.pack('<i', request_id))
        buf.seek(0)
        buf.write(struct.pack('<i', length))

        return ctx.write_command(request_id, buf.getvalue(), to_send)

    # If there are multiple batches we'll
    # merge results in the caller.
    results = []

    idx = 0
    idx_offset = 0
    has_docs = False
    for doc in docs:
        has_docs = True
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Send a batch?
        enough_data = (buf.tell() + len(key) + len(value) + 2) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = "insert" if operation == _INSERT else None
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            result = send_message()
            results.append((idx_offset, result))
            if ordered and "writeErrors" in result:
                return results

            # Truncate back to the start of list elements
            buf.seek(list_start + 4)
            buf.truncate()
            idx_offset += idx
            idx = 0
            key = b'0'
            to_send = []
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        buf.write(value)
        to_send.append(doc)
        idx += 1

    if not has_docs:
        raise InvalidOperation("cannot do an empty bulk write")

    results.append((idx_offset, send_message()))
    return results
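
# A minimal standalone sketch of the length back-patching pattern used by
# send_message above: space for an int32 length is reserved up front, the
# payload is written, and the length is then patched in place with a
# little-endian struct.pack. The buffer contents here are illustrative only.
import struct
from io import BytesIO

patch_buf = BytesIO()
patch_buf.write(b"\x00" * 4)               # reserve 4 bytes for the int32 length
patch_buf.write(b"example payload")        # body written after the placeholder

total = patch_buf.tell()
patch_buf.seek(0)
patch_buf.write(struct.pack("<i", total))  # patch the length in place

message = patch_buf.getvalue()
assert struct.unpack("<i", message[:4])[0] == len(message)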
def _do_batched_insert(collection_name, docs, check_keys,
                       safe, last_error_args, continue_on_error, opts,
                       ctx):
    """Insert `docs` using multiple batches.
    """
    def _insert_message(insert_message, send_safe):
        """Build the insert message with header and GLE.
        """
        request_id, final_message = __pack_message(2002, insert_message)
        if send_safe:
            request_id, error_message, _ = __last_error(collection_name,
                                                        last_error_args)
            final_message += error_message
        return request_id, final_message

    send_safe = safe or not continue_on_error
    last_error = None
    data = StringIO()
    data.write(struct.pack("<i", int(continue_on_error)))
    data.write(bson._make_c_string(collection_name))
    message_length = begin_loc = data.tell()
    has_docs = False
    to_send = []
    for doc in docs:
        encoded = bson.BSON.encode(doc, check_keys, opts)
        encoded_length = len(encoded)
        too_large = (encoded_length > ctx.max_bson_size)

        message_length += encoded_length
        if message_length < ctx.max_message_size and not too_large:
            data.write(encoded)
            to_send.append(doc)
            has_docs = True
            continue

        if has_docs:
            # We have enough data, send this message.
            try:
                request_id, msg = _insert_message(data.getvalue(), send_safe)
                ctx.legacy_write(request_id, msg, 0, send_safe, to_send)
            # Exception type could be OperationFailure or a subtype
            # (e.g. DuplicateKeyError)
            except OperationFailure as exc:
                # Like it says, continue on error...
                if continue_on_error:
                    # Store exception details to re-raise after the
                    # final batch.
                    last_error = exc
                # With unacknowledged writes just return at the first error.
                elif not safe:
                    return
                # With acknowledged writes raise immediately.
                else:
                    raise

        if too_large:
            _raise_document_too_large(
                "insert", encoded_length, ctx.max_bson_size)

        message_length = begin_loc + encoded_length
        data.seek(begin_loc)
        data.truncate()
        data.write(encoded)
        to_send = [doc]

    if not has_docs:
        raise InvalidOperation("cannot do an empty bulk insert")

    request_id, msg = _insert_message(data.getvalue(), safe)
    ctx.legacy_write(request_id, msg, 0, safe, to_send)

    # Re-raise any exception stored due to continue_on_error
    if last_error is not None:
        raise last_error
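
# A minimal sketch of the batch-splitting rule used above, with stub sizes in
# place of real BSON encoding and wire writes: documents are appended to the
# current batch until the next one would push the message past the size
# limit, at which point the batch is flushed and restarted with the current
# document. The function name, the limit, and the sample docs are
# illustrative assumptions, not part of this module.
MAX_EXAMPLE_MESSAGE_SIZE = 48

def split_into_batches(encoded_docs, max_message_size=MAX_EXAMPLE_MESSAGE_SIZE):
    """Group already-encoded documents into size-bounded batches."""
    batches = []
    current, current_size = [], 0
    for encoded in encoded_docs:
        if current and current_size + len(encoded) >= max_message_size:
            batches.append(current)      # flush the full batch
            current, current_size = [], 0
        current.append(encoded)          # start or extend the next batch
        current_size += len(encoded)
    if current:
        batches.append(current)
    return batches

example_docs = [b"x" * 20, b"y" * 20, b"z" * 20]
assert [len(batch) for batch in split_into_batches(example_docs)] == [2, 1]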