def test_shuffled_randomblocks(self):
    ''' Save RUN_SIZE random blocks, close, retrieve in random order. '''
    # Phase 1: append RUN_SIZE serialised DataRecords to the scratch file,
    # remembering each record's start offset and its raw payload.
    blocks = {}
    with open(self.pathname, 'wb') as f:
        for n in range(RUN_SIZE):
            with self.subTest(put_block_n=n):
                data = make_randblock(rand0(MAX_BLOCK_SIZE + 1))
                dr = DataRecord(data)
                offset = f.tell()
                blocks[offset] = data
                f.write(bytes(dr))
    # Phase 2: shuffle the record offsets so retrieval order is
    # independent of write order.
    offsets = list(blocks.keys())
    random.shuffle(offsets)
    # Phase 3: reparse each record at its offset and check the payload.
    with open(self.pathname, 'rb') as f:
        for n, offset in enumerate(offsets):
            with self.subTest(shuffled_offsets_n=n, offset=offset):
                f.seek(offset)
                bfr = CornuCopyBuffer.from_file(f)
                dr = DataRecord.parse(bfr)
                # assertEqual reports a useful diff on failure,
                # unlike the former assertTrue(data == ...).
                self.assertEqual(dr.data, blocks[offset])
def upload_file(
    self,
    f,
    *,
    bucket_name: str,
    path: str,
    file_info=None,
    content_type=None,
    upload_progress=None,
):
    ''' Upload the data from the file `f` to `path` within `bucket_name`.
        Return a `dict` containing the upload result.

        This default implementation wraps `f` in a `CornuCopyBuffer`
        and delegates to `self.upload_buffer()`.

        Parameters:
        * `f`: the file
        * `bucket_name`: the bucket name
        * `path`: the subpath within the bucket
        * `file_info`: an optional mapping of extra information about the file
        * `content_type`: an optional MIME content type value
        * `upload_progress`: an optional `cs.progress.Progress` instance
          to which to report upload data
    '''
    # Wrap the file in a buffer, then hand off to the buffer based upload.
    bfr = CornuCopyBuffer.from_file(f)
    return self.upload_buffer(
        bfr,
        bucket_name=bucket_name,
        path=path,
        file_info=file_info,
        content_type=content_type,
        upload_progress=upload_progress,
    )
def upload_file(
    self,
    f,
    *,
    bucket_name: str,
    path: str,
    file_info=None,
    content_type=None,
    upload_progress=None,
):
    ''' Upload the data from the file `f` to `path` within `bucket_name`.
        Return a `dict` containing the B2 `FileVersion` attribute values.

        Note that the b2api expects to be able to seek when given a file
        so this tries to `mmap.mmap` the file and use the bytes upload
        interface, falling back to copying to a scratch file.

        Parameters:
        * `f`: the file, preferably seekable
        * `bucket_name`: the bucket name
        * `path`: the subpath within the bucket
        * `file_info`: an optional mapping of extra information about the file
        * `content_type`: an optional MIME content type value
        * `upload_progress`: an optional `cs.progress.Progress` instance
          to which to report upload data
    '''
    try:
        fd = f.fileno()
        mm = mmap(fd, 0, prot=PROT_READ)
    except (AttributeError, OSError) as e:
        # no .fileno, or the descriptor is not mmapable:
        # fall back to the buffer based upload
        warning("f=%s: %s", f, e)
        bfr = f if isinstance(
            f, CornuCopyBuffer) else CornuCopyBuffer.from_file(f)
        return self.upload_buffer(
            bfr,
            bucket_name=bucket_name,
            path=path,
            file_info=file_info,
            content_type=content_type,
            upload_progress=upload_progress,
        )
    else:
        # NOTE(review): file_info and content_type are not forwarded on
        # this path - confirm whether _b2_upload_bytes should accept them.
        try:
            file_version = self._b2_upload_bytes(
                mm,
                bucket_name=bucket_name,
                path=path,
                upload_progress=upload_progress,
            )
        finally:
            # release the mapping: previously this leaked until GC
            mm.close()
        return file_version.as_dict()
def __init__(self, recv, send, request_handler=None, name=None, packet_grace=None, tick=None):
    ''' Initialise the PacketConnection.

        Parameters:
        * `recv`: inbound binary stream.
          If this is an `int` it is taken to be an OS file descriptor,
          otherwise it should be a `cs.buffer.CornuCopyBuffer`
          or a file like object with a `read1` or `read` method.
        * `send`: outbound binary stream.
          If this is an `int` it is taken to be an OS file descriptor,
          otherwise it should be a file like object
          with `.write(bytes)` and `.flush()` methods.
          For a file descriptor sending is done via an os.dup() of the
          supplied descriptor, so the caller remains responsible for
          closing the original descriptor.
        * `packet_grace`:
          default pause in the packet sending worker
          to allow another packet to be queued
          before flushing the output stream.
          Default: `DEFAULT_PACKET_GRACE`s.
          A value of `0` will flush immediately if the queue is empty.
        * `request_handler`: an optional callable accepting
          (`rq_type`, `flags`, `payload`).
          The request_handler may return one of 5 values on success:
          * `None`: response will be 0 flags and an empty payload.
          * `int`: flags only. Response will be the flags and an empty payload.
          * `bytes`: payload only. Response will be 0 flags and the payload.
          * `str`: payload only. Response will be 0 flags and the str
                  encoded as bytes using UTF-8.
          * `(int, bytes)`: Specify flags and payload for response.
          An unsuccessful request should raise an exception, which
          will cause a failure response packet.
        * `tick`: optional tick parameter, default `None`.
          If `None`, do nothing.
          If a Boolean, call `tick_fd_2` if true, otherwise do nothing.
          Otherwise `tick` should be a callable accepting a byteslike value.
    '''
    # default the connection name to a process-wide sequence number
    if name is None:
        name = str(seq())
    self.name = name
    # normalise recv into a CornuCopyBuffer:
    # int => file descriptor, CornuCopyBuffer => as is, else file-like
    if isinstance(recv, int):
        self._recv = CornuCopyBuffer.from_fd(recv)
    elif isinstance(recv, CornuCopyBuffer):
        self._recv = recv
    else:
        self._recv = CornuCopyBuffer.from_file(recv)
    # normalise send into a binary file-like object;
    # an int fd is dup()ed so the caller still owns the original
    if isinstance(send, int):
        self._send = os.fdopen(os.dup(send), 'wb')
    else:
        self._send = send
    if packet_grace is None:
        packet_grace = DEFAULT_PACKET_GRACE
    # normalise tick into a callable accepting a byteslike value
    if tick is None:
        tick = lambda bs: None
    elif isinstance(tick, bool):
        if tick:
            tick = tick_fd_2
        else:
            tick = lambda bs: None
    self.packet_grace = packet_grace
    self.request_handler = request_handler
    self.tick = tick
    # tags of requests in play against the local system
    self._channel_request_tags = {0: set()}
    self.notify_recv_eof = set()
    self.notify_send_eof = set()
    # LateFunctions for the requests we are performing for the remote system
    self._running = set()
    # requests we have outstanding against the remote system
    self._pending = {0: {}}
    # sequence of tag numbers
    # TODO: later, reuse old tags to prevent monotonic growth of tag field
    self._tag_seq = Seq(1)
    # work queue for local requests
    self._later = Later(4, name="%s:Later" % (self, ))
    self._later.open()
    # dispatch queue of Packets to send
    self._sendQ = IterableQueue(16)
    self._lock = Lock()
    self.closed = False
    # debugging: check for reuse of (channel,tag) etc
    self.__sent = set()
    self.__send_queued = set()
    # NOTE: threads are started last, after all attributes above are set,
    # because their loops read this state immediately.
    # dispatch Thread to process received packets
    self._recv_thread = bg_thread(self._receive_loop, name="%s[_receive_loop]" % (self.name, ))
    # dispatch Thread to send data
    # primary purpose is to bundle output by deferring flushes
    self._send_thread = bg_thread(self._send_loop, name="%s[_send]" % (self.name, ))