def test_out_of_order_io_requests(self):
    """Writes queued out of offset order must land at their offsets."""
    requests = [
        IORequest(self.filename, 6, b'morestuff'),
        IORequest(self.filename, 0, b'foobar'),
        IOCloseRequest(self.filename),
        ShutdownThreadRequest(),
    ]
    for request in requests:
        self.queue.put(request)
    self.io_thread.run()
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'foobarmorestuff')
def test_io_thread_moves_on_after_failed_task(self):
    """A failed IORequest must not terminate the IO thread's run loop."""
    # The first request fails: 'unknown-file' does not exist.
    self.queue.put(IORequest('unknown-file', 0, b'foobar', False))
    # The IO thread should still process the remaining requests
    # instead of exiting its run loop.
    for request in (IORequest(self.filename, 0, b'foobar', False),
                    IOCloseRequest(self.filename),
                    ShutdownThreadRequest()):
        self.queue.put(request)
    self.io_thread.run()
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'foobar')
def test_stream_requests(self):
    """Stream writes go to stdout in FIFO order; offsets are ignored."""
    # Offsets have no effect on ordering for a stream: requests are
    # written first in, first out.
    self.queue.put(IORequest('nonexistant-file', 10, b'foobar', True))
    self.queue.put(IORequest('nonexistant-file', 6, b'otherstuff', True))
    # No IOCloseRequest is queued: when writing to stdout the thread
    # must not attempt to close the file name, which would fail here
    # because the file does not exist.
    self.queue.put(ShutdownThreadRequest())
    with mock.patch('sys.stdout', new=six.StringIO()) as mock_stdout:
        self.io_thread.run()
        self.assertEqual(mock_stdout.getvalue(), 'foobarotherstuff')
def test_multiple_files_in_queue(self):
    """Interleaved requests for two files each write to the right file."""
    second_file = os.path.join(self.temp_dir, 'bar')
    open(second_file, 'w').close()
    for request in (IORequest(self.filename, 0, b'foobar'),
                    IORequest(second_file, 0, b'otherstuff'),
                    IOCloseRequest(second_file),
                    IOCloseRequest(self.filename),
                    ShutdownThreadRequest()):
        self.queue.put(request)
    self.io_thread.run()
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'foobar')
    with open(second_file, 'rb') as f:
        self.assertEqual(f.read(), b'otherstuff')
def test_handles_io_request(self):
    """A single write followed by a close is flushed to disk."""
    for request in (IORequest(self.filename, 0, b'foobar'),
                    IOCloseRequest(self.filename),
                    ShutdownThreadRequest()):
        self.queue.put(request)
    self.io_thread.run()
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'foobar')
def test_handles_io_request(self):
    """A single write followed by a close is flushed to disk."""
    for request in (IORequest(self.filename, 0, b'foobar'),
                    IOCloseRequest(self.filename),
                    QUEUE_END_SENTINEL):
        self.queue.put(request)
    self.io_thread.run()
    with open(self.filename, 'rb') as f:
        self.assertEqual(f.read(), b'foobar')
def test_mtime_set_at_file_close_time(self):
    """IOCloseRequest may pin the file's mtime to an explicit value."""
    # Use a time other than "now" so we can verify the mtime came
    # from the IOCloseRequest rather than from the close itself.
    expected_mtime = int(time.time() - 100)
    self.queue.put(IORequest(self.filename, 0, b'foobar', False))
    self.queue.put(IOCloseRequest(self.filename, expected_mtime))
    self.queue.put(ShutdownThreadRequest())
    self.io_thread.run()
    actual_mtime = int(os.stat(self.filename).st_mtime)
    self.assertEqual(actual_mtime, expected_mtime)
def _queue_writes_in_chunks(self, body, iterate_chunk_size):
    """Read ``body`` in chunks and queue one IORequest per chunk.

    Each chunk's write offset is the start of this part plus the
    number of bytes already read from the body.
    """
    base_offset = self._part_number * self._chunk_size
    amount_read = 0
    while True:
        chunk = body.read(iterate_chunk_size)
        if not chunk:
            break
        LOGGER.debug("Submitting IORequest to write queue.")
        self._io_queue.put(
            IORequest(self._filename.dest, base_offset + amount_read,
                      chunk, self._filename.is_stream))
        LOGGER.debug("Request successfully submitted.")
        amount_read += len(chunk)
    LOGGER.debug("Done queueing writes for part number %s to file: %s",
                 self._part_number, self._filename.dest)
def _queue_writes_for_stream(self, body):
    """Queue a single, full-chunk write for an output stream.

    An output stream cannot seek(), so writes must be queued in
    order.  Queueing writes in smaller-than-part-size chunks would
    require a range GET for the remaining pieces on retry; instead
    the entire chunk is read and submitted as one write.
    """
    self._context.wait_for_turn(self._part_number)
    data = body.read()
    part_offset = self._part_number * self._chunk_size
    LOGGER.debug("Submitting IORequest to write queue.")
    self._io_queue.put(
        IORequest(self._filename.dest, part_offset, data,
                  self._filename.is_stream))
    self._context.done_with_turn()
def _queue_writes(self, body):
    """Stream ``body`` into the IO queue as fixed-size write requests."""
    self._context.wait_for_file_created()
    LOGGER.debug("Writing part number %s to file: %s",
                 self._part_number, self._filename.dest)
    read_size = self.ITERATE_CHUNK_SIZE
    body.set_socket_timeout(self.READ_TIMEOUT)
    base_offset = self._part_number * self._chunk_size
    amount_read = 0
    while True:
        data = body.read(read_size)
        if not data:
            break
        self._io_queue.put(
            IORequest(self._filename.dest, base_offset + amount_read, data))
        amount_read += len(data)
    LOGGER.debug("Done queueing writes for part number %s to file: %s",
                 self._part_number, self._filename.dest)
def _queue_writes(self, body):
    """Stream ``body`` into the IO queue as write requests.

    For stream destinations, this waits for the part's turn before
    queueing (streams must be written in order) and releases the
    turn once every chunk has been queued.
    """
    self._context.wait_for_file_created()
    LOGGER.debug("Writing part number %s to file: %s",
                 self._part_number, self._filename.dest)
    read_size = self.ITERATE_CHUNK_SIZE
    body.set_socket_timeout(self.READ_TIMEOUT)
    amount_read = 0
    # NOTE: the first read happens before waiting for the turn, same
    # as the original statement order.
    chunk = body.read(read_size)
    is_stream = self._filename.is_stream
    if is_stream:
        self._context.wait_for_turn(self._part_number)
    while chunk:
        offset = self._part_number * self._chunk_size + amount_read
        LOGGER.debug("Submitting IORequest to write queue.")
        self._io_queue.put(
            IORequest(self._filename.dest, offset, chunk, is_stream))
        LOGGER.debug("Request successfully submitted.")
        amount_read += len(chunk)
        chunk = body.read(read_size)
    LOGGER.debug("Done queueing writes for part number %s to file: %s",
                 self._part_number, self._filename.dest)
    if is_stream:
        self._context.done_with_turn()
def __call__(self):
    """Flood the executor's write queue with 50 identical IORequests."""
    for _ in range(50):
        executor.write_queue.put(IORequest(f.name, 0, b'foobar'))