def test_tell_returns_file_pointer(self, parted_file):
    # Arrange
    parted_file._file_pointer = kb(2)
    # Act
    pos = parted_file._tell()
    # Assert
    assert pos == kb(2)

def test_seek_absolute_should_set_filepointer_to_offset(self, parted_file):
    # Arrange
    parted_file._file_pointer = kb(1)
    # Act
    parted_file._seek(offset=kb(0), whence=0)
    # Assert
    assert parted_file._file_pointer == kb(0)

def test_seek_relative_should_add_offset_to_filepointer(self, parted_file):
    # Arrange
    parted_file._file_pointer = kb(1)
    # Act
    parted_file._seek(offset=kb(1), whence=1)
    # Assert
    assert parted_file._file_pointer == kb(2)

def test_read_returns_only_data_of_current_part_with_bigger_sizehint(
        self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(5)), flushing=True)
    parted_file._seek(offset=kb(3), whence=0)
    # Act
    chunk = parted_file._read(kb(2))
    # Assert
    assert len(chunk) == kb(1)

def test_read_returns_data_from_current_part_and_calls_itself_for_next_part(
        self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(5)), flushing=True)
    parted_file._seek(offset=kb(3), whence=0)
    # Act
    read_data = parted_file._read()
    # Assert
    assert len(read_data) == kb(2)

def test_seek_goes_to_current_part_and_sets_other_parts_to_start(
        self, parted_file):
    # Arrange
    parted_file.parts[0].seek = Mock()
    parted_file.parts[1].seek = Mock()
    # Act
    parted_file._seek(offset=kb(5), whence=0)
    # Assert
    parted_file.parts[0].seek.assert_called_once_with(kb(0), 0)
    parted_file.parts[1].seek.assert_called_once_with(kb(1), 0)

def test_read_as_chunks_returns_none_at_end_of_file(self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(5)), flushing=True)
    parted_file._seek(offset=kb(4), whence=0)
    parted_file._read(kb(1))
    # Act
    eof = parted_file._read(kb(1))
    # Assert
    assert eof is None

def test_open_with_existing_parts_opens_them_in_correct_order(self, fs):
    # Arrange
    fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part2", data=urandom(kb(2)))
    # Act
    f = fs.open("backup.tar", mode="r+")
    # Assert
    created_parts = [part.name for part in f.parts]
    assert created_parts == [
        "backup.tar.part0", "backup.tar.part1", "backup.tar.part2"
    ]

def test_sync_files_copies_file_if_it_only_exists_on_userfs(self, drive):
    # Arrange
    drive.userfs.setcontents("newfile.txt", urandom(kb(1)))
    drive.remotefs.setcontents("oldfile.txt", urandom(kb(1)))
    drive.userfs.setcontents("oldfile.txt", urandom(kb(2)))
    # Act
    drive.sync_files()
    # Assert
    assert drive.remotefs.exists("newfile.txt")
    assert drive.remotefs.getsize("oldfile.txt") == kb(2)

def test_patchfile_updates_remote(self, drive):
    # Arrange
    new_data = urandom(kb(2))
    old_data = urandom(kb(1))
    drive.userfs.setcontents("source.txt", new_data)
    drive.remotefs.setcontents("source.txt", old_data)
    # Act
    drive.patchfile("source.txt")
    # Assert
    assert drive.remotefs.getcontents("source.txt") == new_data

def test_has_conflict_returns_false_if_source_is_newer(self, drive):
    # Arrange
    drive.userfs.setcontents("source.txt", urandom(kb(1)))
    drive.userfs.settimes("source.txt", modified_time=datetime.today())
    drive.remotefs.setcontents("dest.txt", urandom(kb(2)))
    drive.remotefs.settimes("dest.txt",
                            modified_time=datetime.today() - timedelta(days=1))
    # Act
    conflict = drive.has_conflict(src="source.txt", dst="dest.txt")
    # Assert
    assert not conflict

def test_remove_switches_writefs_to_location_of_existing_file(self, fs):
    # Arrange
    fs.fs_lookup["fs1"].setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.fs_lookup["fs2"].setcontents("backup.tar.part1", data=urandom(kb(3)))
    # Act
    fs.remove("backup.tar.part0")
    fs.remove("backup.tar.part1")
    # Assert
    assert not fs.exists("backup.tar.part0")
    assert not fs.exists("backup.tar.part1")

def test_remove_deletes_all_parts(self, fs):
    # Arrange
    fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part2", data=urandom(kb(2)))
    fs.wrapped_fs.remove = Mock()
    # Act
    fs.remove("backup.tar")
    # Assert
    fs.wrapped_fs.remove.assert_has_calls([
        call("backup.tar.part0"),
        call("backup.tar.part1"),
        call("backup.tar.part2")
    ], any_order=True)

def test_sync_dirs_copies_only_not_existing_dirs(self, drive):
    # Arrange
    drive.userfs.makedir("synced")
    drive.userfs.setcontents("synced/newfile.txt", urandom(kb(1)))
    drive.remotefs.makedir("notsynced")
    drive.userfs.makedir("notsynced")
    drive.userfs.setcontents("notsynced/oldfile.txt", urandom(kb(2)))
    # Act
    drive.sync_dirs()
    # Assert
    assert drive.remotefs.exists("synced/newfile.txt")
    assert not drive.remotefs.exists("notsynced/oldfile.txt")

def test_current_part_raises_error_when_file_pointer_is_bigger_than_parts(
        self, parted_file):
    # Arrange
    parted_file._mode = "r"
    parted_file._file_pointer = 4 * kb(4)
    # Act & Assert
    with raises(InvalidFilePointerLocation):
        _ = parted_file.current_part

def test_getinfo_returns_latest_times(self, fs_with_test_file):
    # Arrange
    created_max = date.today() + timedelta(days=10)
    accessed_max = date.today() + timedelta(days=10)
    modified_max = date.today() + timedelta(days=10)

    def getinfo_patch(path):
        if path == "backup.tar.part0":
            return {
                "created_time": created_max,
                "modified_time": date.today(),
                "accessed_time": accessed_max
            }
        else:
            return {
                "created_time": date.today(),
                "modified_time": modified_max,
                "accessed_time": date.today()
            }

    fs_with_test_file.wrapped_fs.getinfo = getinfo_patch
    fs_with_test_file.getsize = lambda p: kb(7)
    # Act
    info = fs_with_test_file.getinfo("backup.tar")
    # Assert
    assert info["created_time"] == created_max
    assert info["accessed_time"] == accessed_max
    assert info["modified_time"] == modified_max

def test_getcontents_reads_file(self, fs):
    # Arrange
    data = urandom(kb(6))
    fs.setcontents("backup.tar", data)
    # Act
    saved_data = fs.getcontents("backup.tar")
    # Assert
    assert saved_data == data

def test_read_returns_none_after_read_all(self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(5)), flushing=True)
    parted_file._seek(offset=0, whence=0)
    parted_file._read()
    # Act
    eof = parted_file._read()
    # Assert
    assert eof is None

def test_close_calls_super_for_flush_and_closes_all_parts(self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(4)))
    parted_file.parts[0].close = Mock()
    parted_file.parts[1].close = Mock()
    # Act
    parted_file.close()
    # Assert
    parted_file.parts[0].close.assert_called_with()
    parted_file.parts[1].close.assert_called_with()

def parted_file(self):
    # A PartedFile backed by an in-memory filesystem, split into kb(4) parts.
    fs = MemoryFS()
    mode = "wb+"
    path = "cuckoo.tar"
    parts = [
        FilePart(fs.open("cuckoo.tar.part0", mode)),
        FilePart(fs.open("cuckoo.tar.part1", mode))
    ]
    return PartedFile(path=path, mode=mode, fs=fs,
                      max_part_size=kb(4), parts=parts)

def test_read_returns_data_from_current_part_in_chunks(self, parted_file):
    # Arrange
    parted_file._write(urandom(kb(5)), flushing=True)
    parted_file._seek(offset=kb(3), whence=0)
    # Act
    chunk1 = parted_file._read(kb(1))
    chunk2 = parted_file._read(kb(1))
    # Assert
    assert len(chunk1) == kb(1)
    assert len(chunk2) == kb(1)

def test_open_switches_writefs_to_location_of_existing_file(self, fs):
    # Arrange
    fs.fs_lookup["fs1"].setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.fs_lookup["fs2"].setcontents("backup.tar.part1", data=urandom(kb(3)))
    # Act
    with fs.open("backup.tar.part0", mode="r+b") as fh:
        fh.write(urandom(kb(5)))
    with fs.open("backup.tar.part1", mode="r+b") as fh:
        fh.write(urandom(kb(2)))
    # Assert
    assert fs.getsize("backup.tar.part0") == kb(5)
    assert fs.getsize("backup.tar.part1") == kb(3)

def test_write_returns_data_that_is_bigger_than_max_part_size(
        self, parted_file):
    # Act
    unwritten_data = parted_file._write(urandom(kb(5)))
    # Assert
    assert len(unwritten_data) == kb(1)

def test_write_returns_none_if_all_data_could_be_written(self, parted_file):
    # Act
    unwritten_data = parted_file._write(urandom(kb(4)))
    # Assert
    assert unwritten_data is None

def test_write_sets_file_pointer_to_next_free_position(self, parted_file):
    # Act
    parted_file._write(urandom(kb(4)))
    # Assert
    assert parted_file._file_pointer == kb(4)

def test_write_with_flushing_mode_calls_itself_until_all_data_is_written(
        self, parted_file):
    # Act
    unwritten_data = parted_file._write(urandom(kb(5)), flushing=True)
    # Assert
    assert unwritten_data is None

def test_exists_returns_true_when_first_part_could_be_found(self, fs):
    # Arrange
    fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4)))
    # Act & Assert
    assert fs.exists("backup.tar")

def test_write_big_amount_expands_to_parts(self, parted_file):
    # Act
    parted_file._write(urandom(kb(12)), flushing=True)
    # Assert
    assert len(parted_file.parts) == 3

def fs_with_folder_structure(self, fs):
    fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("README.txt.part0", data=urandom(kb(1)))
    fs.wrapped_fs.makedir("older_backups")
    return fs

def fs_with_test_file(self, fs):
    fs.wrapped_fs.setcontents("backup.tar.part0", data=urandom(kb(4)))
    fs.wrapped_fs.setcontents("backup.tar.part1", data=urandom(kb(4)))
    return fs

def test_seek_relative_to_end_should_set_filepointer_to_last_part(
        self, parted_file):
    # Act
    parted_file._seek(offset=-kb(4), whence=2)
    # Assert
    assert parted_file._file_pointer == kb(4)