def testPrivateListManifestedPaths(self):
  """Tests versions._list_manifested_paths flat and recursive listings."""
  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).write('')
  files.File('/a/foo', changeset=changeset).write('')
  files.File('/a/bar', changeset=changeset).write('')
  files.File('/a/a/foo', changeset=changeset).write('')
  files.File('/b/foo', changeset=changeset).write('')
  changeset.finalize_associated_files()
  changeset = self.vcs.commit(changeset)  # Changeset 2.

  # Non-recursive listing of '/' only includes root-level files.
  paths_to_changeset_num = versions._list_manifested_paths(
      changeset=changeset, dir_path='/')
  self.assertSameElements(['/foo'], paths_to_changeset_num.keys())

  # Recursive listing of '/' returns every manifested path.
  paths_to_changeset_num = versions._list_manifested_paths(
      changeset=changeset, dir_path='/', recursive=True)
  self.assertSameElements(
      ['/foo', '/a/foo', '/a/bar', '/a/a/foo', '/b/foo'],
      paths_to_changeset_num.keys())

  # Non-recursive listing of a subdirectory excludes nested subdirectories.
  paths_to_changeset_num = versions._list_manifested_paths(
      changeset=changeset, dir_path='/a')
  self.assertSameElements(['/a/foo', '/a/bar'],
                          paths_to_changeset_num.keys())

  # Recursive listing scoped to a subdirectory.
  paths_to_changeset_num = versions._list_manifested_paths(
      changeset=changeset, dir_path='/b', recursive=True)
  self.assertSameElements(['/b/foo'], paths_to_changeset_num.keys())
def testCommitManyFiles(self):
  """Tests committing a changeset containing many associated files."""
  # Regression test for "operating on too many entity groups in a
  # single transaction" error. This usually happens through the HTTP API
  # when lazy file objects are created from the manifest and then end up
  # being evaluated one-by-one inside the commit code path.
  test_user = users.TitanUser('*****@*****.**')
  changeset = self.vcs.new_staging_changeset(created_by=test_user)
  for i in range(100):
    files.File('/foo%s' % i, changeset=changeset).write(str(i))
  changeset.finalize_associated_files()
  # Recreate the changeset from scratch, and reassociate lazy file objects.
  changeset = versions.Changeset(changeset.num)
  for i in range(100):
    changeset.associate_file(files.File('/foo%s' % i, changeset=changeset))
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)

  # Test again, but test the list_files code path with force=True.
  changeset = self.vcs.new_staging_changeset(created_by=test_user)
  for i in range(100):
    files.File('/foo%s' % i, changeset=changeset).write(str(i))
  changeset.finalize_associated_files()
  # Recreate the changeset from scratch, and reassociate lazy file objects.
  changeset = versions.Changeset(changeset.num)
  self.vcs.commit(changeset, force=True)
def write_namespace_testdata(self):
  """Writes fixture files into the default, 'aaa', and 'bbb' namespaces."""
  # (path, namespace, content) triples. A namespace of None means the
  # default namespace. The 'aaa' namespace reuses filenames from the
  # default namespace; 'bbb' uses a non-overlapping filename.
  testdata = [
      ('/foo', None, 'foo'),
      ('/b/bar', None, 'bar'),
      ('/foo', 'aaa', 'aaa-foo'),
      ('/b/bar', 'aaa', 'aaa-bar'),
      ('/b/qux', 'bbb', 'bbb-qux'),
  ]
  for path, namespace, content in testdata:
    if namespace is None:
      files.File(path).write(content)
    else:
      files.File(path, namespace=namespace).write(content)
def testDelete(self):
  """Tests synchronous file deletion and its error handling."""
  path = '/foo/bar.html'
  written_file = files.File(path).write('')
  self.assertTrue(written_file.exists)
  written_file.delete()
  # Use a brand-new File object so memoization cannot mask a stale state.
  self.assertFalse(files.File(path).exists)
  # Deleting a non-existent file raises an error.
  self.assertRaises(files.BadFileError, files.File('/fake.html').delete)
def testDelete(self):
  """Tests batch deletion of files, including backing blob cleanup."""
  fixture = [('/foo', ''), ('/bar', LARGE_FILE_CONTENT), ('/qux', '')]
  for path, content in fixture:
    files.File(path).write(content)
  # Remember the blob key before deletion so blob cleanup can be checked.
  large_file_blob_key = files.File('/bar').blob.key()
  files.Files(['/foo', '/bar']).delete()
  # Only the undeleted file should remain after load().
  self.assertEqual(files.Files(['/qux']),
                   files.Files(['/foo', '/bar', '/qux']).load())
  # The blobstore entry backing the large file must also be removed.
  self.assertIsNone(blobstore.get(large_file_blob_key))
def testLoad(self):
  """Tests that Files.load() populates existing paths and drops missing ones."""
  existing_paths = ['/foo', '/bar']
  for path in existing_paths:
    files.File(path).write('')
  loaded_files = files.Files(paths=['/foo', '/bar', '/fake'])
  loaded_files.load()
  for path in existing_paths:
    self.assertIn(path, loaded_files)
    self.assertTrue(loaded_files[path].is_loaded)
  # Non-existent paths are removed from the collection by load().
  self.assertNotIn('/fake', loaded_files)
def testEmptyRootFile(self):
  """Tests that an empty-content versioned write is preserved on commit."""
  # Regression test: make sure that content='' is correctly handled when
  # copying root file attributes.
  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).write(u'∆∆∆')
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)

  # Overwrite the versioned file with empty content in a second changeset.
  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).write('', encoding='utf-8')
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)
  # The head version must reflect the empty content, not the old content.
  self.assertEqual('', files.File('/foo').content)
def testCommit(self):
  """Tests the full commit lifecycle of a staged changeset."""
  test_user = users.TitanUser('*****@*****.**')
  changeset = self.vcs.new_staging_changeset(created_by=test_user)
  # Shouldn't be able to submit changesets with no changed files:
  self.assertRaises(versions.CommitError, self.vcs.commit, changeset,
                    force=True)
  # Verify that the auto_current_user_add property is overwritten.
  self.assertEqual('*****@*****.**', str(changeset.created_by))

  # Before a changeset is committed, its associated files must be finalized
  # to indicate that the object's files can be trusted for strong consistency.
  files.File('/foo', changeset=changeset).write('')
  self.assertRaises(versions.ChangesetError, self.vcs.commit, changeset)
  changeset.finalize_associated_files()
  final_changeset = self.vcs.commit(changeset)

  # When a changeset is committed, a new changeset is created (so that
  # changes are always sequential) with a created time. The old changeset
  # is marked as deleted by submit.
  staged_changeset = versions.Changeset(1)
  self.assertEqual(CHANGESET_DELETED_BY_SUBMIT, staged_changeset.status)
  self.assertEqual(CHANGESET_SUBMITTED, final_changeset.status)
  # Also, the changesets are linked to each other:
  self.assertEqual(1, final_changeset.linked_changeset_num)
  self.assertEqual(2, staged_changeset.linked_changeset_num)
  self.assertEqual(versions.Changeset(1), final_changeset.linked_changeset)
  self.assertEqual(versions.Changeset(2), staged_changeset.linked_changeset)
  # Verify base_path properties also:
  self.assertEqual('/_titan/ver/2', final_changeset.base_path)
  self.assertEqual('/_titan/ver/1',
                   final_changeset.linked_changeset_base_path)
  # Verify that the auto_current_user_add property is overwritten in the
  # final_changeset because it was overwritten in the staged_changeset.
  self.assertEqual('*****@*****.**', str(final_changeset.created_by))

  # After commit(), files in a changeset cannot be modified.
  titan_file = files.File('/foo', changeset=changeset)
  self.assertRaises(versions.ChangesetError, titan_file.write, '')
  self.assertRaises(versions.ChangesetError, titan_file.delete)
def testSerialize(self):
  """Tests Files.serialize(full=True) output for a recursive listing."""
  first_file = files.File('/foo/bar').write('foobar')
  second_file = files.File('/foo/bat/baz').write('foobatbaz')

  def make_expected_data(titan_file, name, path, paths, content):
    # Builds the expected full serialization for a small datastore-backed
    # file written by the test user.
    return {
        u'name': name,
        u'path': path,
        u'paths': paths,
        u'real_path': path,
        u'mime_type': u'application/octet-stream',
        u'created': titan_file.created,
        u'modified': titan_file.modified,
        u'content': content,
        u'blob': None,
        u'created_by': u'*****@*****.**',
        u'modified_by': u'*****@*****.**',
        u'meta': {},
        u'md5_hash': hashlib.md5(content).hexdigest(),
        u'size': len(content),
    }

  expected_data = {
      u'/foo/bar': make_expected_data(
          first_file, u'bar', u'/foo/bar', [u'/', u'/foo'], u'foobar'),
      u'/foo/bat/baz': make_expected_data(
          second_file, u'baz', u'/foo/bat/baz',
          [u'/', u'/foo', u'/foo/bat'], u'foobatbaz'),
  }
  self.maxDiff = None
  self.assertEqual(
      expected_data,
      files.Files.list(dir_path='/foo/', recursive=True).serialize(full=True))
def testMoveTo(self):
  """Tests File.move_to for content-backed and blob-backed files."""
  source_path = '/foo.html'
  destination_path = '/bar/qux.html'

  # Content-backed file: the move deletes the source and creates the target.
  files.File(source_path).write('Test', meta={'color': 'blue'})
  files.File(source_path).move_to(files.File(destination_path))
  self.assertFalse(files.File(source_path).exists)
  self.assertTrue(files.File(destination_path).exists)

  # Blob-backed file: content comes from blobstore after the move.
  files.File(source_path).write(blob=self.blob_key, meta={'flag': False})
  files.File(source_path).move_to(files.File(destination_path))
  moved_file = files.File(destination_path)
  expected_content = self.blob_reader.read()
  self.assertEqual(expected_content, moved_file.content)
  self.assertEqual('text/html', moved_file.mime_type)
  self.assertEqual(False, moved_file.meta.flag)
  # Meta from the first write does not survive the overwrite.
  self.assertRaises(AttributeError, lambda: moved_file.meta.color)

  # move_to requires a File object argument, not a path string.
  self.assertRaises(AssertionError, files.File(source_path).move_to, '/test')
def testFilesCount(self):
  """Tests Files.count() with recursion, depth limits, and meta filters."""
  # Create files for testing.
  root_level = files.Files(['/index.html', '/qux'])
  first_level = files.Files(['/foo/bar'])
  second_level = files.Files([
      '/foo/bar/baz',
      '/foo/bar/baz.html',
      '/foo/bar/baz.txt',
  ])
  root_and_first_levels = files.Files.merge(root_level, first_level)
  all_files = files.Files(root_level.keys() + first_level.keys() +
                          second_level.keys())
  for titan_file in all_files.itervalues():
    titan_file.write('')

  # Empty.
  self.assertEqual(0, files.Files.count('/fake/path'))
  # From root.
  self.assertEqual(len(root_level), files.Files.count('/'))
  self.assertEqual(len(all_files), files.Files.count('/', recursive=True))
  # Limit recursion depth.
  self.assertEqual(len(root_and_first_levels),
                   files.Files.count('/', recursive=True, depth=1))

  # Custom filters:
  files.File('/a/foo').write('', meta={'color': 'red', 'item_id': 1})
  files.File('/a/bar/qux').write('', meta={'color': 'red', 'item_id': 2})
  files.File('/a/baz').write('', meta={'color': 'blue', 'item_id': 3})
  # Single filter:
  filters = [files.FileProperty('color') == 'blue']
  self.assertEqual(
      1, files.Files.count('/', recursive=True, filters=filters))
  # Multiple filters:
  filters = [
      files.FileProperty('color') == 'red',
      files.FileProperty('item_id') == 2,
  ]
  self.assertEqual(
      1, files.Files.count('/', recursive=True, filters=filters))
def testFileMixins(self):
  """Tests subclass/mixin composition of files.File."""
  # Support behavior: subclass File and make write() also touch a
  # centralized file, while avoiding infinite recursion.
  class TouchRootMixin(files.File):

    def write(self, *args, **kwargs):
      # Note: this mixin is a bad idea in practice. Don't directly touch a
      # centralized file on every write, due to write rate limits.
      if self.path != '/root-touched-file':
        files.File('/root-touched-file').write('')
      super(TouchRootMixin, self).write(*args, **kwargs)

  class CustomFile(TouchRootMixin, files.File):
    pass

  custom_file = CustomFile('/foo/bar')
  custom_file.write('foo')
  # Both the written file and the centralized side-effect file must exist.
  self.assertTrue(custom_file.exists)
  self.assertTrue(files.File('/foo/bar').exists)
  self.assertTrue(files.File('/root-touched-file').exists)
def testMoveTo(self):
  """Tests Files.move_to, including failure handling and cache coherency."""
  # Populate the in-context cache by reading the file before creation.
  self.assertFalse(files.File('/x/b/foo').exists)

  files.File('/foo').write('')
  files.File('/a/foo').write('')
  files.File('/a/b/foo').write('')
  files.File('/c/foo').write('')

  # Move '/a/...' into '/x/...', stripping the '/a/' prefix.
  result_files = files.Files()
  titan_files = files.Files.list('/a/', recursive=True)
  titan_files.move_to('/x', strip_prefix='/a/', result_files=result_files)
  expected_paths = [
      '/x/foo',
      '/x/b/foo',
  ]
  self.assertSameElements(expected_paths, result_files.keys())
  # Verify files have been deleted from the old directory.
  self.assertFalse(files.File('/a/foo').exists)
  self.assertFalse(files.File('/a/b/foo').exists)

  # Moving a set that includes a non-existent path raises, but successes
  # and failures are still reported through the given collections.
  result_files = files.Files()
  failed_files = files.Files()
  files_paths = ['/foo', '/fake']
  self.assertRaises(files.CopyFilesError,
                    files.Files(files_paths).move_to,
                    dir_path='/x',
                    result_files=result_files,
                    failed_files=failed_files)
  expected_paths = [
      '/x/foo',
  ]
  failed_paths = [
      '/x/fake',
  ]
  self.assertSameElements(expected_paths, result_files.keys())
  self.assertSameElements(failed_paths, failed_files.keys())
  # Verify files have been deleted from the old directory.
  self.assertFalse(files.File('/foo').exists)

  # Verify that the NDB in-context cache was cleared correctly.
  self.assertTrue(files.File('/x/b/foo').exists)
def testRegisterFileFactory(self):
  """Tests that a registered factory controls which File subclass is built."""
  class FooFile(files.File):
    pass

  class BarFile(files.File):
    pass

  # Map path prefixes to the subclass the factory should return.
  prefix_to_class = {
      '/foo/files/': FooFile,
      '/bar/files/': BarFile,
  }

  def TitanFileFactory(path, **unused_kwargs):
    for prefix, file_class in prefix_to_class.items():
      if path.startswith(prefix):
        return file_class
    return files.File

  files.register_file_factory(TitanFileFactory)

  # Instantiating through files.File dispatches to the registered factory.
  self.assertTrue(isinstance(files.File('/foo/files/a'), FooFile))
  self.assertTrue(isinstance(files.File('/bar/files/b'), BarFile))
  self.assertTrue(isinstance(files.File('/c'), files.File))
def testRegisterFileMixins(self):
  """Tests registered mixins and their should_apply_mixin() behavior."""
  # A mixin without should_apply_mixin() applies to every file.
  class FooFileMixin(files.File):
    pass

  # A mixin with should_apply_mixin() only applies when it returns True.
  class BarFileMixin(files.File):

    @classmethod
    def should_apply_mixin(cls, **kwargs):
      if kwargs['path'].startswith('/bar/files/'):
        return True
      return False

  files.register_file_mixins([FooFileMixin, BarFileMixin])

  # Pickle and unpickle each file to verify __reduce__ behavior.
  foo_file = pickle.loads(pickle.dumps(files.File('/foo/files/a')))
  bar_file = pickle.loads(pickle.dumps(files.File('/bar/files/b')))
  self.assertTrue(isinstance(foo_file, FooFileMixin))
  self.assertFalse(isinstance(foo_file, BarFileMixin))
  self.assertTrue(isinstance(bar_file, BarFileMixin))
  self.assertTrue(isinstance(bar_file, FooFileMixin))
def testDefer(self):
  """Tests that deferred file writes run and record the logged-in user."""
  user_email = '*****@*****.**'
  self.login(user_email)
  expected_path = '/foo.txt'
  expected_content = 'hello'
  deferred.defer(WriteFile, expected_path, expected_content)
  self.RunDeferredTasks()
  # The deferred task should have created the file with the given content.
  written_file = files.File(expected_path)
  self.assertTrue(written_file.exists)
  self.assertEqual(expected_content, written_file.content)
  # The user logged in at defer time is preserved as created_by.
  self.assertEqual(user_email, written_file.created_by.email)
def testCopyTo(self):
  """Tests Files.copy_to, including failure handling and cache coherency."""
  # Populate the in-context cache by reading the file before creation.
  self.assertFalse(files.File('/x/b/foo').exists)

  files.File('/foo').write('')
  files.File('/a/foo').write('')
  files.File('/a/b/foo').write('')
  files.File('/c/foo').write('')

  # Non-recursive copy of '/' only copies root-level files.
  result_files = files.Files()
  failed_files = files.Files()
  files.Files.list('/').copy_to(dir_path='/x', result_files=result_files,
                                failed_files=failed_files)
  expected_paths = [
      '/x/foo',
  ]
  self.assertSameElements(expected_paths, result_files.keys())
  self.assertEqual([], failed_files.keys())

  # Recursive copy with a stripped path prefix.
  titan_files = files.Files.list('/a/', recursive=True)
  titan_files.copy_to('/x', strip_prefix='/a/', result_files=result_files)
  expected_paths = [
      '/x/foo',
      '/x/b/foo',
  ]
  self.assertSameElements(expected_paths, result_files.keys())

  # With trailing slashes should be the same.
  result_files.clear()
  titan_files.copy_to('/x/', strip_prefix='/a/', result_files=result_files)
  self.assertSameElements(expected_paths, result_files.keys())

  # Copying a set that includes a non-existent path raises, but successes
  # and failures are still reported through the given collections.
  result_files = files.Files()
  failed_files = files.Files()
  files_paths = ['/foo', '/fake']
  self.assertRaises(files.CopyFilesError,
                    files.Files(files_paths).copy_to,
                    dir_path='/x',
                    result_files=result_files,
                    failed_files=failed_files)
  expected_paths = [
      '/x/foo',
  ]
  failed_paths = [
      '/x/fake',
  ]
  self.assertSameElements(expected_paths, result_files.keys())
  self.assertSameElements(failed_paths, failed_files.keys())

  # Verify that the NDB in-context cache was cleared correctly.
  self.assertTrue(files.File('/x/b/foo').exists)
def testListDirectories(self):
  """Tests Changeset.list_directories before and after commits."""
  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).write('')
  # A root-level file does not create any subdirectories.
  self.assertEqual([], changeset.list_directories('/').keys())
  files.File('/a/foo', changeset=changeset).write('')
  files.File('/a/a/foo', changeset=changeset).write('')
  self.assertEqual(['/a'], changeset.list_directories('/').keys())
  self.assertEqual([], changeset.list_directories('/fake/').keys())
  changeset = self.vcs.commit(changeset, force=True)
  # Directory listings survive the commit.
  self.assertEqual(['/a'], changeset.list_directories('/').keys())
  self.assertEqual([], changeset.list_directories('/fake/').keys())

  changeset = self.vcs.new_staging_changeset()
  # Before writes:
  self.assertEqual([], changeset.list_directories('/').keys())
  self.assertEqual(
      ['/a'],
      changeset.list_directories('/', include_manifested=True).keys())
  self.assertEqual(
      ['/a/a'],
      changeset.list_directories('/a', include_manifested=True).keys())
  files.File('/a/foo', changeset=changeset).write('')
  files.File('/a/a/a/foo', changeset=changeset).write('')
  files.File('/b/foo', changeset=changeset).write('')
  # Before commit:
  self.assertEqual(['/a', '/b'], changeset.list_directories('/').keys())
  self.assertEqual(['/a/a'], changeset.list_directories('/a').keys())
  self.assertEqual(['/a/a/a'], changeset.list_directories('/a/a').keys())
  changeset = self.vcs.commit(changeset, force=True)
  # After commit:
  self.assertEqual(['/a', '/b'], changeset.list_directories('/').keys())
  self.assertEqual(['/a/a'], changeset.list_directories('/a').keys())
  self.assertEqual(['/a/a/a'], changeset.list_directories('/a/a').keys())

  # No namespace mixing:
  changeset = self.vcs.new_staging_changeset(namespace='aaa')
  self.assertEqual([], changeset.list_directories('/').keys())
  files.File('/foo', changeset=changeset, namespace='aaa').write('')
  changeset = self.vcs.commit(changeset, force=True)
  self.assertEqual([], changeset.list_directories('/').keys())
def testManifestedViewsAndRebase(self):
  """Tests manifested filesystem views, manifest sharding, and rebasing."""
  # None of the following behaviors, including new_staging_changeset's setting
  # of base_changeset, should rely on eventually-consistent queries.
  # Verify this behavior by simulating a never-eventually-consistent HRD.
  policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0)
  self.datastore_stub.SetConsistencyPolicy(policy)
  self.make_namespaced_testdata()

  # Staging changeset 5.
  changeset = self.vcs.new_staging_changeset()
  self.assertEqual(5, changeset.num)
  self.assertEqual(4, changeset.base_changeset.num)
  files.File('/bar', changeset=changeset).delete()
  files.File('/qux', changeset=changeset).write('qux')

  # Changeset 6 and 7.
  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).write('NEWfoo')
  changeset.finalize_associated_files()
  final_changeset = self.vcs.commit(changeset)
  self.assertEqual(7, final_changeset.num)

  # Changeset 8 and 9.
  changeset = self.vcs.new_staging_changeset()
  for i in range(3200):
    files.File('/foo{}'.format(i), changeset=changeset).write('')
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)

  # MANIFESTED FILESYSTEM VIEWS.
  # - Files read through a 'new' or 'submitted' changeset: first pull
  #   from the changeset, then fallback to the base_changeset's manifest.
  #   The manifested filesystem should by overlaid by the changeset's files.
  # - Files read through a 'deleted' or 'deleted-by-submit' changeset:
  #   always pull from the base_changeset's manifest, ignore the changeset
  #   since its changes will manifest when viewed at its corresponding
  #   submitted changeset.
  # - File modifications must go through 'new' changesets.
  changeset = versions.Changeset(1)  # 'deleted-by-submit'
  self.assertFalse(files.File('/foo', changeset=changeset).exists)
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertIsNone(changeset._num_manifest_shards)

  changeset = versions.Changeset(2)  # 'submitted'
  self.assertEqual('foo', files.File('/foo', changeset=changeset).content)
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertEqual(1, changeset._num_manifest_shards)

  changeset = versions.Changeset(3)  # 'deleted-by-submit'
  self.assertEqual('foo', files.File('/foo', changeset=changeset).content)
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertIsNone(changeset._num_manifest_shards)

  changeset = versions.Changeset(4)  # 'submitted'
  self.assertEqual('newfoo', files.File('/foo', changeset=changeset).content)
  self.assertEqual('bar', files.File('/bar', changeset=changeset).content)
  self.assertFalse(files.File('/qux', changeset=changeset).exists)
  self.assertEqual(1, changeset._num_manifest_shards)

  changeset = versions.Changeset(5)  # 'staging'
  self.assertEqual('newfoo', files.File('/foo', changeset=changeset).content)
  self.assertEqual('qux', files.File('/qux', changeset=changeset).content)
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertIsNone(changeset._num_manifest_shards)

  changeset = versions.Changeset(6)  # 'deleted-by-submit'
  self.assertEqual('newfoo', files.File('/foo', changeset=changeset).content)
  self.assertEqual('bar', files.File('/bar', changeset=changeset).content)
  self.assertIsNone(changeset._num_manifest_shards)

  changeset = versions.Changeset(7)  # 'submitted'
  self.assertEqual('NEWfoo', files.File('/foo', changeset=changeset).content)
  self.assertEqual('bar', files.File('/bar', changeset=changeset).content)
  self.assertEqual(1, changeset._num_manifest_shards)

  changeset = versions.Changeset(9)  # 'submitted'
  self.assertEqual(4, changeset._num_manifest_shards)
  # Verify that hash is evenly distributing the paths over the shards by
  # verifying the shards are not nearly full to 1000 paths.
  # NOTE: the arguments here were previously inverted
  # (assertLess(900, len(...))), which asserted that the shard held MORE
  # than 900 paths — the opposite of the stated intent. With 3200 paths
  # spread over 4 shards, an even distribution is ~800 paths per shard.
  manifest_shard = changeset._get_manifest_shard_ent('/foo0')
  self.assertLess(len(manifest_shard.paths_to_changeset_num), 900)

  # Rebase the staging changeset and verify the new manifest files.
  changeset = versions.Changeset(5)  # 'staging'
  changeset.rebase(versions.Changeset(7))
  self.assertEqual('NEWfoo', files.File('/foo', changeset=changeset).content)
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertEqual('qux', files.File('/qux', changeset=changeset).content)

  # Cannot rebase to anything but submitted changesets.
  self.assertRaises(
      versions.ChangesetRebaseError, changeset.rebase, versions.Changeset(5))
  self.assertRaises(
      versions.ChangesetRebaseError, changeset.rebase, versions.Changeset(6))
  # Namespaces must match.
  self.assertRaises(
      versions.NamespaceMismatchError, changeset.rebase,
      versions.Changeset(7, namespace='aaa'))
def testNamespaceState(self):
  """Tests versioned file state isolation across namespaces and changesets."""
  self.make_namespaced_testdata()

  # Verify the state of the filesystem in the default namespace.
  changeset = versions.Changeset(2)
  self.assertEqual('foo', files.File('/foo', changeset=changeset).content)
  # Files from other namespaces should not exist.
  self.assertFalse(files.File('/bar', changeset=changeset).exists)
  self.assertFalse(files.File('/qux', changeset=changeset).exists)
  changeset = versions.Changeset(4)
  # Verify the current state of the filesystem in the default namespace.
  # Force use of dynamic _FilePointer by not passing the changeset arg.
  self.assertEqual('newfoo', files.File('/foo').content)
  self.assertEqual('bar', files.File('/bar').content)

  # Verify the state of the filesystem in the 'aaa' namespace at changeset 2.
  changeset = versions.Changeset(2, namespace='aaa')
  titan_file = files.File('/foo', changeset=changeset, namespace='aaa')
  self.assertEqual('foo-aaa', titan_file.content)
  titan_file = files.File('/bar', changeset=changeset, namespace='aaa')
  self.assertEqual('bar-aaa', titan_file.content)
  # Files from other namespaces should not exist.
  titan_file = files.File('/qux', changeset=changeset, namespace='aaa')
  self.assertFalse(titan_file.exists)

  # Verify the state of the filesystem in the 'aaa' namespace at changeset 4.
  changeset = versions.Changeset(4, namespace='aaa')
  titan_file = files.File('/foo', changeset=changeset, namespace='aaa')
  self.assertEqual('newfoo-aaa', titan_file.content)
  titan_file = files.File('/bar', changeset=changeset, namespace='aaa')
  self.assertFalse(titan_file.exists)
  # Verify the current state of the filesystem in the 'aaa' namespace.
  # Force use of dynamic _FilePointer by not passing the changeset arg.
  self.assertEqual('newfoo-aaa', files.File('/foo', namespace='aaa').content)
  self.assertFalse(files.File('/bar', namespace='aaa').exists)

  # Verify the state of the filesystem in the 'bbb' namespace at changeset 2.
  changeset = versions.Changeset(2, namespace='bbb')
  titan_file = files.File('/foo', changeset=changeset, namespace='bbb')
  self.assertEqual('foo-bbb', titan_file.content)
  titan_file = files.File('/qux', changeset=changeset, namespace='bbb')
  self.assertEqual('qux-bbb', titan_file.content)
  # Files from other namespaces should not exist.
  titan_file = files.File('/bar', changeset=changeset, namespace='bbb')
  self.assertFalse(titan_file.exists)
def make_testdata(self):
  """Creates a series of committed changesets used as shared fixtures."""
  # Burn through staging changesets so later changeset numbers are stable.
  for _ in range(1, 10):
    self.vcs.new_staging_changeset()

  # Changeset 11 (was changeset 10 before commit):
  changeset = self.vcs.new_staging_changeset()
  self.assertIsNone(changeset.base_changeset)
  files.File('/foo', changeset=changeset).write('foo')
  files.File('/bar', changeset=changeset).write('bar')
  files.File('/qux', changeset=changeset).write('qux')
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)

  # For testing, move the submitted datetime to 31 days ago.
  changeset_ent = versions.Changeset(11).changeset_ent
  created = datetime.datetime.now() - datetime.timedelta(days=31)
  changeset_ent.created = created
  changeset_ent.put()

  # Changeset 13:
  changeset = self.vcs.new_staging_changeset()
  self.assertEqual(11, changeset.base_changeset.num)
  files.File('/foo', changeset=changeset).write('foo2')  # edit
  files.File('/bar', changeset=changeset).delete()  # delete
  files.File('/baz', changeset=changeset).write('baz')  # create
  files.File('/qux', changeset=changeset).write('qux2')  # edit
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)
  self.assertEqual(13, self.vcs.get_last_submitted_changeset().num)
  self.assertIsNone(self.vcs.get_last_submitted_changeset().namespace)

  # Changeset 15:
  changeset = self.vcs.new_staging_changeset()
  self.assertEqual(13, changeset.base_changeset.num)
  files.File('/foo', changeset=changeset).delete()  # delete
  files.File('/bar', changeset=changeset).delete()  # delete
  files.File('/baz', changeset=changeset).write('baz2')  # edit
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)

  # Changeset 17:
  changeset = self.vcs.new_staging_changeset()
  modified_by = users.TitanUser('*****@*****.**')
  files.File('/foo', changeset=changeset).write(
      'foo3', modified_by=modified_by)  # re-create
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)
def testFile(self):
  """Tests core files.File behavior: lazy loading, write, delete, and meta."""
  meta = {'color': 'blue', 'flag': False}
  titan_file = files.File('/foo/bar.html')
  titan_file.write('Test', meta=meta)

  # Init with path only, verify lazy-loading properties.
  titan_file = files.File('/foo/bar.html')
  self.assertFalse(titan_file.is_loaded)
  self.assertIsNone(titan_file._file_ent)
  # Touching any lazy property triggers the datastore load.
  _ = titan_file.mime_type
  self.assertNotEqual(None, titan_file._file_ent)
  self.assertTrue(titan_file.is_loaded)
  titan_file.unload()
  self.assertFalse(titan_file.is_loaded)
  self.assertIsNone(titan_file._file_ent)

  # Init with a _TitanFile entity.
  file_ent = files._TitanFile.get_by_id('/foo/bar.html')
  titan_file = files.File('/foo/bar.html', _file_ent=file_ent)
  self.assertEqual('/foo/bar.html', titan_file.path)
  self.assertEqual('bar.html', titan_file.name)
  self.assertEqual('bar', titan_file.name_clean)
  self.assertEqual('.html', titan_file.extension)
  self.assertTrue(titan_file.is_loaded)
  self.assertIsNotNone(titan_file._file_ent)

  # write().
  self.assertEqual(titan_file.content, 'Test')
  titan_file.write('New content')
  self.assertEqual(titan_file.content, 'New content')
  titan_file.write('')
  self.assertEqual(titan_file.content, '')

  # Check meta data.
  self.assertEqual('blue', titan_file.meta.color)
  self.assertEqual(False, titan_file.meta.flag)

  # delete().
  self.assertTrue(titan_file.exists)
  titan_file.delete()
  self.assertFalse(titan_file.exists)
  titan_file.write(content='Test', meta=meta)
  titan_file.delete()
  self.assertFalse(titan_file.exists)

  # __hash__(): path and namespace both contribute to the hash.
  self.assertEqual(hash(files.File('/foo')), hash(files.File('/foo')))
  self.assertNotEqual(hash(files.File('/foo')), hash(files.File('/bar')))
  self.assertNotEqual(hash(files.File('/foo')),
                      hash(files.File('/foo', namespace='aaa')))

  # serialize().
  titan_file = files.File('/foo/bar/baz').write('', meta=meta)
  expected_data = {
      'path': '/foo/bar/baz',
      'real_path': '/foo/bar/baz',
      'name': 'baz',
      'paths': ['/', '/foo', '/foo/bar'],
      'mime_type': u'application/octet-stream',
      'created': titan_file.created,
      'modified': titan_file.modified,
      'content': '',
      'blob': None,
      'created_by': '*****@*****.**',
      'modified_by': '*****@*****.**',
      'meta': {
          'color': 'blue',
          'flag': False,
      },
      'size': 0,
      'md5_hash': hashlib.md5('').hexdigest(),
  }
  self.assertEqual(expected_data,
                   files.File('/foo/bar/baz').serialize(full=True))

  # Properties: name, name_clean, extension, paths, mime_type, created,
  # modified, blob, created_by, modified_by, and size.
  titan_file = files.File('/foo/bar/baz.html')
  self.assertEqual('baz.html', titan_file.name)
  self.assertEqual('baz', titan_file.name_clean)
  self.assertEqual('.html', titan_file.extension)
  # Check bool handling:
  self.assertFalse(titan_file)
  titan_file.write('')
  self.assertTrue(titan_file)
  self.assertEqual(['/', '/foo', '/foo/bar'], titan_file.paths)
  self.assertEqual('text/html', titan_file.mime_type)
  self.assertTrue(isinstance(titan_file.created, datetime.datetime))
  self.assertTrue(isinstance(titan_file.modified, datetime.datetime))
  self.assertIsNone(titan_file.blob)
  self.assertEqual(users.TitanUser('*****@*****.**'),
                   titan_file.created_by)
  self.assertEqual(users.TitanUser('*****@*****.**'),
                   titan_file.modified_by)
  # Size:
  titan_file.write('foo')
  self.assertEqual(3, titan_file.size)
  titan_file.write(u'f♥♥')
  # "size" should represent the number of bytes, not the number of characters.
  # 'f♥♥' == 'f\xe2\x99\xa5\xe2\x99\xa5' == 1 + 3 + 3 == 7
  self.assertEqual(7, titan_file.size)
  # "size" should use blob size if present:
  titan_file.write(LARGE_FILE_CONTENT)
  self.assertEqual(1 << 21, titan_file.size)

  # read() and content property.
  self.assertEqual(titan_file.content, titan_file.read())

  # close().
  self.assertIsNone(titan_file.close())

  # Error handling: init with non-existent path.
  titan_file = files.File('/foo/fake.html')
  self.assertRaises(files.BadFileError, lambda: titan_file.paths)
  self.assertRaises(files.BadFileError, lambda: titan_file.content)
  self.assertRaises(files.BadFileError, titan_file.delete)
  self.assertRaises(files.BadFileError, titan_file.serialize)

  # Bad path arguments:
  self.assertRaises(ValueError, files.File, None)
  self.assertRaises(ValueError, files.File, '')
  self.assertRaises(ValueError, files.File, 'bar.html')
  self.assertRaises(ValueError, files.File, '/a/b/')
  self.assertRaises(ValueError, files.File, '/a//b')
  self.assertRaises(ValueError, files.File, '..')
  self.assertRaises(ValueError, files.File, '/a/../b')
  self.assertRaises(ValueError, files.File, '/')
def testListFiles(self):
  """Tests Changeset.list_files over staged, committed, and manifested views."""
  changeset = self.vcs.new_staging_changeset()
  # Test list files within first staging changeset.
  self.assertEqual(
      [], changeset.list_files('/').keys())
  self.assertEqual(
      [], changeset.list_files('/', include_manifested=True).keys())
  files.File('/foo', changeset=changeset).write('foo')
  files.File('/a/foo', changeset=changeset).write('foo')
  files.File('/a/bar', changeset=changeset).write('bar')
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)  # Changeset 2.

  changeset = self.vcs.new_staging_changeset()
  files.File('/foo', changeset=changeset).delete()
  changeset.finalize_associated_files()
  self.vcs.commit(changeset)  # Changeset 4.

  # Flat, recursive, and subdirectory listings at changeset 2.
  titan_files = versions.Changeset(2).list_files(dir_path='/')
  self.assertEqual(['/foo'], titan_files.keys())
  titan_files = versions.Changeset(2).list_files(dir_path='/',
                                                 recursive=True)
  self.assertEqual(['/a/bar', '/a/foo', '/foo'], titan_files.keys())
  titan_files = versions.Changeset(2).list_files(dir_path='/a/')
  self.assertEqual(['/a/bar', '/a/foo'], titan_files.keys())

  # Test limit and offset.
  titan_files = versions.Changeset(2).list_files(
      dir_path='/', recursive=True, limit=1, offset=1)
  self.assertEqual(['/a/foo'], titan_files.keys())

  # Test included_deleted.
  titan_files = versions.Changeset(4).list_files(dir_path='/')
  self.assertEqual(['/foo'], titan_files.keys())
  self.assertEqual(
      versions.FileStatus.deleted, titan_files.values()[0].meta.status)
  titan_files = versions.Changeset(4).list_files(
      dir_path='/', include_deleted=False)
  self.assertEqual([], titan_files.keys())

  # Test include_manifested on a finalized changeset.
  titan_files = versions.Changeset(4).list_files(
      '/', include_deleted=False, include_manifested=True)
  self.assertEqual([], titan_files.keys())
  titan_files = versions.Changeset(4).list_files('/',
                                                 include_manifested=True)
  self.assertEqual(['/foo'], titan_files.keys())
  titan_files = versions.Changeset(4).list_files(
      '/', recursive=True, include_manifested=True)
  self.assertEqual(['/a/bar', '/a/foo', '/foo'], titan_files.keys())
  titan_files = versions.Changeset(4).list_files(
      '/a/', recursive=True, include_manifested=True)
  self.assertEqual(['/a/bar', '/a/foo'], titan_files.keys())
  titan_files = versions.Changeset(4).list_files(
      '/b', recursive=True, include_manifested=True)
  self.assertEqual([], titan_files.keys())

  # Test include_manifested on a staging changeset.
  changeset = self.vcs.new_staging_changeset()  # Changeset 5 (staging).
  files.File('/a/qux', changeset=changeset).write('qux')
  titan_files = versions.Changeset(5).list_files(
      '/', include_deleted=False, include_manifested=True)
  self.assertEqual([], titan_files.keys())
  titan_files = versions.Changeset(5).list_files(
      '/', recursive=True, include_manifested=True)
  self.assertEqual(['/a/bar', '/a/foo', '/a/qux'], titan_files.keys())
  titan_files = versions.Changeset(5).list_files(
      '/a/', recursive=True, include_manifested=True)
  self.assertEqual(['/a/bar', '/a/foo', '/a/qux'], titan_files.keys())
  titan_files = versions.Changeset(5).list_files(
      '/b', recursive=True, include_manifested=True)
  self.assertEqual([], titan_files.keys())

  # Verify reading a manifested and a non-manifested file.
  titan_files = versions.Changeset(5).list_files(
      '/', recursive=True, include_manifested=True)
  self.assertEqual('foo', titan_files['/a/foo'].content)
  self.assertEqual('qux', titan_files['/a/qux'].content)

  # Error handling.
  self.assertRaises(
      ValueError, versions.Changeset(2).list_files, dir_path='/',
      include_manifested=True, depth=1, filters=[], order=[])
  self.assertRaises(
      ValueError, versions.Changeset(2).list_files, '/',
      include_manifested=True, limit=10, offset=1)
def testNamespaces(self):
    """Verifies namespace isolation of reads, listings, and batch loads.

    Files written to the default, 'aaa', and 'bbb' namespaces (via
    write_namespace_testdata) must be visible only within their own
    namespace, and namespaced File objects must never compare equal to
    same-path files in other namespaces.
    """
    self.write_namespace_testdata()

    # Verify the state of the filesystem in the default namespace.
    self.assertEqual('foo', files.File('/foo').content)
    self.assertEqual('bar', files.File('/b/bar').content)
    self.assertFalse(files.File('/b/qux').exists)
    titan_files = files.Files.list('/', recursive=True)
    self.assertEqual({'/foo', '/b/bar'}, set(titan_files))
    # Batch load silently drops the non-existent path.
    titan_files = files.Files(paths=['/foo', '/b/bar', '/b/qux']).load()
    self.assertEqual({'/foo', '/b/bar'}, set(titan_files))

    # Verify the state of the filesystem in the 'aaa' namespace.
    self.assertEqual('aaa-foo', files.File('/foo', namespace='aaa').content)
    self.assertEqual('aaa-bar', files.File('/b/bar', namespace='aaa').content)
    self.assertFalse(files.File('/b/qux', namespace='aaa').exists)
    titan_files = files.Files.list('/', recursive=True, namespace='aaa')
    self.assertEqual({'/foo', '/b/bar'}, set(titan_files))
    self.assertEqual('aaa-foo', titan_files['/foo'].content)
    self.assertEqual('aaa-bar', titan_files['/b/bar'].content)
    titan_files = files.Files(paths=['/foo', '/b/bar', '/b/qux'],
                              namespace='aaa').load()
    self.assertEqual({'/foo', '/b/bar'}, set(titan_files))
    self.assertEqual('aaa-foo', titan_files['/foo'].content)
    self.assertEqual('aaa-bar', titan_files['/b/bar'].content)

    # Verify the state of the filesystem in the 'bbb' namespace.
    # Only /qux exists here; paths present in other namespaces don't leak in.
    self.assertEqual('bbb-qux', files.File('/b/qux', namespace='bbb').content)
    self.assertFalse(files.File('/foo', namespace='bbb').exists)
    self.assertFalse(files.File('/b/bar', namespace='bbb').exists)
    titan_files = files.Files.list('/', recursive=True, namespace='bbb')
    self.assertEqual({'/b/qux'}, set(titan_files))
    self.assertEqual('bbb-qux', titan_files['/b/qux'].content)
    titan_files = files.Files(paths=['/foo', '/b/bar', '/b/qux'],
                              namespace='bbb').load()
    self.assertEqual({'/b/qux'}, set(titan_files))
    self.assertEqual('bbb-qux', titan_files['/b/qux'].content)

    # Namespace is not affected by file existence.
    self.assertIsNone(files.File('/foo').namespace)
    self.assertIsNone(files.File('/fake').namespace)
    self.assertEqual('aaa', files.File('/foo', namespace='aaa').namespace)
    self.assertEqual('zzz', files.File('/fake', namespace='zzz').namespace)

    # Cannot mix namespaces in files.Files.
    titan_files = files.Files()
    with self.assertRaises(files.NamespaceMismatchError):
        other_files = files.Files(paths=['/foo'], namespace='aaa')
        titan_files.update(other_files)

    # Files are not the same if their namespace is different.
    self.assertNotEqual(files.File('/foo'),
                        files.File('/foo', namespace='aaa'))
    self.assertNotEqual(files.File('/foo', namespace='aaa'),
                        files.File('/foo', namespace='zzz'))

    # Error handling (more extensive namespace validate tests in
    # utils_test.py): invalid characters in the namespace are rejected.
    self.assertRaises(ValueError, files.File, '/a', namespace='/')
    self.assertRaises(ValueError, files.File, '/a', namespace=u'∆')
def testCopyInNamespace(self):
    """Copying must work across namespaces without touching the source side."""
    self.write_namespace_testdata()

    # Default namespace --> 'aaa': content arrives, default side untouched.
    target = files.File('/foo-copy', namespace='aaa')
    files.File('/foo').copy_to(target)
    self.assertEqual('foo', files.File('/foo-copy', namespace='aaa').content)
    self.assertFalse(files.File('/foo-copy').exists)

    # 'aaa' --> default namespace.
    self.assertFalse(files.File('/foo-copy').exists)
    source = files.File('/foo', namespace='aaa')
    source.copy_to(files.File('/foo-copy'))
    self.assertEqual('aaa-foo', files.File('/foo-copy').content)

    # 'aaa' --> 'bbb': both endpoints in non-default namespaces.
    self.assertFalse(files.File('/foo-copy', namespace='bbb').exists)
    target = files.File('/foo-copy', namespace='bbb')
    files.File('/foo', namespace='aaa').copy_to(target)
    copied = files.File('/foo-copy', namespace='bbb')
    self.assertEqual('aaa-foo', copied.content)
def testMoveInNamespace(self):
    """Moving across namespaces must transfer content and delete the source."""
    self.write_namespace_testdata()

    # Default namespace --> 'aaa': destination gains content, source is gone.
    destination = files.File('/foo-moved', namespace='aaa')
    files.File('/foo').move_to(destination)
    self.assertEqual('foo', files.File('/foo-moved', namespace='aaa').content)
    self.assertFalse(files.File('/foo-moved').exists)
    self.assertFalse(files.File('/foo').exists)

    # 'aaa' --> default namespace.
    self.assertFalse(files.File('/foo-moved').exists)
    source = files.File('/foo', namespace='aaa')
    source.move_to(files.File('/foo-moved'))
    self.assertEqual('aaa-foo', files.File('/foo-moved').content)

    # 'aaa' --> 'bbb': both endpoints in non-default namespaces.
    self.assertFalse(files.File('/b/bar-moved', namespace='bbb').exists)
    destination = files.File('/b/bar-moved', namespace='bbb')
    files.File('/b/bar', namespace='aaa').move_to(destination)
    moved = files.File('/b/bar-moved', namespace='bbb')
    self.assertEqual('aaa-bar', moved.content)
def testOrderedFiles(self):
    """Verifies that OrderedFiles equality and listing are order-sensitive."""
    # Create files for testing.
    root_level = files.OrderedFiles([
        # These need to be alphabetically ordered here because they will be
        # usually returned that way from queries inside files.Files.list(),
        # except for when other filters are applied.
        '/bar',
        '/baz',
        '/foo',
    ])
    for titan_file in root_level.itervalues():
        titan_file.write('')

    # Verify that equality handles order checking. Do this first to make sure
    # that following assertEqual() calls are also checking for order.
    root_level_same_order = files.OrderedFiles([
        # Intentionally not the same order, to test sort() right below.
        '/baz',
        '/foo',
        '/bar',
    ])
    root_level_same_order.sort()
    root_level_different_order = files.OrderedFiles([
        '/foo',
        '/baz',
        '/bar',
    ])
    self.assertEqual(root_level, root_level_same_order)
    self.assertNotEqual(root_level, root_level_different_order)
    self.assertNotEqual(files.OrderedFiles([]), root_level)

    # Test updating and removing items.
    new_root_level = files.OrderedFiles([
        '/bar',
        '/baz',
        '/foo',
    ])
    new_root_level.update(files.Files(['/qux']))
    self.assertNotEqual(root_level, new_root_level)
    del new_root_level['/qux']
    self.assertEqual(root_level, new_root_level)

    # Test files.OrderedFiles.list().
    self.assertEqual(root_level, files.OrderedFiles.list('/'))
    self.assertNotEqual(root_level_different_order,
                        files.OrderedFiles.list('/'))

    # Test files.OrderedFiles.list() with order= kwarg.
    # Each write happens as a different user so that ordering by the
    # 'created_by' property is distinguishable from path ordering.
    self.Login('*****@*****.**')
    files.File('/a/middle').write('')
    self.Login('*****@*****.**')
    files.File('/a/last').write('')
    self.Login('*****@*****.**')
    files.File('/a/first').write('')
    order = [files.FileProperty('created_by')]
    results = files.OrderedFiles.list('/a', order=order)
    expected = files.OrderedFiles([
        '/a/first',
        '/a/middle',
        '/a/last',
    ])
    self.assertEqual(expected, results)

    # Test reverse order (negating a FileProperty flips the sort direction).
    order = [-files.FileProperty('created_by')]
    results = files.OrderedFiles.list('/a', order=order)
    expected = files.OrderedFiles([
        '/a/last',
        '/a/middle',
        '/a/first',
    ])
    self.assertEqual(expected, results)

    # Error handling: direct item assignment is not supported.
    self.assertRaises(AttributeError, new_root_level.__setitem__, '/qux',
                      files.File('/qux'))
def make_namespaced_testdata(self):
    """Builds committed changesets in three namespaces for versioning tests.

    Staging changeset numbers are odd (1, 3) and their committed final
    changesets are even (2, 4), independently per namespace. Along the way
    this also asserts delete/revert semantics for a versioned file.
    """
    meta = {'color': 'blue'}
    # Filesystem views at final changesets (the whole filesystem, not diffs):
    #
    # DEFAULT NAMESPACE.
    # Changeset 2:
    #   /foo: 'foo'
    # Changeset 4:
    #   /foo: 'newfoo'
    #   /bar: 'bar'
    #
    # NAMESPACE 'aaa'.
    # Changeset 2:
    #   /foo: 'foo-aaa'
    #   /bar: 'bar-aaa' meta:{'color':'blue'}
    # Changeset 4:
    #   /foo: 'newfoo-aaa'
    #
    # NAMESPACE 'bbb'.
    # Changeset 2:
    #   /foo: 'foo-bbb'
    #   /qux: 'qux-bbb'

    # Changeset 2, default namespace (was changeset 1 before commit).
    changeset = self.vcs.new_staging_changeset()
    self.assertEqual(1, changeset.num)
    self.assertIsNone(changeset.base_changeset)
    self.assertIsNone(changeset.namespace)
    files.File('/foo', changeset=changeset).write('foo')
    changeset.finalize_associated_files()
    final_changeset = self.vcs.commit(changeset)
    self.assertEqual(2, final_changeset.num)
    self.assertIsNone(final_changeset.namespace)

    # Changeset 4, default namespace.
    changeset = self.vcs.new_staging_changeset()
    self.assertEqual(3, changeset.num)
    self.assertEqual(2, changeset.base_changeset.num)
    files.File('/foo', changeset=changeset).write('newfoo')
    files.File('/bar', changeset=changeset).write('bar')
    changeset.finalize_associated_files()
    final_changeset = self.vcs.commit(changeset)
    self.assertEqual(4, final_changeset.num)
    self.assertIsNone(final_changeset.namespace)

    # Changeset 2, 'aaa' namespace.
    # Note: changeset numbering restarts at 1 within each namespace.
    changeset = self.vcs.new_staging_changeset(namespace='aaa')
    self.assertEqual(1, changeset.num)
    self.assertIsNone(changeset.base_changeset)
    self.assertEqual('aaa', changeset.namespace)
    files.File('/foo', changeset=changeset, namespace='aaa').write('foo-aaa')
    titan_file = files.File('/bar', changeset=changeset, namespace='aaa').write(
        'bar-aaa', meta=meta)
    self.assertEqual('blue', titan_file.meta.color)
    changeset.finalize_associated_files()
    final_changeset = self.vcs.commit(changeset)
    self.assertEqual(2, final_changeset.num)
    self.assertEqual('aaa', final_changeset.namespace)

    # Changeset 2, 'bbb' namespace (some filenames overlap 'aaa' files).
    changeset = self.vcs.new_staging_changeset(namespace='bbb')
    self.assertEqual(1, changeset.num)
    files.File('/foo', changeset=changeset, namespace='bbb').write('foo-bbb')
    files.File('/qux', changeset=changeset, namespace='bbb').write('qux-bbb')
    changeset.finalize_associated_files()
    final_changeset = self.vcs.commit(changeset)
    self.assertEqual(2, final_changeset.num)
    self.assertEqual('bbb', final_changeset.namespace)
    self.assertEqual(
        'bbb', self.vcs.get_last_submitted_changeset(namespace='bbb').namespace)

    # Changeset 4, 'aaa' namespace.
    changeset = self.vcs.new_staging_changeset(namespace='aaa')
    self.assertEqual(3, changeset.num)
    self.assertEqual(2, changeset.base_changeset.num)
    files.File('/foo', changeset=changeset, namespace='aaa').write('newfoo-aaa')
    titan_file = files.File(
        '/bar', changeset=changeset, namespace='aaa').delete()
    # While we're here, verify that a marked-for-delete file (whose content and
    # meta have been nullified) is restored correctly if undeleted.
    titan_file = files.File(
        '/bar', changeset=changeset, namespace='aaa', _allow_deleted_files=True)
    self.assertRaises(AttributeError, lambda: titan_file.meta.color)
    # This should restore the file's existence, but the metadata should NOT
    # be present since it's a brand new file after deletion.
    titan_file.write('bar-restored-aaa')
    self.assertEqual('bar-restored-aaa', titan_file.content)
    self.assertRaises(AttributeError, lambda: titan_file.meta.color)
    # However, if the file is reverted THEN written, metadata should restore.
    changeset.revert_file(titan_file)
    titan_file.write(meta={'foo': 'foo'})
    self.assertEqual('bar-aaa', titan_file.content)
    self.assertEqual('blue', titan_file.meta.color)
    # Delete it again, since that's the state we actually want.
    files.File('/bar', changeset=changeset, namespace='aaa').delete()
    changeset.finalize_associated_files()
    final_changeset = self.vcs.commit(changeset)
    self.assertEqual(4, final_changeset.num)
    self.assertEqual('aaa', final_changeset.namespace)

    # Cannot revert a file in a submitted changeset.
    changeset = versions.Changeset(4, namespace='aaa')
    titan_file = files.File('/foo', changeset=changeset, namespace='aaa')
    self.assertRaises(
        versions.ChangesetError, lambda: changeset.revert_file(titan_file))

    # File namespaces must match their associated changeset namespace.
    self.assertRaises(
        versions.NamespaceMismatchError,
        lambda: files.File('/foo', changeset=changeset))
    self.assertRaises(
        versions.NamespaceMismatchError,
        lambda: files.File('/foo', changeset=changeset, namespace='bbb'))
def testMixin(self):
    """Core versioned-file behavior: write, delete, revert, commit, copy-up."""
    # NOTE: Look here first. If this test fails, other tests are likely broken.
    changeset = self.vcs.new_staging_changeset()
    meta = {'color': 'blue', 'flag': False}
    files.File('/foo', changeset=changeset).write('foo-versioned')
    files.File('/bar', changeset=changeset).write('bar-versioned', meta=meta)

    # exists(): uncommitted files exist only when viewed through the changeset.
    self.assertFalse(files.File('/foo').exists)
    self.assertTrue(files.File('/foo', changeset=changeset).exists)
    self.assertFalse(files.File('/fake', changeset=changeset).exists)

    # Init with an uncommitted file path:
    self.assertFalse(files.File('/foo').exists)

    # Init with an uncommitted file within a changeset:
    titan_file = files.File('/foo', changeset=changeset)
    self.assertEqual('/foo', titan_file.path)
    self.assertEqual('/_titan/ver/1/foo', titan_file.versioned_path)

    # Batch load drops the non-existent '/fake' entry.
    expected_foo = files.File('/foo', changeset=changeset)
    expected_bar = files.File('/bar', changeset=changeset)
    actual_titan_files = files.Files(files=[
        expected_foo,
        expected_bar,
        files.File('/fake', changeset=changeset),
    ])
    actual_titan_files.load()
    expected_titan_files = files.Files(files=[
        expected_foo,
        expected_bar,
    ])
    self.assertEqual(expected_titan_files, actual_titan_files)

    # write().
    titan_file = files.File('/foo', changeset=changeset)
    titan_file.write('foo', meta={'color': 'blue'})
    self.assertEqual('/_titan/ver/1/foo', titan_file.versioned_path)

    # Delete (really "mark for deletion").
    titan_file = files.File('/foo', changeset=changeset).delete()
    self.assertEqual('/_titan/ver/1/foo', titan_file.versioned_path)
    # Just for testing, make sure the status is correct.
    titan_file._allow_deleted_files = True
    self.assertEqual(FILE_DELETED, titan_file.meta.status)
    # Files marked for delete technically exist at the versioned path,
    # but through the file interface they should pretend to not exist:
    self.assertFalse(files.File('/foo', changeset=changeset).exists)
    self.assertRaises(
        files.BadFileError,
        lambda: files.File('/foo', changeset=changeset).content)

    # Revert a file marked for deletion.
    changeset.revert_file(files.File('/foo', changeset=changeset))
    self.assertFalse(files.File('/foo').exists)
    self.assertFalse(files.File('/foo', changeset=changeset).exists)

    # Commit the changeset (/bar is the only remaining file).
    changeset.finalize_associated_files()
    self.vcs.commit(changeset)

    # Check exists() with a committed file path.
    self.assertFalse(files.File('/foo').exists)
    self.assertTrue(files.File('/bar').exists)

    # Writing an already-existing file in a new changeset should
    # copy the existing file's content and attributes.
    changeset = self.vcs.new_staging_changeset()
    titan_file = files.File('/bar', changeset=changeset)
    # Should copy from last-committed file:
    titan_file.write(meta={'color': 'red'})
    # Test original object:
    self.assertEqual('bar-versioned', titan_file.content)
    self.assertEqual('red', titan_file.meta.color)
    self.assertEqual(False, titan_file.meta.flag)  # untouched meta property.
    # Test re-inited object:
    titan_file = files.File('/bar', changeset=changeset)
    self.assertEqual('bar-versioned', titan_file.content)
    self.assertEqual('red', titan_file.meta.color)
    self.assertEqual(False, titan_file.meta.flag)  # untouched meta property.
def testFilesList(self):
    """Exercises files.Files.list(): recursion, depth, limit, and filters."""
    # Create files for testing.
    root_level = files.Files(['/index.html', '/qux'])
    first_level = files.Files(['/foo/bar'])
    second_level = files.Files([
        '/foo/bar/baz',
        '/foo/bar/baz.html',
        '/foo/bar/baz.txt',
    ])
    root_and_first_levels = files.Files.merge(root_level, first_level)
    first_and_second_levels = files.Files.merge(first_level, second_level)

    # files.Files.update().
    all_files = files.Files([])
    all_files.update(root_level)
    all_files.update(first_level)
    all_files.update(second_level)
    self.assertEqual(6, len(all_files))

    # Test __eq__ (don't use assertEqual).
    self.assertTrue(files.Files(['/a', '/b']) == files.Files(['/a', '/b']))
    self.assertFalse(files.Files(['/a', '/b']) == files.Files(['/a']))

    # Persist every test file with empty content.
    for titan_file in all_files.itervalues():
        titan_file.write('')

    # Empty.
    self.assertSameObjects(files.Files(), files.Files.list('/fake/path'))
    self.assertSameObjects(files.Files([]), files.Files.list('/fake/path'))

    # From root.
    self.assertSameObjects(root_level, files.Files.list('/'))
    titan_files = files.Files.list('/', recursive=True)
    self.assertSameObjects(all_files, titan_files)

    # From first level dir.
    self.assertSameObjects(first_level, files.Files.list('/foo'))
    self.assertSameObjects(first_level, files.Files.list('/foo/'))
    titan_files = files.Files.list('/foo', recursive=True)
    self.assertSameObjects(first_and_second_levels, titan_files)

    # From second level dir.
    self.assertSameObjects(second_level, files.Files.list('/foo/bar'))
    titan_files = files.Files.list('/foo/bar', recursive=True)
    self.assertSameObjects(second_level, titan_files)

    # Limit recursion depth.
    titan_files = files.Files.list('/', recursive=True, depth=1)
    self.assertSameObjects(root_and_first_levels, titan_files)
    titan_files = files.Files.list('/', recursive=True, depth=2)
    self.assertSameObjects(all_files, titan_files)
    titan_files = files.Files.list('/foo/', recursive=True, depth=1)
    self.assertSameObjects(first_and_second_levels, titan_files)

    # Limit the number of files returned.
    titan_files = files.Files.list('/foo', recursive=True, limit=1)
    self.assertEqual(1, len(titan_files))

    # Support trailing slashes.
    self.assertSameObjects(second_level, files.Files.list('/foo/bar/'))
    titan_files = files.Files.list('/foo/bar/', recursive=True)
    self.assertSameObjects(second_level, titan_files)

    # Custom filters:
    files.File('/a/foo').write('', meta={'color': 'red', 'count': 1})
    files.File('/a/bar/qux').write('', meta={'color': 'red', 'count': 2})
    files.File('/a/baz').write('', meta={'color': 'blue', 'count': 3})

    # Single filter:
    filters = [files.FileProperty('color') == 'red']
    titan_files = files.Files.list('/a', filters=filters)
    self.assertSameObjects(['/a/foo'], titan_files)

    # Multiple filters:
    filters = [
        files.FileProperty('color') == 'blue',
        files.FileProperty('count') == 3,
    ]
    titan_files = files.Files.list('/', recursive=True, filters=filters)
    self.assertEqual(files.Files(['/a/baz']), titan_files)

    # Recursive:
    filters = [files.FileProperty('color') == 'red']
    titan_files = files.Files.list('/', recursive=True, filters=filters)
    self.assertEqual(files.Files(['/a/foo', '/a/bar/qux']), titan_files)

    # Non-meta property: filters can also match built-in file attributes.
    user = users.TitanUser('*****@*****.**')
    filters = [
        files.FileProperty('created_by') == str(user),
        files.FileProperty('count') == 2,
    ]
    titan_files = files.Files.list('/a/', recursive=True, filters=filters)
    self.assertEqual(files.Files(['/a/bar/qux']), titan_files)

    # Error handling: malformed paths and invalid depth combinations.
    self.assertRaises(ValueError, files.Files.list, '')
    self.assertRaises(ValueError, files.Files.list, '//')
    self.assertRaises(ValueError, files.Files.list, '/..')
    self.assertRaises(
        ValueError, files.Files.list, '/', recursive=True, depth=0)
    self.assertRaises(
        ValueError, files.Files.list, '/', recursive=False, depth=1)