def test_foxx_config_management(db):
    """Exercise Foxx config get/update/replace, including missing mounts."""
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(
        mount=service_mount,
        source=service_file,
        config={},
    )

    # Test get service config
    assert db.foxx.config(service_mount) == {}

    # Test get missing service config
    with assert_raises(FoxxConfigGetError) as err:
        db.foxx.config(missing_mount)
    assert err.value.error_code == 3009

    # Test update service config
    assert db.foxx.update_config(service_mount, {}) == {'values': {}}

    # Test update missing service config
    with assert_raises(FoxxConfigUpdateError) as err:
        db.foxx.update_config(missing_mount, {})
    assert err.value.error_code == 3009

    # Test replace service config
    assert db.foxx.replace_config(service_mount, {}) == {'values': {}}

    # Test replace missing service config
    with assert_raises(FoxxConfigReplaceError) as err:
        db.foxx.replace_config(missing_mount, {})
    assert err.value.error_code == 3009
def test_user_change_password(client, sys_db):
    """Verify that only the current password authenticates after updates."""
    username = generate_username()
    password1 = generate_string()
    password2 = generate_string()

    sys_db.create_user(username, password1)
    sys_db.update_permission(username, 'rw', sys_db.name)

    db1 = client.db(sys_db.name, username, password1)
    db2 = client.db(sys_db.name, username, password2)

    # Check authentication
    assert isinstance(db1.properties(), dict)
    with assert_raises(DatabasePropertiesError) as err:
        db2.properties()
    assert err.value.http_code == 401

    # Update the user password and check again
    sys_db.update_user(username, password2)
    assert isinstance(db2.properties(), dict)
    with assert_raises(DatabasePropertiesError) as err:
        db1.properties()
    assert err.value.http_code == 401

    # Replace the user password back and check again
    sys_db.update_user(username, password1)
    assert isinstance(db1.properties(), dict)
    with assert_raises(DatabasePropertiesError) as err:
        db2.properties()
    assert err.value.http_code == 401
def test_foxx_development_toggle(db):
    """Toggle Foxx development mode on and off, covering missing mounts."""
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(
        mount=service_mount,
        source=service_file,
        development=False,
    )

    # Test enable development mode
    service = db.foxx.enable_development(service_mount)
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['development'] is True

    # Test enable development mode for missing service
    with assert_raises(FoxxDevModeEnableError) as err:
        db.foxx.enable_development(missing_mount)
    assert err.value.error_code == 3009

    # Test disable development mode
    service = db.foxx.disable_development(service_mount)
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['development'] is False

    # Test disable development mode for missing service
    with assert_raises(FoxxDevModeDisableError) as err:
        db.foxx.disable_development(missing_mount)
    assert err.value.error_code == 3009
def test_delete_index(icol, bad_col):
    """Create extra indexes, delete them, then verify missing/bad-collection errors."""
    old_indexes = set(extract("id", icol.indexes()))
    icol.add_hash_index(["attr3", "attr4"], unique=True)
    icol.add_skiplist_index(["attr3", "attr4"], unique=True)
    icol.add_fulltext_index(fields=["attr3"], min_length=10)

    new_indexes = set(extract("id", icol.indexes()))
    assert new_indexes.issuperset(old_indexes)

    indexes_to_delete = new_indexes - old_indexes
    for index_id in indexes_to_delete:
        assert icol.delete_index(index_id) is True

    new_indexes = set(extract("id", icol.indexes()))
    assert new_indexes == old_indexes

    # Test delete missing indexes
    for index_id in indexes_to_delete:
        assert icol.delete_index(index_id, ignore_missing=True) is False
    for index_id in indexes_to_delete:
        with assert_raises(IndexDeleteError) as err:
            icol.delete_index(index_id, ignore_missing=False)
        assert err.value.error_code == 1212

    # Test delete indexes with bad collection
    for index_id in indexes_to_delete:
        with assert_raises(IndexDeleteError) as err:
            bad_col.delete_index(index_id, ignore_missing=False)
        assert err.value.error_code in {11, 1228}
def test_foxx_dependency_management(db):
    """Exercise Foxx dependency get/update/replace, including missing mounts."""
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(mount=service_mount, source=service_file, dependencies={})

    # Test get service dependencies
    assert db.foxx.dependencies(service_mount) == {}

    # Test get missing service dependencies
    with assert_raises(FoxxDependencyGetError) as err:
        db.foxx.dependencies(missing_mount)
    assert err.value.error_code == 3009

    # Test update service dependencies
    assert db.foxx.update_dependencies(service_mount, {}) == {'values': {}}

    # Test update missing service dependencies
    with assert_raises(FoxxDependencyUpdateError) as err:
        db.foxx.update_dependencies(missing_mount, {})
    assert err.value.error_code == 3009

    # Test replace service dependencies
    assert db.foxx.replace_dependencies(service_mount, {}) == {'values': {}}

    # Test replace missing service dependencies
    with assert_raises(FoxxDependencyReplaceError) as err:
        db.foxx.replace_dependencies(missing_mount, {})
    assert err.value.error_code == 3009
def test_aql_cache_management(db, bad_db):
    """Check AQL query cache properties, configuration, and clearing."""
    # Test get AQL cache properties
    properties = db.aql.cache.properties()
    assert 'mode' in properties
    assert 'limit' in properties

    # Test get AQL cache properties with bad database
    with assert_raises(AQLCachePropertiesError):
        bad_db.aql.cache.properties()

    # Test get AQL cache configure properties
    properties = db.aql.cache.configure(mode='on', limit=100)
    assert properties['mode'] == 'on'
    assert properties['limit'] == 100

    properties = db.aql.cache.properties()
    assert properties['mode'] == 'on'
    assert properties['limit'] == 100

    # Test get AQL cache configure properties with bad database
    with assert_raises(AQLCacheConfigureError):
        bad_db.aql.cache.configure(mode='on')

    # Test get AQL cache clear
    result = db.aql.cache.clear()
    assert isinstance(result, bool)

    # Test get AQL cache clear with bad database
    with assert_raises(AQLCacheClearError) as err:
        bad_db.aql.cache.clear()
    assert err.value.error_code == 1228
def test_foxx_dependency_management(db):
    """Exercise Foxx dependency get/update/replace, including missing mounts."""
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(
        mount=service_mount,
        source=service_file,
        dependencies={}
    )

    # Test get service dependencies
    assert db.foxx.dependencies(service_mount) == {}

    # Test get missing service dependencies
    with assert_raises(FoxxDependencyGetError) as err:
        db.foxx.dependencies(missing_mount)
    assert err.value.error_code == 3009

    # Test update service dependencies
    assert db.foxx.update_dependencies(service_mount, {}) == {'values': {}}

    # Test update missing service dependencies
    with assert_raises(FoxxDependencyUpdateError) as err:
        db.foxx.update_dependencies(missing_mount, {})
    assert err.value.error_code == 3009

    # Test replace service dependencies
    assert db.foxx.replace_dependencies(service_mount, {}) == {'values': {}}

    # Test replace missing service dependencies
    with assert_raises(FoxxDependencyReplaceError) as err:
        db.foxx.replace_dependencies(missing_mount, {})
    assert err.value.error_code == 3009
def test_replication_dump_methods(db, bad_db, col, docs, cluster):
    """Cover dump batch create/extend/delete and the dump endpoint itself."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    # Create a dump batch and remember its ID for the calls below.
    result = db.replication.create_dump_batch(ttl=1000)
    assert "id" in result and "last_tick" in result
    batch_id = result["id"]

    with assert_raises(ReplicationDumpBatchCreateError) as err:
        bad_db.replication.create_dump_batch(ttl=1000)
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}

    result = db.replication.dump(
        collection=col.name, batch_id=batch_id, chunk_size=0, deserialize=True
    )
    assert "content" in result
    assert "check_more" in result

    with assert_raises(ReplicationDumpError) as err:
        bad_db.replication.dump(collection=col.name, batch_id=batch_id)
    assert err.value.error_code == HTTP_UNAUTHORIZED

    assert db.replication.extend_dump_batch(batch_id, ttl=1000) is True

    with assert_raises(ReplicationDumpBatchExtendError) as err:
        bad_db.replication.extend_dump_batch(batch_id, ttl=1000)
    assert err.value.error_code == HTTP_UNAUTHORIZED

    assert db.replication.delete_dump_batch(batch_id) is True

    # Deleting the same batch again must fail.
    with assert_raises(ReplicationDumpBatchDeleteError) as err:
        db.replication.delete_dump_batch(batch_id)
    assert err.value.error_code in {HTTP_NOT_FOUND, CURSOR_NOT_FOUND}
def test_foxx_development_toggle(db, cluster):
    """Toggle Foxx development mode on and off, covering missing mounts."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(
        mount=service_mount,
        source=service_file,
        development=False,
    )

    # Test enable development mode
    service = db.foxx.enable_development(service_mount)
    assert service["mount"] == service_mount
    assert service["name"] == service_name
    assert service["development"] is True

    # Test enable development mode for missing service
    with assert_raises(FoxxDevModeEnableError) as err:
        db.foxx.enable_development(missing_mount)
    assert err.value.error_code == 3009

    # Test disable development mode
    service = db.foxx.disable_development(service_mount)
    assert service["mount"] == service_mount
    assert service["name"] == service_name
    assert service["development"] is False

    # Test disable development mode for missing service
    with assert_raises(FoxxDevModeDisableError) as err:
        db.foxx.disable_development(missing_mount)
    assert err.value.error_code == 3009
def test_collection_misc_methods(col, bad_col):
    """Check collection properties, configure, and truncate behavior."""
    # Test get properties
    properties = col.properties()
    assert properties['name'] == col.name
    assert properties['system'] is False

    # Test get properties with bad collection
    with assert_raises(CollectionPropertiesError) as err:
        bad_col.properties()
    assert err.value.error_code == 1228

    # Test configure properties
    prev_sync = properties['sync']
    properties = col.configure(
        sync=not prev_sync,
        journal_size=10000000
    )
    assert properties['name'] == col.name
    assert properties['system'] is False
    assert properties['sync'] is not prev_sync

    # Test configure properties with bad collection
    with assert_raises(CollectionConfigureError) as err:
        bad_col.configure(sync=True, journal_size=10000000)
    assert err.value.error_code == 1228

    # Test preconditions
    assert len(col) == 1

    # Test truncate collection
    assert col.truncate() is True
    assert len(col) == 0
def test_wal_misc_methods(sys_db, bad_db):
    """Cover WAL properties, configure, transactions, and flush endpoints."""
    # Skip early if the server does not implement the WAL API.
    try:
        sys_db.wal.properties()
    except WALPropertiesError as err:
        if err.http_code == 501:
            pytest.skip('WAL not implemented')

    # Test get properties
    properties = sys_db.wal.properties()
    assert 'oversized_ops' in properties
    assert 'log_size' in properties
    assert 'historic_logs' in properties
    assert 'reserve_logs' in properties
    assert 'throttle_wait' in properties
    assert 'throttle_limit' in properties

    # Test get properties with bad database
    with assert_raises(WALPropertiesError) as err:
        bad_db.wal.properties()
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}

    # Test configure properties
    sys_db.wal.configure(
        historic_logs=15,
        oversized_ops=False,
        log_size=30000000,
        reserve_logs=5,
        throttle_limit=0,
        throttle_wait=16000
    )
    properties = sys_db.wal.properties()
    assert properties['historic_logs'] == 15
    assert properties['oversized_ops'] is False
    assert properties['log_size'] == 30000000
    assert properties['reserve_logs'] == 5
    assert properties['throttle_limit'] == 0
    assert properties['throttle_wait'] == 16000

    # Test configure properties with bad database
    with assert_raises(WALConfigureError) as err:
        bad_db.wal.configure(log_size=2000000)
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}

    # Test get transactions
    result = sys_db.wal.transactions()
    assert 'count' in result
    assert 'last_collected' in result

    # Test get transactions with bad database
    with assert_raises(WALTransactionListError) as err:
        bad_db.wal.transactions()
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}

    # Test flush
    result = sys_db.wal.flush(garbage_collect=False, sync=False)
    assert isinstance(result, bool)

    # Test flush with bad database
    with assert_raises(WALFlushError) as err:
        bad_db.wal.flush(garbage_collect=False, sync=False)
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_vertex_collection_management(db, graph, bad_graph):
    """Create, list, and delete graph vertex collections.

    Fixes a tautological assertion: the "to" collection check compared
    ``tvcol_name`` with itself instead of checking ``tvcol.name``, so it
    could never fail (the parallel "from" check uses ``fvcol.name``).
    """
    # Test create valid "from" vertex collection
    fvcol_name = generate_col_name()
    assert not graph.has_vertex_collection(fvcol_name)
    assert not db.has_collection(fvcol_name)

    fvcol = graph.create_vertex_collection(fvcol_name)
    assert graph.has_vertex_collection(fvcol_name)
    assert db.has_collection(fvcol_name)
    assert fvcol.name == fvcol_name
    assert fvcol.graph == graph.name
    assert fvcol_name in repr(fvcol)
    assert fvcol_name in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())

    # Test create duplicate vertex collection
    with assert_raises(VertexCollectionCreateError) as err:
        graph.create_vertex_collection(fvcol_name)
    assert err.value.error_code == 1938
    assert fvcol_name in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())

    # Test create valid "to" vertex collection
    tvcol_name = generate_col_name()
    assert not graph.has_vertex_collection(tvcol_name)
    assert not db.has_collection(tvcol_name)

    tvcol = graph.create_vertex_collection(tvcol_name)
    assert graph.has_vertex_collection(tvcol_name)
    assert db.has_collection(tvcol_name)
    # Was `tvcol_name == tvcol_name` (always true); check the collection name.
    assert tvcol.name == tvcol_name
    assert tvcol.graph == graph.name
    assert tvcol_name in repr(tvcol)
    assert tvcol_name in graph.vertex_collections()
    assert tvcol_name in extract('name', db.collections())

    # Test list vertex collection via bad database
    with assert_raises(VertexCollectionListError) as err:
        bad_graph.vertex_collections()
    assert err.value.error_code in {11, 1228}

    # Test delete missing vertex collection
    with assert_raises(VertexCollectionDeleteError) as err:
        graph.delete_vertex_collection(generate_col_name())
    assert err.value.error_code in {1926, 1928}

    # Test delete "to" vertex collection with purge option
    assert graph.delete_vertex_collection(tvcol_name, purge=True) is True
    assert tvcol_name not in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())
    assert tvcol_name not in extract('name', db.collections())
    assert not graph.has_vertex_collection(tvcol_name)

    # Test delete "from" vertex collection without purge option
    assert graph.delete_vertex_collection(fvcol_name, purge=False) is True
    assert fvcol_name not in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())
    assert not graph.has_vertex_collection(fvcol_name)
def test_vertex_collection_management(db, graph, bad_graph):
    """Create, list, and delete graph vertex collections.

    Fixes a tautological assertion: the "to" collection check compared
    ``tvcol_name`` with itself instead of checking ``tvcol.name``, so it
    could never fail (the parallel "from" check uses ``fvcol.name``).
    """
    # Test create valid "from" vertex collection
    fvcol_name = generate_col_name()
    assert not graph.has_vertex_collection(fvcol_name)
    assert not db.has_collection(fvcol_name)

    fvcol = graph.create_vertex_collection(fvcol_name)
    assert graph.has_vertex_collection(fvcol_name)
    assert db.has_collection(fvcol_name)
    assert fvcol.name == fvcol_name
    assert fvcol.graph == graph.name
    assert fvcol_name in repr(fvcol)
    assert fvcol_name in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())

    # Test create duplicate vertex collection
    with assert_raises(VertexCollectionCreateError) as err:
        graph.create_vertex_collection(fvcol_name)
    assert err.value.error_code == 1938
    assert fvcol_name in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())

    # Test create valid "to" vertex collection
    tvcol_name = generate_col_name()
    assert not graph.has_vertex_collection(tvcol_name)
    assert not db.has_collection(tvcol_name)

    tvcol = graph.create_vertex_collection(tvcol_name)
    assert graph.has_vertex_collection(tvcol_name)
    assert db.has_collection(tvcol_name)
    # Was `tvcol_name == tvcol_name` (always true); check the collection name.
    assert tvcol.name == tvcol_name
    assert tvcol.graph == graph.name
    assert tvcol_name in repr(tvcol)
    assert tvcol_name in graph.vertex_collections()
    assert tvcol_name in extract('name', db.collections())

    # Test list vertex collection via bad fabric
    with assert_raises(VertexCollectionListError) as err:
        bad_graph.vertex_collections()
    assert err.value.error_code == 1228

    # Test delete missing vertex collection
    with assert_raises(VertexCollectionDeleteError) as err:
        graph.delete_vertex_collection(generate_col_name())
    assert err.value.error_code == 1926

    # Test delete "to" vertex collection with purge option
    assert graph.delete_vertex_collection(tvcol_name, purge=True) is True
    assert tvcol_name not in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())
    assert tvcol_name not in extract('name', db.collections())
    assert not graph.has_vertex_collection(tvcol_name)

    # Test delete "from" vertex collection without purge option
    assert graph.delete_vertex_collection(fvcol_name, purge=False) is True
    assert fvcol_name not in graph.vertex_collections()
    assert fvcol_name in extract('name', db.collections())
    assert not graph.has_vertex_collection(fvcol_name)
def test_auth_superuser_token(client, db_name, root_password, secret):
    """Check superuser-JWT access to JWT-secret, TLS, and encryption endpoints."""
    token = generate_jwt(secret)
    db = client.db("_system", superuser_token=token)
    bad_db = client.db("_system", superuser_token="bad_token")

    assert isinstance(db.conn, JwtSuperuserConnection)
    assert isinstance(db.version(), str)
    assert isinstance(db.properties(), dict)

    # # Test get JWT secrets
    # secrets = db.jwt_secrets()
    # assert 'active' in secrets
    # assert 'passive' in secrets

    # Test get JWT secrets with bad database
    with assert_raises(JWTSecretListError) as err:
        bad_db.jwt_secrets()
    assert err.value.error_code == FORBIDDEN

    # # Test reload JWT secrets
    # secrets = db.reload_jwt_secrets()
    # assert 'active' in secrets
    # assert 'passive' in secrets

    # Test reload JWT secrets with bad database
    with assert_raises(JWTSecretReloadError) as err:
        bad_db.reload_jwt_secrets()
    assert err.value.error_code == FORBIDDEN

    # Test get TLS data
    result = db.tls()
    assert isinstance(result, dict)

    # Test get TLS data with bad database
    with assert_raises(ServerTLSError) as err:
        bad_db.tls()
    assert err.value.error_code == FORBIDDEN

    # Test reload TLS
    result = db.reload_tls()
    assert isinstance(result, dict)

    # Test reload TLS with bad database
    with assert_raises(ServerTLSReloadError) as err:
        bad_db.reload_tls()
    assert err.value.error_code == FORBIDDEN

    # # Test get encryption
    # result = db.encryption()
    # assert isinstance(result, dict)

    # Test reload user-defined encryption keys.
    with assert_raises(ServerEncryptionError) as err:
        bad_db.encryption()
    assert err.value.error_code == FORBIDDEN
def test_arangosearch_view_management(db, bad_db, cluster):
    """Create, update, replace, and delete an arangosearch view."""
    # Test create arangosearch view
    view_name = generate_view_name()
    result = db.create_arangosearch_view(
        view_name,
        {'consolidationIntervalMsec': 50000}
    )
    assert 'id' in result
    assert result['name'] == view_name
    assert result['type'].lower() == 'arangosearch'
    assert result['consolidation_interval_msec'] == 50000
    view_id = result['id']

    # Test create duplicate arangosearch view
    with assert_raises(ViewCreateError) as err:
        db.create_arangosearch_view(
            view_name,
            {'consolidationIntervalMsec': 50000}
        )
    assert err.value.error_code == 1207

    result = db.views()
    if not cluster:
        assert len(result) == 1
        view = result[0]
        assert view['id'] == view_id
        assert view['name'] == view_name
        assert view['type'] == 'arangosearch'

    # Test update arangosearch view
    view = db.update_arangosearch_view(
        view_name,
        {'consolidationIntervalMsec': 70000}
    )
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'].lower() == 'arangosearch'
    assert view['consolidation_interval_msec'] == 70000

    # Test update arangosearch view with bad database
    with assert_raises(ViewUpdateError) as err:
        bad_db.update_arangosearch_view(
            view_name,
            {'consolidationIntervalMsec': 70000}
        )
    assert err.value.error_code in {11, 1228}

    # Test replace arangosearch view
    view = db.replace_arangosearch_view(
        view_name,
        {'consolidationIntervalMsec': 40000}
    )
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'] == 'arangosearch'
    assert view['consolidation_interval_msec'] == 40000

    # Test replace arangosearch with bad database
    with assert_raises(ViewReplaceError) as err:
        bad_db.replace_arangosearch_view(
            view_name,
            {'consolidationIntervalMsec': 70000}
        )
    assert err.value.error_code in {11, 1228}

    # Test delete arangosearch view
    assert db.delete_view(view_name, ignore_missing=False) is True
def test_aql_cache_management(db, bad_db):
    """Check AQL cache properties, configure, entries, and clear endpoints."""
    # Test get AQL cache properties
    properties = db.aql.cache.properties()
    assert "mode" in properties
    assert "max_results" in properties
    assert "max_results_size" in properties
    assert "max_entry_size" in properties
    assert "include_system" in properties

    # Test get AQL cache properties with bad database
    with assert_raises(AQLCachePropertiesError):
        bad_db.aql.cache.properties()

    # Test get AQL cache configure properties
    properties = db.aql.cache.configure(
        mode="on",
        max_results=100,
        max_results_size=10000,
        max_entry_size=10000,
        include_system=True,
    )
    assert properties["mode"] == "on"
    assert properties["max_results"] == 100
    assert properties["max_results_size"] == 10000
    assert properties["max_entry_size"] == 10000
    assert properties["include_system"] is True

    properties = db.aql.cache.properties()
    assert properties["mode"] == "on"
    assert properties["max_results"] == 100
    assert properties["max_results_size"] == 10000
    assert properties["max_entry_size"] == 10000
    assert properties["include_system"] is True

    # Test get AQL cache configure properties with bad database
    with assert_raises(AQLCacheConfigureError):
        bad_db.aql.cache.configure(mode="on")

    # Test get AQL cache entries
    result = db.aql.cache.entries()
    assert isinstance(result, list)

    # Test get AQL cache entries with bad database
    with assert_raises(AQLCacheEntriesError) as err:
        bad_db.aql.cache.entries()
    assert err.value.error_code in {11, 1228}

    # Test get AQL cache clear
    result = db.aql.cache.clear()
    assert isinstance(result, bool)

    # Test get AQL cache clear with bad database
    with assert_raises(AQLCacheClearError) as err:
        bad_db.aql.cache.clear()
    assert err.value.error_code in {11, 1228}
def test_analyzer_management(db, bad_db, cluster):
    """Create, get, list, and delete analyzers, covering error paths."""
    analyzer_name = generate_analyzer_name()
    full_analyzer_name = db.name + '::' + analyzer_name
    bad_analyzer_name = generate_analyzer_name()

    # Test create analyzer
    result = db.create_analyzer(analyzer_name, 'identity', {})
    assert result['name'] == full_analyzer_name
    assert result['type'] == 'identity'
    assert result['properties'] == {}
    assert result['features'] == []

    # Test create duplicate with bad database
    with assert_raises(AnalyzerCreateError) as err:
        bad_db.create_analyzer(analyzer_name, 'identity', {}, [])
    assert err.value.error_code in {11, 1228}

    # Test get analyzer
    result = db.analyzer(analyzer_name)
    assert result['name'] == full_analyzer_name
    assert result['type'] == 'identity'
    assert result['properties'] == {}
    assert result['features'] == []

    # Test get missing analyzer
    with assert_raises(AnalyzerGetError) as err:
        db.analyzer(bad_analyzer_name)
    assert err.value.error_code in {1202}

    # Test list analyzers
    result = db.analyzers()
    assert full_analyzer_name in [a['name'] for a in result]

    # Test list analyzers with bad database
    with assert_raises(AnalyzerListError) as err:
        bad_db.analyzers()
    assert err.value.error_code in {11, 1228}

    # Test delete analyzer
    assert db.delete_analyzer(analyzer_name, force=True) is True
    assert full_analyzer_name not in [a['name'] for a in db.analyzers()]

    # Test delete missing analyzer
    with assert_raises(AnalyzerDeleteError) as err:
        db.delete_analyzer(analyzer_name)
    assert err.value.error_code in {1202}

    # Test delete missing analyzer with ignore_missing set to True
    assert db.delete_analyzer(analyzer_name, ignore_missing=True) is False
def test_graph_properties(graph, bad_graph, db):
    """Check graph repr and properties for existing and freshly created graphs."""
    assert repr(graph) == '<Graph {}>'.format(graph.name)

    properties = graph.properties()
    assert properties['id'] == '_graphs/{}'.format(graph.name)
    assert properties['name'] == graph.name
    assert len(properties['edge_definitions']) == 1
    assert len(properties['orphan_collections']) == 2
    assert 'smart' in properties
    assert 'smart_field' in properties
    assert 'shard_count' in properties
    assert isinstance(properties['revision'], string_types)

    # Test properties with bad fabric
    with assert_raises(GraphPropertiesError):
        bad_graph.properties()

    new_graph_name = generate_graph_name()
    new_graph = db.create_graph(
        new_graph_name,
        # TODO only possible with enterprise edition
        # smart=True,
        # smart_field='foo',
        # shard_count=2
    )
    properties = new_graph.properties()
    assert properties['id'] == '_graphs/{}'.format(new_graph_name)
    assert properties['name'] == new_graph_name
    assert properties['edge_definitions'] == []
    assert properties['orphan_collections'] == []
    assert isinstance(properties['revision'], string_types)
def test_replication_inventory(sys_db, bad_db, cluster):
    """Check the replication inventory with and without all_databases."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    dump_batch = sys_db.replication.create_dump_batch(ttl=1000)
    dump_batch_id = dump_batch["id"]

    # With all_databases=True the result is keyed by database, not collection.
    result = sys_db.replication.inventory(
        batch_id=dump_batch_id, include_system=True, all_databases=True
    )
    assert isinstance(result, dict)
    assert "collections" not in result
    assert "databases" in result
    assert "state" in result
    assert "tick" in result

    result = sys_db.replication.inventory(
        batch_id=dump_batch_id, include_system=True, all_databases=False
    )
    assert isinstance(result, dict)
    assert "databases" not in result
    assert "collections" in result
    assert "state" in result
    assert "tick" in result

    with assert_raises(ReplicationInventoryError) as err:
        bad_db.replication.inventory(dump_batch_id)
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}

    sys_db.replication.delete_dump_batch(dump_batch_id)
def test_wal_tail(sys_db, bad_db, cluster):
    """Tail the write-ahead log and verify the response shape."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    result = sys_db.wal.tail(
        lower=0,
        upper=1000000,
        last_scanned=0,
        all_databases=True,
        chunk_size=1000000,
        syncer_id=None,
        server_id=None,
        client_info="test",
        barrier_id=None,
    )
    assert "content" in result
    assert "last_tick" in result
    assert "last_scanned" in result
    assert "last_included" in result
    assert isinstance(result["check_more"], bool)
    assert isinstance(result["from_present"], bool)

    # Test tick_ranges with bad database
    with assert_raises(WALTailError) as err:
        bad_db.wal.tail()
    assert err.value.http_code == HTTP_UNAUTHORIZED
def test_replication_server_id(sys_db, bad_db):
    """Check the replication server ID endpoint and its error path."""
    result = sys_db.replication.server_id()
    assert isinstance(result, str)

    with assert_raises(ReplicationServerIDError) as err:
        bad_db.replication.server_id()
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_add_fulltext_index(icol):
    """Create a fulltext index and verify multi-attribute creation fails."""
    # Test add fulltext index with one attributes
    result = icol.add_fulltext_index(
        fields=['attr1'],
        min_length=10,
        name='fulltext_index',
        in_background=True
    )
    expected_index = {
        'sparse': True,
        'type': 'fulltext',
        'fields': ['attr1'],
        'min_length': 10,
        'unique': False,
        'name': 'fulltext_index'
    }
    for key, value in expected_index.items():
        assert result[key] == value

    assert result['id'] in extract('id', icol.indexes())

    # Test add fulltext index with two attributes (should fail)
    with assert_raises(IndexCreateError) as err:
        icol.add_fulltext_index(fields=['attr1', 'attr2'])
    assert err.value.error_code == 10

    # Clean up the index
    icol.delete_index(result['id'])
def test_graph_properties(graph, bad_graph, db):
    """Check graph repr and properties for existing and freshly created graphs."""
    assert repr(graph) == '<Graph {}>'.format(graph.name)

    properties = graph.properties()
    assert properties['id'] == '_graphs/{}'.format(graph.name)
    assert properties['name'] == graph.name
    assert len(properties['edge_definitions']) == 1
    assert 'orphan_collections' in properties
    assert 'smart' in properties
    # assert 'smart_field' in properties
    assert 'shard_count' in properties
    assert isinstance(properties['revision'], string_types)

    # Test properties with bad database
    with assert_raises(GraphPropertiesError):
        bad_graph.properties()

    new_graph_name = generate_graph_name()
    new_graph = db.create_graph(
        new_graph_name,
        # TODO only possible with enterprise edition
        # smart=True,
        # smart_field='foo',
        # shard_count=2
    )
    properties = new_graph.properties()
    assert properties['id'] == '_graphs/{}'.format(new_graph_name)
    assert properties['name'] == new_graph_name
    assert properties['edge_definitions'] == []
    assert properties['orphan_collections'] == []
    assert isinstance(properties['revision'], string_types)
def test_replication_make_slave(sys_db, bad_db, url, replication):
    """Check the make-slave endpoint with a full parameter set."""
    if not replication:
        pytest.skip("Only tested for replication")

    sys_db.replication.stop_applier()

    result = sys_db.replication.make_slave(
        endpoint="tcp://192.168.1.65:8500",
        database="test",
        username="******",
        password="******",
        restrict_type="include",
        restrict_collections=["test"],
        include_system=False,
        max_connect_retries=5,
        connect_timeout=500,
        request_timeout=500,
        chunk_size=0,
        adaptive_polling=False,
        auto_resync=False,
        auto_resync_retries=0,
        initial_sync_max_wait_time=0,
        connection_retry_wait_time=0,
        idle_min_wait_time=0,
        idle_max_wait_time=0,
        require_from_present=False,
        verbose=True,
    )
    assert "endpoint" in result
    assert "database" in result

    with assert_raises(ReplicationMakeSlaveError) as err:
        bad_db.replication.make_slave(endpoint=url)
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_add_fulltext_index(icol):
    """Create a fulltext index and verify multi-attribute creation fails."""
    # Test add fulltext index with one attributes
    result = icol.add_fulltext_index(
        fields=["attr1"], min_length=10, name="fulltext_index", in_background=True
    )
    expected_index = {
        "sparse": True,
        "type": "fulltext",
        "fields": ["attr1"],
        "min_length": 10,
        "unique": False,
        "name": "fulltext_index",
    }
    for key, value in expected_index.items():
        assert result[key] == value

    assert result["id"] in extract("id", icol.indexes())

    # Test add fulltext index with two attributes (should fail)
    with assert_raises(IndexCreateError) as err:
        icol.add_fulltext_index(fields=["attr1", "attr2"])
    assert err.value.error_code == 10

    # Clean up the index
    icol.delete_index(result["id"])
def test_load_indexes(icol, bad_col):
    """Load collection indexes into memory and verify the bad-collection error."""
    # Test load indexes
    assert icol.load_indexes() is True

    # Test load indexes with bad collection
    with assert_raises(IndexLoadError) as err:
        bad_col.load_indexes()
    assert err.value.error_code in {11, 1228}
def test_auth_invalid_method(client, db_name, username, password):
    """An unknown auth_method must raise ValueError mentioning the problem."""
    with assert_raises(ValueError) as err:
        client.db(
            name=db_name,
            username=username,
            password=password,
            verify=True,
            auth_method='bad_method'
        )
    assert 'invalid auth_method' in str(err.value)
def test_replication_cluster_inventory(sys_db, bad_db, cluster):
    """Check the cluster inventory endpoint (cluster setups only)."""
    if cluster:
        result = sys_db.replication.cluster_inventory(include_system=True)
        assert isinstance(result, dict)

        with assert_raises(ReplicationClusterInventoryError) as err:
            bad_db.replication.cluster_inventory(include_system=True)
        assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_aql_function_management(db, bad_db):
    """Create, list, and delete user-defined AQL functions and groups."""
    fn_group = 'functions::temperature'
    fn_name_1 = 'functions::temperature::celsius_to_fahrenheit'
    fn_body_1 = 'function (celsius) { return celsius * 1.8 + 32; }'
    fn_name_2 = 'functions::temperature::fahrenheit_to_celsius'
    fn_body_2 = 'function (fahrenheit) { return (fahrenheit - 32) / 1.8; }'
    bad_fn_name = 'functions::temperature::should_not_exist'
    bad_fn_body = 'function (celsius) { invalid syntax }'

    # Test list AQL functions with bad database
    with assert_raises(AQLFunctionListError) as err:
        bad_db.aql.functions()
    assert err.value.error_code == 1228

    # Test create invalid AQL function
    with assert_raises(AQLFunctionCreateError) as err:
        db.aql.create_function(bad_fn_name, bad_fn_body)
    assert err.value.error_code == 1581

    # Test create AQL function one
    db.aql.create_function(fn_name_1, fn_body_1)
    assert db.aql.functions() == {fn_name_1: fn_body_1}

    # Test create AQL function one again (idempotency)
    db.aql.create_function(fn_name_1, fn_body_1)
    assert db.aql.functions() == {fn_name_1: fn_body_1}

    # Test create AQL function two
    db.aql.create_function(fn_name_2, fn_body_2)
    assert db.aql.functions() == {fn_name_1: fn_body_1, fn_name_2: fn_body_2}

    # Test delete AQL function one
    assert db.aql.delete_function(fn_name_1) is True
    assert db.aql.functions() == {fn_name_2: fn_body_2}

    # Test delete missing AQL function
    with assert_raises(AQLFunctionDeleteError) as err:
        db.aql.delete_function(fn_name_1)
    assert err.value.error_code == 1582
    assert db.aql.delete_function(fn_name_1, ignore_missing=True) is False
    assert db.aql.functions() == {fn_name_2: fn_body_2}

    # Test delete AQL function group
    assert db.aql.delete_function(fn_group, group=True) is True
    assert db.aql.functions() == {}
def test_user_create_with_new_database(client, sys_db, cluster):
    """Create a database with three users and verify per-user access."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    db_name = generate_db_name()
    # (username, password, active) triples for the users created below.
    credentials = [
        (generate_username(), generate_string(), True),
        (generate_username(), generate_string(), True),
        (generate_username(), generate_string(), False),
    ]
    created = sys_db.create_database(
        name=db_name,
        users=[
            {"username": user, "password": pwd, "active": active}
            for user, pwd, active in credentials
        ],
    )
    assert created is True
    for user, _, _ in credentials:
        sys_db.update_permission(user, permission="rw", database=db_name)

    # Test if the users were created properly
    registered = extract("username", sys_db.users())
    assert all(user in registered for user, _, _ in credentials)

    # The two active users must be able to authenticate.
    for user, pwd, _ in credentials[:2]:
        client.db(db_name, user, pwd).properties()

    # The inactive third user must be rejected with HTTP 401.
    user, pwd, _ = credentials[2]
    db = client.db(db_name, user, pwd)
    with assert_raises(DatabasePropertiesError) as err:
        db.properties()
    assert err.value.http_code == 401
def test_replication_first_tick(sys_db, bad_db, cluster):
    """Logger first tick is a string; a bad database raises the error."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")
    first_tick = sys_db.replication.logger_first_tick()
    assert isinstance(first_tick, str)
    with assert_raises(ReplicationLoggerFirstTickError) as err:
        bad_db.replication.logger_first_tick()
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_cluster_server_role(sys_db, bad_db, cluster):
    """Server role is a string; a bad database raises the error."""
    if not cluster:
        pytest.skip('Only tested in a cluster setup')
    role = sys_db.cluster.server_role()
    assert isinstance(role, str)
    with assert_raises(ClusterServerRoleError) as err:
        bad_db.cluster.server_role()
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
def test_implements_raises_error_if_method_does_not_exist_on_spec(self):
    """@implements must reject a fake exposing a method the spec lacks."""
    with assert_raises(splenda.MethodMismatchException) as ctx:
        @splenda.implements(spec=ServiceToFake)
        class Fake(object):
            def some_other_method(self):
                return 1
    expected = "Fake implements some_other_method. ServiceToFake does not."
    self.assertEqual(str(ctx.exception), expected)
def test_pregel_management(db, graph, cluster):
    """Pregel job lifecycle: create, inspect, and delete a job."""
    if cluster:
        pytest.skip("Not tested in a cluster setup")

    # Test create pregel job
    job_id = db.pregel.create_job(
        graph.name,
        "pagerank",
        store=False,
        max_gss=100,
        thread_count=1,
        async_mode=False,
        result_field="result",
        algorithm_params={"threshold": 0.000001},
    )
    assert isinstance(job_id, int)

    # Test create pregel job with unsupported algorithm
    with assert_raises(PregelJobCreateError) as err:
        db.pregel.create_job(graph.name, "invalid")
    assert err.value.error_code in {4, 10, 1600}

    # Test get existing pregel job
    job = db.pregel.job(job_id)
    assert isinstance(job["state"], str)
    assert isinstance(job["aggregators"], dict)
    assert isinstance(job["gss"], int)
    assert isinstance(job["received_count"], int)
    assert isinstance(job["send_count"], int)
    assert "total_runtime" in job

    # Test delete existing pregel job
    assert db.pregel.delete_job(job_id) is True
    # Give the server a moment to drop the job before probing for it.
    time.sleep(0.2)
    with assert_raises(PregelJobGetError) as err:
        db.pregel.job(job_id)
    assert err.value.error_code in {4, 10, 1600}

    # Test delete missing pregel job
    with assert_raises(PregelJobDeleteError) as err:
        db.pregel.delete_job(generate_string())
    assert err.value.error_code in {4, 10, 1600}
def test_implements_raises_error_if_method_has_different_arguments(self):
    """@implements must reject a fake whose method arity differs from spec."""
    with assert_raises(splenda.MethodArgumentMismatchException) as ctx:
        @splenda.implements(spec=ServiceToFake)
        class Fake(object):
            def method_to_fake_with_args(self, arg1):
                pass
    # Expected message reproduced byte-for-byte (including "then").
    expected = (
        "Fake implements method_to_fake_with_args with a"
        " different number of arguments then ServiceToFake."
    )
    self.assertEqual(str(ctx.exception), expected)
def test_aql_cache_management(db, bad_db):
    """AQL query cache: properties, configure, entries, and clear."""
    # Test get AQL cache properties
    properties = db.aql.cache.properties()
    assert 'mode' in properties
    assert 'limit' in properties

    # Test get AQL cache properties with bad database
    with assert_raises(AQLCachePropertiesError):
        bad_db.aql.cache.properties()

    # Test get AQL cache configure properties
    properties = db.aql.cache.configure(mode='on', limit=100)
    assert properties['mode'] == 'on'
    assert properties['limit'] == 100

    # Re-read to confirm the configuration was persisted server-side.
    properties = db.aql.cache.properties()
    assert properties['mode'] == 'on'
    assert properties['limit'] == 100

    # Test get AQL cache configure properties with bad database
    with assert_raises(AQLCacheConfigureError):
        bad_db.aql.cache.configure(mode='on')

    # Test get AQL cache entries
    result = db.aql.cache.entries()
    assert isinstance(result, list)

    # Test get AQL cache entries with bad database
    with assert_raises(AQLCacheEntriesError) as err:
        bad_db.aql.cache.entries()
    assert err.value.error_code in {11, 1228}

    # Test get AQL cache clear
    result = db.aql.cache.clear()
    assert isinstance(result, bool)

    # Test get AQL cache clear with bad database
    with assert_raises(AQLCacheClearError) as err:
        bad_db.aql.cache.clear()
    assert err.value.error_code in {11, 1228}
def test_database_management(db, sys_db, bad_db):
    """Database list/create/delete, including permission and duplicate errors."""
    # Test list databases
    result = sys_db.databases()
    assert '_system' in result

    # Test list databases with bad database
    with assert_raises(DatabaseListError):
        bad_db.databases()

    # Test create database
    db_name = generate_db_name()
    assert sys_db.has_database(db_name) is False
    assert sys_db.create_database(db_name) is True
    assert sys_db.has_database(db_name) is True

    # Test create duplicate database
    with assert_raises(DatabaseCreateError) as err:
        sys_db.create_database(db_name)
    assert err.value.error_code == 1207

    # Test create database without permissions (non-system db handle)
    with assert_raises(DatabaseCreateError) as err:
        db.create_database(db_name)
    assert err.value.error_code == 1230

    # Test delete database without permissions
    with assert_raises(DatabaseDeleteError) as err:
        db.delete_database(db_name)
    assert err.value.error_code == 1230

    # Test delete database
    assert sys_db.delete_database(db_name) is True
    assert db_name not in sys_db.databases()

    # Test delete missing database
    with assert_raises(DatabaseDeleteError) as err:
        sys_db.delete_database(db_name)
    assert err.value.error_code in {11, 1228}
    assert sys_db.delete_database(db_name, ignore_missing=True) is False
def test_pregel_management(db, graph):
    """Pregel job lifecycle: create, fetch, and delete a job."""
    # Test create pregel job
    job_id = db.pregel.create_job(
        graph.name,
        'pagerank',
        store=False,
        max_gss=100,
        thread_count=1,
        async_mode=False,
        result_field='result',
        algorithm_params={'threshold': 0.000001}
    )
    assert isinstance(job_id, int)

    # Test create pregel job with unsupported algorithm
    with assert_raises(PregelJobCreateError) as err:
        db.pregel.create_job(graph.name, 'invalid')
    assert err.value.error_code in {4, 10}

    # Test get existing pregel job
    job = db.pregel.job(job_id)
    assert isinstance(job['state'], string_types)
    assert isinstance(job['aggregators'], dict)
    assert isinstance(job['gss'], int)
    assert isinstance(job['received_count'], int)
    assert isinstance(job['send_count'], int)
    assert isinstance(job['total_runtime'], float)

    # Test delete existing pregel job
    assert db.pregel.delete_job(job_id) is True
    with assert_raises(PregelJobGetError) as err:
        db.pregel.job(job_id)
    assert err.value.error_code in {4, 10}

    # Test delete missing pregel job
    with assert_raises(PregelJobDeleteError) as err:
        db.pregel.delete_job(generate_string())
    assert err.value.error_code in {4, 10}
def test_user_create_with_new_database(client, sys_db):
    """Create a database with three users and verify per-user access."""
    db_name = generate_db_name()
    username1 = generate_username()
    username2 = generate_username()
    username3 = generate_username()
    password1 = generate_string()
    password2 = generate_string()
    password3 = generate_string()
    result = sys_db.create_database(
        name=db_name,
        users=[
            {'username': username1, 'password': password1, 'active': True},
            {'username': username2, 'password': password2, 'active': True},
            {'username': username3, 'password': password3, 'active': False},
        ]
    )
    assert result is True
    sys_db.update_permission(username1, permission='rw', database=db_name)
    sys_db.update_permission(username2, permission='rw', database=db_name)
    sys_db.update_permission(username3, permission='rw', database=db_name)

    # Test if the users were created properly
    usernames = extract('username', sys_db.users())
    assert all(u in usernames for u in [username1, username2, username3])

    # Test if the first user has access to the database
    db = client.db(db_name, username1, password1)
    db.properties()

    # Test if the second user also has access to the database
    db = client.db(db_name, username2, password2)
    db.properties()

    # Test if the third user has access to the database (should not,
    # since it was created with active=False)
    db = client.db(db_name, username3, password3)
    with assert_raises(DatabasePropertiesError) as err:
        db.properties()
    assert err.value.http_code == 401
def test_foxx_service_management(db, bad_db):
    """Foxx service CRUD: list, create, get, update, replace, delete.

    Steps run in strict order against one mount; each step's assertions
    depend on the state left by the previous step.
    """
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Test list services
    for service in db.foxx.services():
        assert 'development' in service
        assert 'legacy' in service
        assert 'mount' in service
        assert 'name' in service
        assert 'provides' in service
        assert 'version' in service

    # Test list services with bad database
    with assert_raises(FoxxServiceListError) as err:
        bad_db.foxx.services()
    assert err.value.error_code in {11, 1228}

    # Test create service
    service = db.foxx.create_service(
        mount=service_mount,
        source=service_file,
        config={},
        dependencies={},
        development=True,
        setup=True,
        legacy=True
    )
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['development'] is True
    assert service['legacy'] is True
    assert service['manifest']['configuration'] == {}
    assert service['manifest']['dependencies'] == {}

    # Test create duplicate service (mount already taken)
    with assert_raises(FoxxServiceCreateError) as err:
        db.foxx.create_service(service_mount, 'service.zip')
    assert err.value.error_code == 3011

    # Test get service
    service = db.foxx.service(service_mount)
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['development'] is True
    assert service['manifest']['configuration'] == {}
    assert service['manifest']['dependencies'] == {}
    assert 'checksum' in service
    assert 'options' in service
    assert 'path' in service
    assert 'version' in service

    # Test get missing service
    with assert_raises(FoxxServiceGetError) as err:
        db.foxx.service(missing_mount)
    assert err.value.error_code == 3009

    # Test update service (flips legacy off)
    service = db.foxx.update_service(
        mount=service_mount,
        source=service_file,
        config={},
        dependencies={},
        teardown=True,
        setup=True,
        legacy=False
    )
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['legacy'] is False

    # Test update missing service
    with assert_raises(FoxxServiceUpdateError) as err:
        db.foxx.update_service(missing_mount, 'service.zip')
    assert err.value.error_code == 3009

    # Test replace service (flips legacy back on)
    service = db.foxx.replace_service(
        mount=service_mount,
        source=service_file,
        config={},
        dependencies={},
        teardown=True,
        setup=True,
        legacy=True,
        force=False
    )
    assert service['mount'] == service_mount
    assert service['name'] == 'test'
    assert service['legacy'] is True

    # Test replace missing service
    with assert_raises(FoxxServiceReplaceError) as err:
        db.foxx.replace_service(missing_mount, 'service.zip')
    assert err.value.error_code == 3009

    # Test delete service
    assert db.foxx.delete_service(service_mount, teardown=False) is True
    assert service_mount not in extract('mount', db.foxx.services())

    # Test delete missing service
    with assert_raises(FoxxServiceDeleteError) as err:
        db.foxx.delete_service(missing_mount, teardown=False)
    assert err.value.error_code == 3009
def test_traverse(db):
    """Graph traversal: defaults, bad inputs, depth limits, DFS/BFS, filters.

    Builds a small professor->class "teaches" graph and traverses it with
    various option combinations.
    """
    # Create test graph, vertex and edge collections
    school = db.create_graph(generate_graph_name())
    profs = school.create_vertex_collection(generate_col_name())
    classes = school.create_vertex_collection(generate_col_name())
    teaches = school.create_edge_definition(
        edge_collection=generate_col_name(),
        from_vertex_collections=[profs.name],
        to_vertex_collections=[classes.name]
    )
    # Insert test vertices into the graph
    profs.insert({'_key': 'anna', 'name': 'Professor Anna'})
    profs.insert({'_key': 'andy', 'name': 'Professor Andy'})
    classes.insert({'_key': 'CSC101', 'name': 'Introduction to CS'})
    classes.insert({'_key': 'MAT223', 'name': 'Linear Algebra'})
    classes.insert({'_key': 'STA201', 'name': 'Statistics'})
    classes.insert({'_key': 'MAT101', 'name': 'Calculus I'})
    classes.insert({'_key': 'MAT102', 'name': 'Calculus II'})

    # Insert test edges into the graph
    teaches.insert({
        '_from': '{}/anna'.format(profs.name),
        '_to': '{}/CSC101'.format(classes.name)
    })
    teaches.insert({
        '_from': '{}/anna'.format(profs.name),
        '_to': '{}/STA201'.format(classes.name)
    })
    teaches.insert({
        '_from': '{}/anna'.format(profs.name),
        '_to': '{}/MAT223'.format(classes.name)
    })
    teaches.insert({
        '_from': '{}/andy'.format(profs.name),
        '_to': '{}/MAT101'.format(classes.name)
    })
    teaches.insert({
        '_from': '{}/andy'.format(profs.name),
        '_to': '{}/MAT102'.format(classes.name)
    })
    teaches.insert({
        '_from': '{}/andy'.format(profs.name),
        '_to': '{}/MAT223'.format(classes.name)
    })

    # Traverse the graph with default settings
    result = school.traverse('{}/anna'.format(profs.name))
    visited = extract('_key', result['vertices'])
    assert visited == ['CSC101', 'MAT223', 'STA201', 'anna']

    for path in result['paths']:
        for vertex in path['vertices']:
            assert set(vertex) == {'_id', '_key', '_rev', 'name'}
        for edge in path['edges']:
            assert set(edge) == {'_id', '_key', '_rev', '_to', '_from'}

    result = school.traverse('{}/andy'.format(profs.name))
    visited = extract('_key', result['vertices'])
    assert visited == ['MAT101', 'MAT102', 'MAT223', 'andy']

    # Traverse the graph with an invalid start vertex
    with assert_raises(GraphTraverseError):
        school.traverse('invalid')
    with assert_raises(GraphTraverseError):
        bad_col_name = generate_col_name()
        school.traverse('{}/hanna'.format(bad_col_name))
    with assert_raises(GraphTraverseError):
        school.traverse('{}/anderson'.format(profs.name))

    # Traverse the graph with max iteration of 0
    with assert_raises(GraphTraverseError):
        school.traverse('{}/andy'.format(profs.name), max_iter=0)

    # Traverse the graph with max depth of 0 (only the start vertex)
    result = school.traverse('{}/andy'.format(profs.name), max_depth=0)
    assert extract('_key', result['vertices']) == ['andy']

    result = school.traverse('{}/anna'.format(profs.name), max_depth=0)
    assert extract('_key', result['vertices']) == ['anna']

    # Traverse the graph with min depth of 2 (nothing is that deep here)
    result = school.traverse('{}/andy'.format(profs.name), min_depth=2)
    assert extract('_key', result['vertices']) == []

    result = school.traverse('{}/anna'.format(profs.name), min_depth=2)
    assert extract('_key', result['vertices']) == []

    # Traverse the graph with DFS and BFS
    result = school.traverse(
        {'_id': '{}/anna'.format(profs.name)},
        strategy='dfs',
        direction='any',
    )
    dfs_vertices = extract('_key', result['vertices'])

    result = school.traverse(
        {'_id': '{}/anna'.format(profs.name)},
        strategy='bfs',
        direction='any'
    )
    bfs_vertices = extract('_key', result['vertices'])

    # Visit order may differ between strategies, so compare sorted keys.
    assert sorted(dfs_vertices) == sorted(bfs_vertices)

    # Traverse the graph with filter function
    result = school.traverse(
        {'_id': '{}/andy'.format(profs.name)},
        filter_func='if (vertex._key == "MAT101") {return "exclude";} return;'
    )
    assert extract('_key', result['vertices']) == ['MAT102', 'MAT223', 'andy']

    # Traverse the graph with global uniqueness (should be same as before)
    result = school.traverse(
        {'_id': '{}/andy'.format(profs.name)},
        vertex_uniqueness='global',
        edge_uniqueness='global',
        filter_func='if (vertex._key == "MAT101") {return "exclude";} return;'
    )
    assert extract('_key', result['vertices']) == ['MAT102', 'MAT223', 'andy']

    # A start document without an "_id" field is rejected client-side
    with assert_raises(DocumentParseError) as err:
        school.traverse({})
    assert err.value.message == 'field "_id" required'
def test_aql_function_management(db, bad_db):
    """AQL user-function CRUD (list-of-dicts API with 'is_new'/'deleted').

    Order matters: each step asserts against state left by the previous one.
    """
    fn_group = 'functions::temperature'
    fn_name_1 = 'functions::temperature::celsius_to_fahrenheit'
    fn_body_1 = 'function (celsius) { return celsius * 1.8 + 32; }'
    fn_name_2 = 'functions::temperature::fahrenheit_to_celsius'
    fn_body_2 = 'function (fahrenheit) { return (fahrenheit - 32) / 1.8; }'
    bad_fn_name = 'functions::temperature::should_not_exist'
    bad_fn_body = 'function (celsius) { invalid syntax }'

    # Test list AQL functions (none registered yet)
    assert db.aql.functions() == []

    # Test list AQL functions with bad database
    with assert_raises(AQLFunctionListError) as err:
        bad_db.aql.functions()
    assert err.value.error_code in {11, 1228}

    # Test create invalid AQL function (body fails server-side parsing)
    with assert_raises(AQLFunctionCreateError) as err:
        db.aql.create_function(bad_fn_name, bad_fn_body)
    assert err.value.error_code == 1581

    # Test create AQL function one
    assert db.aql.create_function(fn_name_1, fn_body_1) == {'is_new': True}
    functions = db.aql.functions()
    assert len(functions) == 1
    assert functions[0]['name'] == fn_name_1
    assert functions[0]['code'] == fn_body_1
    assert 'is_deterministic' in functions[0]

    # Test create AQL function one again (idempotency)
    assert db.aql.create_function(fn_name_1, fn_body_1) == {'is_new': False}
    functions = db.aql.functions()
    assert len(functions) == 1
    assert functions[0]['name'] == fn_name_1
    assert functions[0]['code'] == fn_body_1
    assert 'is_deterministic' in functions[0]

    # Test create AQL function two
    assert db.aql.create_function(fn_name_2, fn_body_2) == {'is_new': True}
    # Sort by name since server-side listing order is not asserted.
    functions = sorted(db.aql.functions(), key=lambda x: x['name'])
    assert len(functions) == 2
    assert functions[0]['name'] == fn_name_1
    assert functions[0]['code'] == fn_body_1
    assert functions[1]['name'] == fn_name_2
    assert functions[1]['code'] == fn_body_2
    assert 'is_deterministic' in functions[0]
    assert 'is_deterministic' in functions[1]

    # Test delete AQL function one
    assert db.aql.delete_function(fn_name_1) == {'deleted': 1}
    functions = db.aql.functions()
    assert len(functions) == 1
    assert functions[0]['name'] == fn_name_2
    assert functions[0]['code'] == fn_body_2

    # Test delete missing AQL function
    with assert_raises(AQLFunctionDeleteError) as err:
        db.aql.delete_function(fn_name_1)
    assert err.value.error_code == 1582
    assert db.aql.delete_function(fn_name_1, ignore_missing=True) is False
    functions = db.aql.functions()
    assert len(functions) == 1
    assert functions[0]['name'] == fn_name_2
    assert functions[0]['code'] == fn_body_2

    # Test delete AQL function group (removes everything under the prefix)
    assert db.aql.delete_function(fn_group, group=True) == {'deleted': 1}
    assert db.aql.functions() == []
def test_aql_query_management(db, bad_db, col, docs):
    """AQL explain/validate/execute plus tracking, listing and killing queries.

    The execute step mutates documents in `col`, and the tracking step
    toggles then restores server-wide tracking settings.
    """
    plan_fields = [
        'estimatedNrItems',
        'estimatedCost',
        'rules',
        'variables',
        'collections',
    ]
    # Test explain invalid query
    with assert_raises(AQLQueryExplainError) as err:
        db.aql.explain('INVALID QUERY')
    assert err.value.error_code == 1501

    # Test explain valid query with all_plans set to False
    plan = db.aql.explain(
        'FOR d IN {} RETURN d'.format(col.name),
        all_plans=False,
        opt_rules=['-all', '+use-index-range']
    )
    assert all(field in plan for field in plan_fields)

    # Test explain valid query with all_plans set to True
    plans = db.aql.explain(
        'FOR d IN {} RETURN d'.format(col.name),
        all_plans=True,
        opt_rules=['-all', '+use-index-range'],
        max_plans=10
    )
    for plan in plans:
        assert all(field in plan for field in plan_fields)
    assert len(plans) < 10

    # Test validate invalid query
    with assert_raises(AQLQueryValidateError) as err:
        db.aql.validate('INVALID QUERY')
    assert err.value.error_code == 1501

    # Test validate valid query
    result = db.aql.validate('FOR d IN {} RETURN d'.format(col.name))
    assert 'ast' in result
    assert 'bind_vars' in result
    assert 'collections' in result
    assert 'parsed' in result

    # Test execute invalid AQL query
    with assert_raises(AQLQueryExecuteError) as err:
        db.aql.execute('INVALID QUERY')
    assert err.value.error_code == 1501

    # Test execute valid query (exercises essentially every execute option)
    db.collection(col.name).import_bulk(docs)
    cursor = db.aql.execute(
        '''
        FOR d IN {col}
            UPDATE {{_key: d._key, _val: @val }} IN {col}
            RETURN NEW
        '''.format(col=col.name),
        count=True,
        batch_size=1,
        ttl=10,
        bind_vars={'val': 42},
        full_count=True,
        max_plans=1000,
        optimizer_rules=['+all'],
        cache=True,
        memory_limit=1000000,
        fail_on_warning=False,
        profile=True,
        max_transaction_size=100000,
        max_warning_count=10,
        intermediate_commit_count=1,
        intermediate_commit_size=1000,
        satellite_sync_wait=False,
        write_collections=[col.name],
        read_collections=[col.name],
        stream=False,
        skip_inaccessible_cols=True
    )
    # Transaction contexts return an eagerly-materialized cursor with no
    # server-side state; otherwise a real server cursor is returned.
    if db.context == 'transaction':
        assert cursor.id is None
        assert cursor.type == 'cursor'
        assert cursor.batch() is not None
        assert cursor.has_more() is False
        assert cursor.count() == len(col)
        assert cursor.cached() is None
        assert cursor.statistics() is None
        assert cursor.profile() is None
        assert cursor.warnings() is None
        assert extract('_key', cursor) == extract('_key', docs)
        assert cursor.close() is None
    else:
        assert cursor.id is not None
        assert cursor.type == 'cursor'
        assert cursor.batch() is not None
        assert cursor.has_more() is True
        assert cursor.count() == len(col)
        assert cursor.cached() is False
        assert cursor.statistics() is not None
        assert cursor.profile() is not None
        assert cursor.warnings() == []
        assert extract('_key', cursor) == extract('_key', docs)
        assert cursor.close(ignore_missing=True) is False

    # Test get tracking properties with bad database
    with assert_raises(AQLQueryTrackingGetError) as err:
        bad_db.aql.tracking()
    assert err.value.error_code in {11, 1228}

    # Test get tracking properties
    tracking = db.aql.tracking()
    assert isinstance(tracking['enabled'], bool)
    assert isinstance(tracking['max_query_string_length'], int)
    assert isinstance(tracking['max_slow_queries'], int)
    assert isinstance(tracking['slow_query_threshold'], int)
    assert isinstance(tracking['track_bind_vars'], bool)
    assert isinstance(tracking['track_slow_queries'], bool)

    # Test set tracking properties with bad database (must not take effect)
    with assert_raises(AQLQueryTrackingSetError) as err:
        bad_db.aql.set_tracking(enabled=not tracking['enabled'])
    assert err.value.error_code in {11, 1228}
    assert db.aql.tracking()['enabled'] == tracking['enabled']

    # Test set tracking properties
    new_tracking = db.aql.set_tracking(
        enabled=not tracking['enabled'],
        max_query_string_length=4000,
        max_slow_queries=60,
        slow_query_threshold=15,
        track_bind_vars=not tracking['track_bind_vars'],
        track_slow_queries=not tracking['track_slow_queries']
    )
    assert new_tracking['enabled'] != tracking['enabled']
    assert new_tracking['max_query_string_length'] == 4000
    assert new_tracking['max_slow_queries'] == 60
    assert new_tracking['slow_query_threshold'] == 15
    assert new_tracking['track_bind_vars'] != tracking['track_bind_vars']
    assert new_tracking['track_slow_queries'] != tracking['track_slow_queries']

    # Make sure to revert the properties
    new_tracking = db.aql.set_tracking(
        enabled=True,
        track_bind_vars=True,
        track_slow_queries=True
    )
    assert new_tracking['enabled'] is True
    assert new_tracking['track_bind_vars'] is True
    assert new_tracking['track_slow_queries'] is True

    # Kick off some long lasting queries in the background
    db.begin_async_execution().aql.execute('RETURN SLEEP(100)')
    db.begin_async_execution().aql.execute('RETURN SLEEP(50)')

    # Test list queries
    queries = db.aql.queries()
    for query in queries:
        assert 'id' in query
        assert 'query' in query
        assert 'started' in query
        assert 'state' in query
        assert 'bind_vars' in query
        assert 'runtime' in query
    assert len(queries) == 2

    # Test list queries with bad database
    with assert_raises(AQLQueryListError) as err:
        bad_db.aql.queries()
    assert err.value.error_code in {11, 1228}

    # Test kill queries
    query_id_1, query_id_2 = extract('id', queries)
    assert db.aql.kill(query_id_1) is True

    # NOTE(review): unbounded poll — spins forever if the kill is never
    # honored by the server; consider adding a timeout.
    while len(queries) > 1:
        queries = db.aql.queries()
    assert query_id_1 not in extract('id', queries)

    assert db.aql.kill(query_id_2) is True
    while len(queries) > 0:
        queries = db.aql.queries()
    assert query_id_2 not in extract('id', queries)

    # Test kill missing queries
    with assert_raises(AQLQueryKillError) as err:
        db.aql.kill(query_id_1)
    assert err.value.error_code == 1591
    with assert_raises(AQLQueryKillError) as err:
        db.aql.kill(query_id_2)
    assert err.value.error_code == 1591

    # Test list slow queries
    assert db.aql.slow_queries() == []

    # Test list slow queries with bad database
    with assert_raises(AQLQueryListError) as err:
        bad_db.aql.slow_queries()
    assert err.value.error_code in {11, 1228}

    # Test clear slow queries
    assert db.aql.clear_slow_queries() is True

    # Test clear slow queries with bad database
    with assert_raises(AQLQueryClearError) as err:
        bad_db.aql.clear_slow_queries()
    assert err.value.error_code in {11, 1228}
def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs):
    """Edge collection CRUD: insert, link, get, update, replace, delete.

    Revision handling is chained through `old_rev`, so statement order is
    significant throughout. Transaction contexts skip revision-error and
    missing-document checks.
    """
    for vertex in fvdocs:
        fvcol.insert(vertex)
    for vertex in tvdocs:
        tvcol.insert(vertex)

    edge = edocs[0]
    key = edge['_key']

    # Test insert edge with no key (server assigns one)
    result = ecol.insert({'_from': edge['_from'], '_to': edge['_to']})
    assert result['_key'] in ecol
    assert len(ecol) == 1
    ecol.truncate()

    # Test insert vertex with ID
    edge_id = ecol.name + '/' + 'foo'
    ecol.insert({
        '_id': edge_id,
        '_from': edge['_from'],
        '_to': edge['_to']
    })
    assert 'foo' in ecol
    assert edge_id in ecol
    assert len(ecol) == 1
    ecol.truncate()

    # An "_id" pointing at a different collection is rejected client-side.
    with assert_raises(DocumentParseError) as err:
        ecol.insert({
            '_id': generate_col_name() + '/' + 'foo',
            '_from': edge['_from'],
            '_to': edge['_to']
        })
    assert 'bad collection name' in err.value.message

    # Test insert first valid edge
    result = ecol.insert(edge)
    assert result['_key'] == key
    assert '_rev' in result
    assert edge in ecol and key in ecol
    assert len(ecol) == 1
    assert ecol[key]['_from'] == edge['_from']
    assert ecol[key]['_to'] == edge['_to']

    # Test insert duplicate edge
    with assert_raises(DocumentInsertError) as err:
        assert ecol.insert(edge)
    assert err.value.error_code in {1202, 1210, 1906}
    assert len(ecol) == 1

    edge = edocs[1]
    key = edge['_key']

    # Test insert second valid edge with silent set to True
    assert ecol.insert(edge, sync=True, silent=True) is True
    assert edge in ecol and key in ecol
    assert len(ecol) == 2
    assert ecol[key]['_from'] == edge['_from']
    assert ecol[key]['_to'] == edge['_to']

    # Test insert third valid edge using link method
    from_vertex = fvcol.get(fvdocs[2])
    to_vertex = tvcol.get(tvdocs[2])
    result = ecol.link(from_vertex, to_vertex, sync=False)
    assert result['_key'] in ecol
    assert len(ecol) == 3

    # Test insert fourth valid edge using link method
    from_vertex = fvcol.get(fvdocs[2])
    to_vertex = tvcol.get(tvdocs[0])
    assert ecol.link(
        from_vertex['_id'],
        to_vertex['_id'],
        {'_id': ecol.name + '/foo'},
        sync=True,
        silent=True
    ) is True
    assert 'foo' in ecol
    assert len(ecol) == 4

    # Linking documents without "_id" fields is rejected client-side.
    with assert_raises(DocumentParseError) as err:
        assert ecol.link({}, {})
    assert err.value.message == 'field "_id" required'

    # Test get missing vertex
    bad_document_key = generate_doc_key()
    if ecol.context != 'transaction':
        assert ecol.get(bad_document_key) is None

    # Test get existing edge by body with "_key" field
    result = ecol.get({'_key': key})
    assert clean_doc(result) == edge

    # Test get existing edge by body with "_id" field
    result = ecol.get({'_id': ecol.name + '/' + key})
    assert clean_doc(result) == edge

    # Test get existing edge by key
    result = ecol.get(key)
    assert clean_doc(result) == edge

    # Test get existing edge by ID
    result = ecol.get(ecol.name + '/' + key)
    assert clean_doc(result) == edge

    # Test get existing edge with bad revision
    old_rev = result['_rev']
    with assert_raises(DocumentRevisionError) as err:
        ecol.get(key, rev=old_rev + '1')
    assert err.value.error_code in {1903, 1200}

    # Test get existing edge with bad database
    with assert_raises(DocumentGetError) as err:
        bad_ecol.get(key)
    assert err.value.error_code in {11, 1228}

    # Test update edge with a single field change
    assert 'foo' not in ecol.get(key)
    result = ecol.update({'_key': key, 'foo': 100})
    assert result['_key'] == key
    assert ecol[key]['foo'] == 100
    old_rev = ecol[key]['_rev']

    # Test update edge with multiple field changes
    result = ecol.update({'_key': key, 'foo': 200, 'bar': 300})
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 300
    old_rev = result['_rev']

    # Test update edge with correct revision
    result = ecol.update({'_key': key, '_rev': old_rev, 'bar': 400})
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 400
    old_rev = result['_rev']

    if ecol.context != 'transaction':
        # Test update edge with bad revision
        new_rev = old_rev + '1'
        with assert_raises(DocumentRevisionError, DocumentUpdateError):
            ecol.update({'_key': key, '_rev': new_rev, 'bar': 500})
        assert ecol[key]['foo'] == 200
        assert ecol[key]['bar'] == 400

    # Test update edge in missing edge collection
    with assert_raises(DocumentUpdateError) as err:
        bad_ecol.update({'_key': key, 'bar': 500})
    assert err.value.error_code in {11, 1228}
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 400

    # Test update edge with sync option
    result = ecol.update({'_key': key, 'bar': 500}, sync=True)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 500
    old_rev = result['_rev']

    # Test update edge with silent option
    assert ecol.update({'_key': key, 'bar': 600}, silent=True) is True
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 600
    assert ecol[key]['_rev'] != old_rev
    old_rev = ecol[key]['_rev']

    # Test update edge without keep_none option
    result = ecol.update({'_key': key, 'bar': None}, keep_none=True)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] is None
    old_rev = result['_rev']

    # Test update edge with keep_none option (None removes the field)
    result = ecol.update({'_key': key, 'foo': None}, keep_none=False)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert 'foo' not in ecol[key]
    assert ecol[key]['bar'] is None

    # Test replace edge with a single field change
    edge['foo'] = 100
    result = ecol.replace(edge)
    assert result['_key'] == key
    assert ecol[key]['foo'] == 100
    old_rev = ecol[key]['_rev']

    # Test replace edge with silent set to True
    edge['bar'] = 200
    assert ecol.replace(edge, silent=True) is True
    assert ecol[key]['foo'] == 100
    assert ecol[key]['bar'] == 200
    assert ecol[key]['_rev'] != old_rev
    old_rev = ecol[key]['_rev']

    # Test replace edge with multiple field changes
    edge['foo'] = 200
    edge['bar'] = 300
    result = ecol.replace(edge)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 200
    assert ecol[key]['bar'] == 300
    old_rev = result['_rev']

    # Test replace edge with correct revision
    edge['foo'] = 300
    edge['bar'] = 400
    edge['_rev'] = old_rev
    result = ecol.replace(edge)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 300
    assert ecol[key]['bar'] == 400
    old_rev = result['_rev']

    edge['bar'] = 500
    if ecol.context != 'transaction':
        # Test replace edge with bad revision
        edge['_rev'] = old_rev + key
        with assert_raises(DocumentRevisionError, DocumentReplaceError) as err:
            ecol.replace(edge)
        assert err.value.error_code in {1200, 1903}
        assert ecol[key]['foo'] == 300
        assert ecol[key]['bar'] == 400

    # Test replace edge with bad database
    with assert_raises(DocumentReplaceError) as err:
        bad_ecol.replace(edge)
    assert err.value.error_code in {11, 1228}
    assert ecol[key]['foo'] == 300
    assert ecol[key]['bar'] == 400

    # Test replace edge with sync option
    result = ecol.replace(edge, sync=True, check_rev=False)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert ecol[key]['foo'] == 300
    assert ecol[key]['bar'] == 500

    # Test delete edge with bad revision
    if ecol.context != 'transaction':
        old_rev = ecol[key]['_rev']
        edge['_rev'] = old_rev + '1'
        with assert_raises(DocumentRevisionError, DocumentDeleteError) as err:
            ecol.delete(edge, check_rev=True)
        assert err.value.error_code in {1200, 1903}
        edge['_rev'] = old_rev
        assert edge in ecol

    # Test delete missing edge
    with assert_raises(DocumentDeleteError) as err:
        ecol.delete(bad_document_key, ignore_missing=False)
    assert err.value.error_code == 1202
    if ecol.context != 'transaction':
        assert not ecol.delete(bad_document_key, ignore_missing=True)

    # Test delete existing edge with sync set to True
    assert ecol.delete(edge, sync=True, check_rev=False) is True
    if ecol.context != 'transaction':
        assert ecol[edge] is None
        assert edge not in ecol
    ecol.truncate()
def test_wal_misc_methods(sys_db, bad_db):
    """Exercise the write-ahead-log API: properties, configure,
    transactions and flush, each also checked against a bad database.
    Fixtures are presumably provided by conftest — definitions are
    outside this chunk.
    """
    # Skip the whole test when the server does not implement the WAL API.
    try:
        sys_db.wal.properties()
    except WALPropertiesError as err:
        if err.http_code == 501:
            pytest.skip('WAL not implemented')

    # Fetching the properties must expose all documented fields.
    wal_props = sys_db.wal.properties()
    for field in ('oversized_ops', 'log_size', 'historic_logs',
                  'reserve_logs', 'throttle_wait', 'throttle_limit'):
        assert field in wal_props

    # Property fetch through a bad database connection must fail.
    with assert_raises(WALPropertiesError) as err:
        bad_db.wal.properties()
    assert err.value.error_code in {11, 1228}

    # Reconfigure the WAL and verify every value round-trips.
    sys_db.wal.configure(
        historic_logs=15,
        oversized_ops=False,
        log_size=30000000,
        reserve_logs=5,
        throttle_limit=0,
        throttle_wait=16000
    )
    wal_props = sys_db.wal.properties()
    assert wal_props['historic_logs'] == 15
    assert wal_props['oversized_ops'] is False
    assert wal_props['log_size'] == 30000000
    assert wal_props['reserve_logs'] == 5
    assert wal_props['throttle_limit'] == 0
    assert wal_props['throttle_wait'] == 16000

    # Reconfiguring through a bad database connection must fail.
    with assert_raises(WALConfigureError) as err:
        bad_db.wal.configure(log_size=2000000)
    assert err.value.error_code in {11, 1228}

    # Running-transaction info must expose its counters.
    txn_info = sys_db.wal.transactions()
    assert 'count' in txn_info
    assert 'last_collected' in txn_info

    # Transaction listing through a bad database connection must fail.
    with assert_raises(WALTransactionListError) as err:
        bad_db.wal.transactions()
    assert err.value.error_code in {11, 1228}

    # Flushing the WAL returns a boolean status.
    flushed = sys_db.wal.flush(garbage_collect=False, sync=False)
    assert isinstance(flushed, bool)

    # Flushing through a bad database connection must fail.
    with assert_raises(WALFlushError) as err:
        bad_db.wal.flush(garbage_collect=False, sync=False)
    assert err.value.error_code in {11, 1228}
def test_task_management(sys_db, db, bad_db):
    """Test server task create/get/list/delete, including error paths.

    Fixtures (presumably from conftest — definitions are outside this
    chunk): ``sys_db`` is the _system database, ``db`` a test database,
    ``bad_db`` a handle whose requests are expected to fail.
    """
    # Server-side command that just echoes the task parameters.
    test_command = 'require("@arangodb").print(params);'

    # Test create task with random ID
    task_name = generate_task_name()
    new_task = db.create_task(
        name=task_name,
        command=test_command,
        params={'foo': 1, 'bar': 2},
        offset=1,
    )
    assert new_task['name'] == task_name
    assert 'print(params)' in new_task['command']
    # No period given, so the task runs once ("timed").
    assert new_task['type'] == 'timed'
    assert new_task['database'] == db.name
    assert isinstance(new_task['created'], float)
    assert isinstance(new_task['id'], string_types)

    # Test get existing task
    assert db.task(new_task['id']) == new_task

    # Test create task with specific ID
    task_name = generate_task_name()
    task_id = generate_task_id()
    new_task = db.create_task(
        name=task_name,
        command=test_command,
        params={'foo': 1, 'bar': 2},
        offset=1,
        period=10,
        task_id=task_id
    )
    assert new_task['name'] == task_name
    assert new_task['id'] == task_id
    assert 'print(params)' in new_task['command']
    # A period makes the task repeating ("periodic").
    assert new_task['type'] == 'periodic'
    assert new_task['database'] == db.name
    assert isinstance(new_task['created'], float)
    assert db.task(new_task['id']) == new_task

    # Test create duplicate task
    with assert_raises(TaskCreateError) as err:
        db.create_task(
            name=task_name,
            command=test_command,
            params={'foo': 1, 'bar': 2},
            task_id=task_id
        )
    assert err.value.error_code == 1851

    # Test list tasks (listed via _system so all databases appear)
    for task in sys_db.tasks():
        assert task['database'] in db.databases()
        assert task['type'] in {'periodic', 'timed'}
        assert isinstance(task['id'], string_types)
        assert isinstance(task['name'], string_types)
        assert isinstance(task['created'], float)
        assert isinstance(task['command'], string_types)

    # Test list tasks with bad database
    with assert_raises(TaskListError) as err:
        bad_db.tasks()
    assert err.value.error_code in {11, 1228}

    # Test get missing task
    with assert_raises(TaskGetError) as err:
        db.task(generate_task_id())
    assert err.value.error_code == 1852

    # Test delete existing task
    assert task_id in extract('id', db.tasks())
    assert db.delete_task(task_id) is True
    assert task_id not in extract('id', db.tasks())
    with assert_raises(TaskGetError) as err:
        db.task(task_id)
    assert err.value.error_code == 1852

    # Test delete missing task
    with assert_raises(TaskDeleteError) as err:
        db.delete_task(generate_task_id(), ignore_missing=False)
    assert err.value.error_code == 1852
    assert db.delete_task(task_id, ignore_missing=True) is False
def test_vertex_management(fvcol, bad_fvcol, fvdocs):
    """Test vertex CRUD on a graph vertex collection.

    The sequence is strictly order-dependent: revisions (``old_rev``)
    are chained from one operation to the next. Fixtures are presumably
    from conftest: ``fvcol`` a "from" vertex collection, ``bad_fvcol``
    a failing handle, ``fvdocs`` three sample vertex documents.
    """
    # Test insert vertex with no key
    result = fvcol.insert({})
    assert result['_key'] in fvcol
    assert len(fvcol) == 1
    fvcol.truncate()

    # Test insert vertex with ID
    vertex_id = fvcol.name + '/' + 'foo'
    fvcol.insert({'_id': vertex_id})
    assert 'foo' in fvcol
    assert vertex_id in fvcol
    assert len(fvcol) == 1
    fvcol.truncate()

    # An "_id" pointing at a different collection must be rejected.
    with assert_raises(DocumentParseError) as err:
        fvcol.insert({'_id': generate_col_name() + '/' + 'foo'})
    assert 'bad collection name' in err.value.message

    vertex = fvdocs[0]
    key = vertex['_key']
    # Test insert first valid vertex
    result = fvcol.insert(vertex, sync=True)
    assert result['_key'] == key
    assert '_rev' in result
    assert vertex in fvcol and key in fvcol
    assert len(fvcol) == 1
    assert fvcol[key]['val'] == vertex['val']

    # Test insert duplicate vertex
    with assert_raises(DocumentInsertError) as err:
        fvcol.insert(vertex)
    assert err.value.error_code in {1202, 1210}
    assert len(fvcol) == 1

    vertex = fvdocs[1]
    key = vertex['_key']
    # Test insert second valid vertex
    result = fvcol.insert(vertex)
    assert result['_key'] == key
    assert '_rev' in result
    assert vertex in fvcol and key in fvcol
    assert len(fvcol) == 2
    assert fvcol[key]['val'] == vertex['val']

    vertex = fvdocs[2]
    key = vertex['_key']
    # Test insert third valid vertex with silent set to True
    assert fvcol.insert(vertex, silent=True) is True
    assert len(fvcol) == 3
    assert fvcol[key]['val'] == vertex['val']

    # Test get missing vertex
    if fvcol.context != 'transaction':
        assert fvcol.get(generate_doc_key()) is None

    # Test get existing edge by body with "_key" field
    result = fvcol.get({'_key': key})
    assert clean_doc(result) == vertex

    # Test get existing edge by body with "_id" field
    result = fvcol.get({'_id': fvcol.name + '/' + key})
    assert clean_doc(result) == vertex

    # Test get existing vertex by key
    result = fvcol.get(key)
    assert clean_doc(result) == vertex

    # Test get existing vertex by ID
    result = fvcol.get(fvcol.name + '/' + key)
    assert clean_doc(result) == vertex

    # Test get existing vertex with bad revision
    old_rev = result['_rev']
    with assert_raises(DocumentRevisionError) as err:
        fvcol.get(key, rev=old_rev + '1', check_rev=True)
    assert err.value.error_code in {1903, 1200}

    # Test get existing vertex with bad database
    with assert_raises(DocumentGetError) as err:
        bad_fvcol.get(key)
    assert err.value.error_code in {11, 1228}

    # Test update vertex with a single field change
    assert 'foo' not in fvcol.get(key)
    result = fvcol.update({'_key': key, 'foo': 100})
    assert result['_key'] == key
    assert fvcol[key]['foo'] == 100
    old_rev = fvcol[key]['_rev']

    # Test update vertex with silent set to True
    assert 'bar' not in fvcol[vertex]
    assert fvcol.update({'_key': key, 'bar': 200}, silent=True) is True
    assert fvcol[vertex]['bar'] == 200
    assert fvcol[vertex]['_rev'] != old_rev
    old_rev = fvcol[key]['_rev']

    # Test update vertex with multiple field changes
    result = fvcol.update({'_key': key, 'foo': 200, 'bar': 300})
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] == 300
    old_rev = result['_rev']

    # Test update vertex with correct revision
    result = fvcol.update({'_key': key, '_rev': old_rev, 'bar': 400})
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] == 400
    old_rev = result['_rev']

    # # Test update vertex with bad revision
    # if fvcol.context != 'transaction':
    #     new_rev = old_rev + '1'
    #     with assert_raises(DocumentRevisionError) as err:
    #         fvcol.update({'_key': key, '_rev': new_rev, 'bar': 500})
    #     assert err.value.error_code == 1903
    #     assert fvcol[key]['foo'] == 200
    #     assert fvcol[key]['bar'] == 400

    # Test update vertex in missing vertex collection
    with assert_raises(DocumentUpdateError) as err:
        bad_fvcol.update({'_key': key, 'bar': 500})
    assert err.value.error_code in {11, 1228}
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] == 400

    # Test update vertex with sync set to True
    result = fvcol.update({'_key': key, 'bar': 500}, sync=True)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] == 500
    old_rev = result['_rev']

    # Test update vertex with keep_none set to True
    result = fvcol.update({'_key': key, 'bar': None}, keep_none=True)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] is None
    old_rev = result['_rev']

    # Test update vertex with keep_none set to False
    result = fvcol.update({'_key': key, 'foo': None}, keep_none=False)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert 'foo' not in fvcol[key]
    assert fvcol[key]['bar'] is None
    old_rev = result['_rev']

    # # Test update vertex with return_new and return_old set to True
    # result = fvcol.update({'_key': key}, return_new=True, return_old=True)
    # assert result['_key'] == key
    # assert result['_old_rev'] == old_rev
    # assert 'old' in result
    # assert 'new' in result
    # assert 'foo' not in fvcol[key]
    # assert fvcol[key]['bar'] is None

    # Test replace vertex with a single field change
    result = fvcol.replace({'_key': key, 'baz': 100})
    assert result['_key'] == key
    assert 'foo' not in fvcol[key]
    assert 'bar' not in fvcol[key]
    assert fvcol[key]['baz'] == 100
    old_rev = result['_rev']

    # Test replace vertex with silent set to True
    assert fvcol.replace({'_key': key, 'bar': 200}, silent=True) is True
    assert 'foo' not in fvcol[key]
    assert 'baz' not in fvcol[vertex]
    assert fvcol[vertex]['bar'] == 200
    assert len(fvcol) == 3
    assert fvcol[vertex]['_rev'] != old_rev
    old_rev = fvcol[vertex]['_rev']

    # Test replace vertex with multiple field changes
    vertex = {'_key': key, 'foo': 200, 'bar': 300}
    result = fvcol.replace(vertex)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert clean_doc(fvcol[key]) == vertex
    old_rev = result['_rev']

    # Test replace vertex with correct revision
    vertex = {'_key': key, '_rev': old_rev, 'bar': 500}
    result = fvcol.replace(vertex)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert clean_doc(fvcol[key]) == clean_doc(vertex)
    old_rev = result['_rev']

    # Test replace vertex with bad revision
    if fvcol.context != 'transaction':
        new_rev = old_rev + '10'
        vertex = {'_key': key, '_rev': new_rev, 'bar': 600}
        with assert_raises(DocumentRevisionError, DocumentReplaceError) as err:
            fvcol.replace(vertex)
        assert err.value.error_code in {1200, 1903}
        assert fvcol[key]['bar'] == 500
        assert 'foo' not in fvcol[key]

    # Test replace vertex with bad database
    with assert_raises(DocumentReplaceError) as err:
        bad_fvcol.replace({'_key': key, 'bar': 600})
    assert err.value.error_code in {11, 1228}
    assert fvcol[key]['bar'] == 500
    assert 'foo' not in fvcol[key]

    # Test replace vertex with sync set to True
    vertex = {'_key': key, 'bar': 400, 'foo': 200}
    result = fvcol.replace(vertex, sync=True)
    assert result['_key'] == key
    assert result['_old_rev'] == old_rev
    assert fvcol[key]['foo'] == 200
    assert fvcol[key]['bar'] == 400

    # Test delete vertex with bad revision
    if fvcol.context != 'transaction':
        old_rev = fvcol[key]['_rev']
        vertex['_rev'] = old_rev + '1'
        with assert_raises(DocumentRevisionError, DocumentDeleteError) as err:
            fvcol.delete(vertex, check_rev=True)
        assert err.value.error_code in {1200, 1903}
        vertex['_rev'] = old_rev
        assert vertex in fvcol

    # Test delete missing vertex
    bad_key = generate_doc_key()
    with assert_raises(DocumentDeleteError) as err:
        fvcol.delete(bad_key, ignore_missing=False)
    assert err.value.error_code == 1202
    if fvcol.context != 'transaction':
        assert fvcol.delete(bad_key, ignore_missing=True) is False

    # Test delete existing vertex with sync set to True
    assert fvcol.delete(vertex, sync=True, check_rev=False) is True
    if fvcol.context != 'transaction':
        assert fvcol[vertex] is None
        assert vertex not in fvcol
        assert len(fvcol) == 2
    fvcol.truncate()
def test_user_management(sys_db, bad_db):
    """Test user create/list/get/update/replace/delete and their error
    paths. ``sys_db`` is the _system database fixture, ``bad_db`` a
    handle whose requests are expected to fail (presumably from
    conftest — definitions are outside this chunk).
    """
    # Test create user
    username = generate_username()
    password = generate_string()
    assert not sys_db.has_user(username)
    new_user = sys_db.create_user(
        username=username,
        password=password,
        active=True,
        extra={'foo': 'bar'},
    )
    assert new_user['username'] == username
    assert new_user['active'] is True
    assert new_user['extra'] == {'foo': 'bar'}
    assert sys_db.has_user(username)

    # Test create duplicate user
    with assert_raises(UserCreateError) as err:
        sys_db.create_user(
            username=username,
            password=password
        )
    assert err.value.error_code == 1702

    # Test list users
    for user in sys_db.users():
        assert isinstance(user['username'], string_types)
        assert isinstance(user['active'], bool)
        assert isinstance(user['extra'], dict)
    assert sys_db.user(username) == new_user

    # Test list users with bad database
    with assert_raises(UserListError) as err:
        bad_db.users()
    assert err.value.error_code in {11, 1228}

    # Test get user
    users = sys_db.users()
    for user in users:
        assert 'active' in user
        assert 'extra' in user
        assert 'username' in user
    assert username in extract('username', sys_db.users())

    # Test get missing user
    with assert_raises(UserGetError) as err:
        sys_db.user(generate_username())
    assert err.value.error_code == 1703

    # Update existing user
    new_user = sys_db.update_user(
        username=username,
        password=password,
        active=False,
        extra={'bar': 'baz'},
    )
    assert new_user['username'] == username
    assert new_user['active'] is False
    assert new_user['extra'] == {'bar': 'baz'}
    assert sys_db.user(username) == new_user

    # Update missing user
    with assert_raises(UserUpdateError) as err:
        sys_db.update_user(
            username=generate_username(),
            password=generate_string()
        )
    assert err.value.error_code == 1703

    # Replace existing user
    new_user = sys_db.replace_user(
        username=username,
        password=password,
        active=False,
        extra={'baz': 'qux'},
    )
    assert new_user['username'] == username
    assert new_user['active'] is False
    assert new_user['extra'] == {'baz': 'qux'}
    assert sys_db.user(username) == new_user

    # Replace missing user
    with assert_raises(UserReplaceError) as err:
        sys_db.replace_user(
            username=generate_username(),
            password=generate_string()
        )
    assert err.value.error_code == 1703

    # Delete an existing user
    assert sys_db.delete_user(username) is True

    # Delete a missing user
    with assert_raises(UserDeleteError) as err:
        sys_db.delete_user(username, ignore_missing=False)
    assert err.value.error_code == 1703
    assert sys_db.delete_user(username, ignore_missing=True) is False
def test_foxx_misc_functions(db, bad_db):
    """Test miscellaneous Foxx endpoints: readme, swagger, download,
    commit, scripts and test runs. A fresh service is mounted first;
    ``missing_mount`` is never mounted, so calls against it are
    expected to fail with error code 3009 (service not found —
    per the asserted server responses).
    """
    service_mount = generate_service_mount()
    missing_mount = generate_service_mount()

    # Prep the test service
    db.foxx.create_service(
        mount=service_mount,
        source=service_file,
    )

    # Test get service readme
    assert 'Apache 2' in db.foxx.readme(service_mount)

    # Test get missing service readme
    with assert_raises(FoxxReadmeGetError) as err:
        db.foxx.readme(missing_mount)
    assert err.value.error_code == 3009

    # Test get service swagger
    swagger = db.foxx.swagger(service_mount)
    assert 'swagger' in swagger
    assert 'paths' in swagger
    assert 'info' in swagger
    assert 'base_path' in swagger

    # Test get missing service swagger
    with assert_raises(FoxxSwaggerGetError) as err:
        db.foxx.swagger(missing_mount)
    assert err.value.error_code == 3009

    # Test download service
    assert isinstance(db.foxx.download(service_mount), string_types)

    # Test download missing service
    with assert_raises(FoxxDownloadError) as err:
        db.foxx.download(missing_mount)
    assert err.value.error_code == 3009

    # Test commit service state
    assert db.foxx.commit(replace=True) is True
    assert db.foxx.commit(replace=False) is True

    # Test commit service state with bad database
    with assert_raises(FoxxCommitError) as err:
        bad_db.foxx.commit(replace=True)
    assert err.value.error_code in {11, 1228}

    # Test list service scripts
    scripts = db.foxx.scripts(service_mount)
    assert 'setup' in scripts
    assert 'teardown' in scripts

    # Test list missing service scripts
    with assert_raises(FoxxScriptListError) as err:
        db.foxx.scripts(missing_mount)
    assert err.value.error_code == 3009

    # Test run service script
    assert db.foxx.run_script(service_mount, 'setup', []) == {}
    assert db.foxx.run_script(service_mount, 'teardown', []) == {}

    # Test run missing service script
    with assert_raises(FoxxScriptRunError) as err:
        db.foxx.run_script(service_mount, 'invalid', ())
    assert err.value.error_code == 3016

    # Test run tests on service: the "suite" reporter returns a JSON
    # document with stats and tests.
    result_string = db.foxx.run_tests(
        mount=service_mount,
        reporter='suite',
        idiomatic=True
    )
    result_json = json.loads(result_string)
    assert 'stats' in result_json
    assert 'tests' in result_json

    # The "stream" reporter with x-ldjson yields one JSON array per
    # CRLF-separated line.
    result_string = db.foxx.run_tests(
        mount=service_mount,
        reporter='stream',
        output_format='x-ldjson'
    )
    for result_part in result_string.split('\r\n'):
        if len(result_part) == 0:
            continue
        assert result_part.startswith('[')
        assert result_part.endswith(']')

    # The "stream" reporter with text output is one nested array.
    result_string = db.foxx.run_tests(
        mount=service_mount,
        reporter='stream',
        output_format='text'
    )
    assert result_string.startswith('[[')
    assert result_string.endswith(']]')

    # The "xunit" reporter with xml output is an XML document.
    result_string = db.foxx.run_tests(
        mount=service_mount,
        reporter='xunit',
        output_format='xml'
    )
    assert result_string.strip().startswith('<')
    assert result_string.strip().endswith('>')

    # Test run tests on missing service
    with assert_raises(FoxxTestRunError) as err:
        db.foxx.run_tests(missing_mount)
    assert err.value.error_code == 3009
def test_graph_management(db, bad_db):
    """Test graph create/get/list/delete, including the difference
    between deleting a graph with and without dropping its underlying
    collections.
    """
    # Test create graph
    graph_name = generate_graph_name()
    assert db.has_graph(graph_name) is False
    graph = db.create_graph(graph_name)
    assert db.has_graph(graph_name) is True
    assert graph.name == graph_name
    assert graph.db_name == db.name

    # Test create duplicate graph
    with assert_raises(GraphCreateError) as err:
        db.create_graph(graph_name)
    assert err.value.error_code == 1925

    # Test get graph
    result = db.graph(graph_name)
    assert result.name == graph.name
    assert result.db_name == graph.db_name

    # Test get graphs
    result = db.graphs()
    for entry in result:
        assert 'revision' in entry
        assert 'edge_definitions' in entry
        assert 'orphan_collections' in entry
    assert graph_name in extract('name', db.graphs())

    # Test get graphs with bad database
    with assert_raises(GraphListError) as err:
        bad_db.graphs()
    assert err.value.error_code in {11, 1228}

    # Test delete graph
    assert db.delete_graph(graph_name) is True
    assert graph_name not in extract('name', db.graphs())

    # Test delete missing graph
    with assert_raises(GraphDeleteError) as err:
        db.delete_graph(graph_name)
    assert err.value.error_code == 1924
    assert db.delete_graph(graph_name, ignore_missing=True) is False

    # Create a graph with vertex and edge collections and delete the graph
    graph = db.create_graph(graph_name)
    ecol_name = generate_col_name()
    fvcol_name = generate_col_name()
    tvcol_name = generate_col_name()
    graph.create_vertex_collection(fvcol_name)
    graph.create_vertex_collection(tvcol_name)
    graph.create_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[fvcol_name],
        to_vertex_collections=[tvcol_name]
    )
    collections = extract('name', db.collections())
    assert fvcol_name in collections
    assert tvcol_name in collections
    assert ecol_name in collections
    db.delete_graph(graph_name)
    # Without drop_collections the underlying collections survive.
    collections = extract('name', db.collections())
    assert fvcol_name in collections
    assert tvcol_name in collections
    assert ecol_name in collections

    # Create a graph with vertex and edge collections and delete all
    graph = db.create_graph(graph_name)
    graph.create_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[fvcol_name],
        to_vertex_collections=[tvcol_name]
    )
    db.delete_graph(graph_name, drop_collections=True)
    # With drop_collections=True the collections are removed as well.
    collections = extract('name', db.collections())
    assert fvcol_name not in collections
    assert tvcol_name not in collections
    assert ecol_name not in collections
def test_vertex_edges(db, bad_db):
    """Verify edge listing by direction on a small star-shaped graph:
    four vertices each link to dave, and dave links back to anna.
    """
    graph_name = generate_graph_name()
    vcol_name = generate_col_name()
    ecol_name = generate_col_name()

    # Prepare test documents
    anna = {'_id': '{}/anna'.format(vcol_name)}
    dave = {'_id': '{}/dave'.format(vcol_name)}
    josh = {'_id': '{}/josh'.format(vcol_name)}
    mary = {'_id': '{}/mary'.format(vcol_name)}
    tony = {'_id': '{}/tony'.format(vcol_name)}

    # Create test graph, vertex and edge collections
    school = db.create_graph(graph_name)
    vcol = school.create_vertex_collection(vcol_name)
    ecol = school.create_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[vcol_name],
        to_vertex_collections=[vcol_name]
    )

    # Insert test vertices into the graph
    for person in (anna, dave, josh, mary, tony):
        vcol.insert(person)

    # Insert test edges into the graph
    for source, target in ((anna, dave), (josh, dave), (mary, dave),
                           (tony, dave), (dave, anna)):
        ecol.link(source, target)

    # Edges with default direction (both): dave touches all five edges.
    result = ecol.edges(dave)
    assert 'stats' in result
    assert 'filtered' in result['stats']
    assert 'scanned_index' in result['stats']
    assert len(result['edges']) == 5
    assert len(ecol.edges(anna)['edges']) == 2

    # Edges with direction set to "in"
    assert len(ecol.edges(dave, direction='in')['edges']) == 4
    assert len(ecol.edges(anna, direction='in')['edges']) == 1

    # Edges with direction set to "out"
    assert len(ecol.edges(dave, direction='out')['edges']) == 1
    assert len(ecol.edges(anna, direction='out')['edges']) == 1

    # Listing edges through a bad database connection must fail.
    bad_graph = bad_db.graph(graph_name)
    with assert_raises(EdgeListError) as err:
        bad_graph.edge_collection(ecol_name).edges(dave)
    assert err.value.error_code in {11, 1228}
def test_permission_management(client, sys_db, bad_db):
    """Test database- and collection-level permission management.

    The sequence is strictly order-dependent: each permission change is
    immediately verified through the non-privileged user's connection
    (HTTP 401 when unauthenticated access is denied, 403 when the user
    is known but lacks the permission — per the asserted responses).
    """
    username = generate_username()
    password = generate_string()
    db_name = generate_db_name()
    col_name_1 = generate_col_name()
    col_name_2 = generate_col_name()

    # Create a fresh database owned by a new (non-privileged) user.
    sys_db.create_database(
        name=db_name,
        users=[{
            'username': username,
            'password': password,
            'active': True
        }]
    )
    db = client.db(db_name, username, password)
    assert isinstance(sys_db.permissions(username), dict)

    # Test list permissions with bad database
    with assert_raises(PermissionListError) as err:
        bad_db.permissions(username)
    assert err.value.error_code in {11, 1228}

    # Test get permission with bad database
    with assert_raises(PermissionGetError) as err:
        bad_db.permission(username, db_name)
    assert err.value.error_code in {11, 1228}

    # The user should not have read and write permissions
    assert sys_db.permission(username, db_name) == 'none'
    assert sys_db.permission(username, db_name, col_name_1) == 'none'
    with assert_raises(CollectionCreateError) as err:
        db.create_collection(col_name_1)
    assert err.value.http_code == 401
    with assert_raises(CollectionListError) as err:
        db.collections()
    assert err.value.http_code == 401

    # Test update permission (database level) with bad database
    with assert_raises(PermissionUpdateError):
        bad_db.update_permission(username, 'ro', db_name)
    assert sys_db.permission(username, db_name) == 'none'

    # Test update permission (database level) to read only and verify access
    assert sys_db.update_permission(username, 'ro', db_name) is True
    assert sys_db.permission(username, db_name) == 'ro'
    with assert_raises(CollectionCreateError) as err:
        db.create_collection(col_name_2)
    assert err.value.http_code == 403
    assert col_name_1 not in extract('name', db.collections())
    assert col_name_2 not in extract('name', db.collections())

    # Test reset permission (database level) with bad database
    with assert_raises(PermissionResetError) as err:
        bad_db.reset_permission(username, db_name)
    assert err.value.error_code in {11, 1228}
    assert sys_db.permission(username, db_name) == 'ro'

    # Test reset permission (database level) and verify access
    assert sys_db.reset_permission(username, db_name) is True
    assert sys_db.permission(username, db_name) == 'none'
    with assert_raises(CollectionCreateError) as err:
        db.create_collection(col_name_1)
    assert err.value.http_code == 401
    with assert_raises(CollectionListError) as err:
        db.collections()
    assert err.value.http_code == 401

    # Test update permission (database level) and verify access
    assert sys_db.update_permission(username, 'rw', db_name) is True
    assert sys_db.permission(username, db_name, col_name_2) == 'rw'
    assert db.create_collection(col_name_1) is not None
    assert db.create_collection(col_name_2) is not None
    assert col_name_1 in extract('name', db.collections())
    assert col_name_2 in extract('name', db.collections())
    col_1 = db.collection(col_name_1)
    col_2 = db.collection(col_name_2)

    # Verify that user has read and write access to both collections
    assert isinstance(col_1.properties(), dict)
    assert isinstance(col_1.insert({}), dict)
    assert isinstance(col_2.properties(), dict)
    assert isinstance(col_2.insert({}), dict)

    # Test update permission (collection level) to read only and verify access
    assert sys_db.update_permission(username, 'ro', db_name, col_name_1)
    assert sys_db.permission(username, db_name, col_name_1) == 'ro'
    assert isinstance(col_1.properties(), dict)
    with assert_raises(DocumentInsertError) as err:
        col_1.insert({})
    assert err.value.http_code == 403
    assert isinstance(col_2.properties(), dict)
    assert isinstance(col_2.insert({}), dict)

    # Test update permission (collection level) to none and verify access
    assert sys_db.update_permission(username, 'none', db_name, col_name_1)
    assert sys_db.permission(username, db_name, col_name_1) == 'none'
    with assert_raises(CollectionPropertiesError) as err:
        col_1.properties()
    assert err.value.http_code == 403
    with assert_raises(DocumentInsertError) as err:
        col_1.insert({})
    assert err.value.http_code == 403
    assert isinstance(col_2.properties(), dict)
    assert isinstance(col_2.insert({}), dict)

    # Test reset permission (collection level)
    assert sys_db.reset_permission(username, db_name, col_name_1) is True
    assert sys_db.permission(username, db_name, col_name_1) == 'rw'
    assert isinstance(col_1.properties(), dict)
    assert isinstance(col_1.insert({}), dict)
    assert isinstance(col_2.properties(), dict)
    assert isinstance(col_2.insert({}), dict)
def test_view_management(db, bad_db):
    """Test ArangoSearch view create/list/get/update/replace/rename/
    delete and their error paths. Assumes the test database starts
    with no views (the list assertions expect exactly one entry).
    """
    # Test create view
    view_name = generate_view_name()
    bad_view_name = generate_view_name()
    view_type = 'arangosearch'
    view_properties = {
        'consolidationIntervalMsec': 50000,
        # 'consolidationPolicy': {'segmentThreshold': 200}
    }
    result = db.create_view(view_name, view_type, view_properties)
    assert 'id' in result
    assert result['name'] == view_name
    assert result['type'] == view_type
    assert result['consolidationIntervalMsec'] == 50000
    view_id = result['id']

    # Test create duplicate view
    with assert_raises(ViewCreateError) as err:
        db.create_view(view_name, view_type, view_properties)
    assert err.value.error_code == 1207

    # Test list views
    result = db.views()
    assert len(result) == 1
    view = result[0]
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'] == view_type

    # Test list view with bad database
    with assert_raises(ViewListError) as err:
        bad_db.views()
    assert err.value.error_code in {11, 1228}

    # Test get view
    view = db.view(view_name)
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'] == view_type
    assert view['consolidationIntervalMsec'] == 50000

    # Test get missing view
    with assert_raises(ViewGetError) as err:
        db.view(bad_view_name)
    assert err.value.error_code == 1203

    # Test update view
    view = db.update_view(view_name, {'consolidationIntervalMsec': 70000})
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'] == view_type
    assert view['consolidationIntervalMsec'] == 70000

    # Test update with bad database
    with assert_raises(ViewUpdateError) as err:
        bad_db.update_view(view_name, {'consolidationIntervalMsec': 80000})
    assert err.value.error_code in {11, 1228}

    # Test replace view
    view = db.replace_view(view_name, {'consolidationIntervalMsec': 40000})
    assert view['id'] == view_id
    assert view['name'] == view_name
    assert view['type'] == view_type
    assert view['consolidationIntervalMsec'] == 40000

    # Test replace with bad database
    with assert_raises(ViewReplaceError) as err:
        bad_db.replace_view(view_name, {'consolidationIntervalMsec': 7000})
    assert err.value.error_code in {11, 1228}

    # Test rename view (the view keeps its ID across the rename)
    new_view_name = generate_view_name()
    assert db.rename_view(view_name, new_view_name) is True
    result = db.views()
    assert len(result) == 1
    view = result[0]
    assert view['id'] == view_id
    assert view['name'] == new_view_name

    # Test rename missing view
    with assert_raises(ViewRenameError) as err:
        db.rename_view(bad_view_name, view_name)
    assert err.value.error_code == 1203

    # Test delete view
    assert db.delete_view(new_view_name) is True
    assert len(db.views()) == 0

    # Test delete missing view
    with assert_raises(ViewDeleteError) as err:
        db.delete_view(new_view_name)
    assert err.value.error_code == 1203

    # Test delete missing view with ignore_missing set to True
    assert db.delete_view(view_name, ignore_missing=True) is False
def test_database_misc_methods(db, bad_db):
    """Test miscellaneous database/server endpoints (properties,
    version, details, statistics, role, status, time, echo, logs,
    routing, endpoints, engine) and their bad-database error paths.

    Fix: membership tests previously mixed list literals (``[11]``,
    ``['mmfiles', 'rocksdb']``) with the set literals used everywhere
    else in this suite; normalized to set literals (behavior
    unchanged).
    """
    # Test get properties
    properties = db.properties()
    assert 'id' in properties
    assert 'path' in properties
    assert properties['name'] == db.name
    assert properties['system'] is False

    # Test get properties with bad database
    with assert_raises(DatabasePropertiesError) as err:
        bad_db.properties()
    assert err.value.error_code in {11, 1228}

    # Test get server version
    assert isinstance(db.version(), string_types)

    # Test get server version with bad database
    with assert_raises(ServerVersionError) as err:
        bad_db.version()
    assert err.value.error_code in {11, 1228}

    # Test get server details
    details = db.details()
    assert 'architecture' in details
    assert 'server-version' in details

    # Test get server details with bad database
    with assert_raises(ServerDetailsError) as err:
        bad_db.details()
    assert err.value.error_code in {11, 1228}

    # Test get server required database version
    version = db.required_db_version()
    assert isinstance(version, string_types)

    # Test get server target version with bad database
    with assert_raises(ServerRequiredDBVersionError):
        bad_db.required_db_version()

    # Test get server statistics
    statistics = db.statistics(description=False)
    assert isinstance(statistics, dict)
    assert 'time' in statistics
    assert 'system' in statistics
    assert 'server' in statistics

    # Test get server statistics with description
    description = db.statistics(description=True)
    assert isinstance(description, dict)
    assert 'figures' in description
    assert 'groups' in description

    # Test get server statistics with bad database
    with assert_raises(ServerStatisticsError) as err:
        bad_db.statistics()
    assert err.value.error_code in {11, 1228}

    # Test get server role
    assert db.role() in {
        'SINGLE',
        'COORDINATOR',
        'PRIMARY',
        'SECONDARY',
        'UNDEFINED'
    }

    # Test get server role with bad database
    with assert_raises(ServerRoleError) as err:
        bad_db.role()
    assert err.value.error_code in {11, 1228}

    # Test get server status
    status = db.status()
    assert 'host' in status
    assert 'operation_mode' in status
    assert 'server_info' in status
    assert 'read_only' in status['server_info']
    assert 'write_ops_enabled' in status['server_info']
    assert 'version' in status

    # Test get status with bad database
    with assert_raises(ServerStatusError) as err:
        bad_db.status()
    assert err.value.error_code in {11, 1228}

    # Test get server time
    assert isinstance(db.time(), datetime)

    # Test get server time with bad database
    with assert_raises(ServerTimeError) as err:
        bad_db.time()
    assert err.value.error_code in {11, 1228}

    # Test echo (get last request)
    last_request = db.echo()
    assert 'protocol' in last_request
    assert 'user' in last_request
    assert 'requestType' in last_request
    assert 'rawRequestBody' in last_request

    # Test echo with bad database
    with assert_raises(ServerEchoError) as err:
        bad_db.echo()
    assert err.value.error_code in {11, 1228}

    # Test read_log with default parameters
    log = db.read_log(upto='fatal')
    assert 'lid' in log
    assert 'level' in log
    assert 'text' in log
    assert 'total_amount' in log

    # Test read_log with specific parameters
    log = db.read_log(
        level='error',
        start=0,
        size=100000,
        offset=0,
        search='test',
        sort='desc',
    )
    assert 'lid' in log
    assert 'level' in log
    assert 'text' in log
    assert 'total_amount' in log

    # Test read_log with bad database
    with assert_raises(ServerReadLogError) as err:
        bad_db.read_log()
    assert err.value.error_code in {11, 1228}

    # Test reload routing
    assert isinstance(db.reload_routing(), bool)

    # Test reload routing with bad database
    with assert_raises(ServerReloadRoutingError) as err:
        bad_db.reload_routing()
    assert err.value.error_code in {11, 1228}

    # Test get log levels
    assert isinstance(db.log_levels(), dict)

    # Test get log levels with bad database
    with assert_raises(ServerLogLevelError) as err:
        bad_db.log_levels()
    assert err.value.error_code in {11, 1228}

    # Test set log levels
    new_levels = {
        'agency': 'DEBUG',
        'collector': 'INFO',
        'threads': 'WARNING'
    }
    result = db.set_log_levels(**new_levels)
    for key, value in new_levels.items():
        assert result[key] == value
    for key, value in db.log_levels().items():
        assert result[key] == value

    # Test set log levels with bad database
    with assert_raises(ServerLogLevelSetError):
        bad_db.set_log_levels(**new_levels)

    # Test get server endpoints (expected to fail against a regular
    # database — only error code 11 is asserted here).
    with assert_raises(ServerEndpointsError) as err:
        db.endpoints()
    assert err.value.error_code in {11}

    # Test get server endpoints with bad database
    with assert_raises(ServerEndpointsError) as err:
        bad_db.endpoints()
    assert err.value.error_code in {11, 1228}

    # Test get storage engine
    engine = db.engine()
    assert engine['name'] in {'mmfiles', 'rocksdb'}
    assert 'supports' in engine

    # Test get storage engine with bad database
    with assert_raises(ServerEngineError) as err:
        bad_db.engine()
    assert err.value.error_code in {11, 1228}
def test_edge_definition_management(db, graph, bad_graph):
    """Exercise the edge-definition lifecycle on a graph.

    Covers: create (empty, with existing vertex collections, with a
    missing vertex collection that ArangoDB auto-creates), duplicate
    create failure, listing via a bad database, replace (happy path and
    missing definition), and delete (missing definition and purge).

    :param db: Standard database fixture.
    :param graph: Test graph fixture owned by *db*.
    :param bad_graph: Graph handle bound to bad credentials, used to
        trigger authorization/database errors.
    """
    ecol_name = generate_col_name()
    assert not graph.has_edge_definition(ecol_name)
    assert not graph.has_edge_collection(ecol_name)
    assert not db.has_collection(ecol_name)

    # Create an edge definition with no vertex collections.
    ecol = graph.create_edge_definition(ecol_name, [], [])
    assert graph.has_edge_definition(ecol_name)
    assert graph.has_edge_collection(ecol_name)
    assert db.has_collection(ecol_name)
    assert isinstance(ecol, EdgeCollection)

    ecol = graph.edge_collection(ecol_name)
    assert ecol.name == ecol_name
    assert ecol.name in repr(ecol)
    assert ecol.graph == graph.name
    assert {
        'edge_collection': ecol_name,
        'from_vertex_collections': [],
        'to_vertex_collections': []
    } in graph.edge_definitions()
    assert ecol_name in extract('name', db.collections())

    # Test create duplicate edge definition
    with assert_raises(EdgeDefinitionCreateError) as err:
        graph.create_edge_definition(ecol_name, [], [])
    assert err.value.error_code == 1920

    # Test create edge definition with existing vertex collections
    fvcol_name = generate_col_name()
    tvcol_name = generate_col_name()
    ecol_name = generate_col_name()
    ecol = graph.create_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[fvcol_name],
        to_vertex_collections=[tvcol_name]
    )
    assert ecol.name == ecol_name
    assert {
        'edge_collection': ecol_name,
        'from_vertex_collections': [fvcol_name],
        'to_vertex_collections': [tvcol_name]
    } in graph.edge_definitions()
    assert ecol_name in extract('name', db.collections())

    vertex_collections = graph.vertex_collections()
    assert fvcol_name in vertex_collections
    assert tvcol_name in vertex_collections

    # Test create edge definition with missing vertex collection
    # (the server creates the referenced vertex collection on the fly).
    bad_vcol_name = generate_col_name()
    ecol_name = generate_col_name()
    ecol = graph.create_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[bad_vcol_name],
        to_vertex_collections=[bad_vcol_name]
    )
    assert graph.has_edge_definition(ecol_name)
    assert graph.has_edge_collection(ecol_name)
    assert ecol.name == ecol_name
    assert {
        'edge_collection': ecol_name,
        'from_vertex_collections': [bad_vcol_name],
        'to_vertex_collections': [bad_vcol_name]
    } in graph.edge_definitions()
    assert bad_vcol_name in graph.vertex_collections()
    assert bad_vcol_name in extract('name', db.collections())

    # Test list edge definition with bad database
    with assert_raises(EdgeDefinitionListError) as err:
        bad_graph.edge_definitions()
    assert err.value.error_code in {11, 1228}

    # Test replace edge definition (happy path): swap from/to collections.
    ecol = graph.replace_edge_definition(
        edge_collection=ecol_name,
        from_vertex_collections=[tvcol_name],
        to_vertex_collections=[fvcol_name]
    )
    assert isinstance(ecol, EdgeCollection)
    assert ecol.name == ecol_name
    assert {
        'edge_collection': ecol_name,
        'from_vertex_collections': [tvcol_name],
        'to_vertex_collections': [fvcol_name]
    } in graph.edge_definitions()

    # Test replace missing edge definition
    bad_ecol_name = generate_col_name()
    with assert_raises(EdgeDefinitionReplaceError):
        graph.replace_edge_definition(
            edge_collection=bad_ecol_name,
            from_vertex_collections=[],
            to_vertex_collections=[fvcol_name]
        )

    # Test delete missing edge definition
    with assert_raises(EdgeDefinitionDeleteError) as err:
        graph.delete_edge_definition(bad_ecol_name)
    assert err.value.error_code == 1930

    # Test delete existing edge definition with purge (drops the
    # underlying collection as well as the definition).
    assert graph.delete_edge_definition(ecol_name, purge=True) is True
    assert ecol_name not in \
        extract('edge_collection', graph.edge_definitions())
    assert not graph.has_edge_definition(ecol_name)
    assert not graph.has_edge_collection(ecol_name)
    assert ecol_name not in extract('name', db.collections())