def test_add_delete_user_relation(self) -> None:
    """A user READ relation to a table can be created and then removed again."""
    table = Fixtures.next_table()
    self.get_proxy().put_table(table=table)
    user = Fixtures.next_user()
    self.get_proxy().put_user(data=user)

    # create the READ relation and confirm it is visible both ways
    self.get_proxy().add_resource_relation_by_user(
        id=f'{table.key}',
        user_id=f'{user.user_id}',
        relation_type=UserResourceRel.read,
        resource_type=ResourceType.Table)
    found: Dict[str, List[Table]] = self.get_proxy().get_table_by_user_relation(
        user_email=f'{user.user_id}', relation_type=UserResourceRel.read)
    self.assertEqual(1, len(found['table']))
    relations = self.get_relationship(node_type1='User',
                                      node_key1=f'{user.user_id}',
                                      node_type2='Table',
                                      node_key2=checkNotNone(table.key))
    self.assertEqual(1, len(relations))

    # Now delete the relation
    self.get_proxy().delete_resource_relation_by_user(
        id=f'{table.key}',
        user_id=f'{user.user_id}',
        relation_type=UserResourceRel.read,
        resource_type=ResourceType.Table)
    after_delete: Dict[str, List[Table]] = \
        self.get_proxy().get_table_by_user_relation(
            user_email=f'{user.user_id}', relation_type=UserResourceRel.read)
    self.assertEqual(0, len(after_delete['table']))
def test_rt_table_with_owner(self) -> None:
    """A table whose application id matches an active user reports that user as owner."""
    owner = Fixtures.next_user(is_active=True)
    self.get_proxy().put_user(data=owner)
    app = Fixtures.next_application(application_id=owner.user_id)
    table = Fixtures.next_table(application=app)
    self.get_proxy().put_table(table=table)

    fetched: Table = self.get_proxy().get_table(
        table_uri=checkNotNone(table.key))
    self.assertEqual(owner.user_id, fetched.owners[0].user_id)
def test_rt_table_with_non_existent_app(self) -> None:
    """A table referencing a never-inserted application has no writer and no owners."""
    # purposefully don't insert application
    app = Fixtures.next_application()
    table = Fixtures.next_table(application=app)
    self.get_proxy().put_table(table=table)

    fetched: Table = self.get_proxy().get_table(
        table_uri=checkNotNone(table.key))
    self.assertEqual(fetched.table_writer, None)
    self.assertEqual(fetched.owners, [])
def test_link_dangling_to_rt(self) -> None:
    """_link raises StopIteration when the far-side vertex does not exist."""
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'
    cluster_uri = f'{db_name}://acluster'

    def database_count():
        # how many Database vertices currently match our key
        return self._get(label=VertexTypes.Database,
                         key=database_uri,
                         extra_traversal=__.count())

    self.assertEqual(database_count(), 0)
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    self.assertEqual(database_count(), 1)
    # the Cluster vertex is intentionally missing
    self.assertEqual(
        self._get(label=VertexTypes.Cluster,
                  key=cluster_uri,
                  extra_traversal=__.count()), 0)
    with self.assertRaises(StopIteration):
        self._link(vertex1_label=VertexTypes.Database,
                   vertex1_key=database_uri,
                   vertex2_label=VertexTypes.Cluster,
                   vertex2_key=cluster_uri,
                   edge_label=EdgeTypes.Cluster,
                   aproperty='hi')
def test_safe_get_with_objects(self) -> None:
    """_safe_get/_safe_get_list unwrap nested query results via a transform."""
    app = Fixtures.next_application()
    app2 = Fixtures.next_application()

    def as_result(application):
        # mimic a gremlin valueMap-style result: each attribute wrapped in a list
        return {key: [value] for key, value in application.__dict__.items()}

    transform = self.get_proxy()._convert_to_application
    self.assertEqual(
        _safe_get([{'a': [as_result(app)]}], 'a', transform=transform), app)
    self.assertCountEqual(
        _safe_get_list([{'a': [as_result(app), as_result(app2)]}], 'a',
                       transform=transform),
        [app, app2])
def test_put_programmatic_table_description(self) -> None:
    """A programmatic description can be attached to a table and read back."""
    table: Table = Fixtures.next_table()
    table.programmatic_descriptions = []
    self.get_proxy().put_table(table=table)

    description: ProgrammaticDescription = Fixtures.next_description()
    table_uri = checkNotNone(table.key)
    self.get_proxy().put_programmatic_table_description(
        table_uri=table_uri, description=description)
    fetched = self.get_proxy().get_table(table_uri=table_uri)
    self.assertEqual([description], fetched.programmatic_descriptions)

    # confirm that this runs without failing
    self.get_proxy().put_programmatic_table_description(
        table_uri=checkNotNone(Fixtures.next_table().key),
        description=Fixtures.next_description())
def test_get_latest_updated_ts(self) -> None:
    """get_latest_updated_ts tracks the most recent table write."""
    application = Fixtures.next_application()
    self.get_proxy().put_app(data=application)
    table = Fixtures.next_table(application=application)
    table_uri: str = checkNotNone(table.key)
    self.get_proxy().put_table(table=table)

    first_ts = self.get_proxy().get_latest_updated_ts()
    self.assertEqual(type(first_ts), int)
    fetched: Table = self.get_proxy().get_table(table_uri=table_uri)
    self.assertEqual(fetched.last_updated_timestamp, first_ts)

    # try posting the same table again and make sure the timestamp updates
    time.sleep(1)
    self.get_proxy().put_table(table=table)
    second_ts = self.get_proxy().get_latest_updated_ts()
    self.assertNotEqual(first_ts, second_ts)
    fetched = self.get_proxy().get_table(table_uri=table_uri)
    self.assertEqual(fetched.last_updated_timestamp, second_ts)
def test_get_popular_tables(self) -> None:
    """Only tables with nonzero read counts rank among the popular tables."""
    application = Fixtures.next_application()
    self.get_proxy().put_app(data=application)

    # Add 10 tables
    tables: List[Table] = [
        Fixtures.next_table(application=application) for _ in range(10)
    ]
    self.get_proxy().post_tables(tables=tables)
    user = Fixtures.next_user()
    self.get_proxy().put_user(data=user)

    # add reads to 6 of them, expecting that only the top five will be "popular"
    expected_popular_tables = []
    read_count = 0
    for candidate in tables[:6]:
        self.get_proxy().add_read_count(table_uri=checkNotNone(candidate.key),
                                        user_id=f'{user.user_id}',
                                        read_count=read_count)
        # the first table gets 0 reads and therefore should not be popular
        if read_count > 0:
            expected_popular_tables.append(checkNotNone(candidate.name))
        read_count += 1000

    # ensure popular tables returns those 5 we added
    actual_popular_tables = self.get_proxy().get_popular_tables(num_entries=5)
    self.assertEqual(len(actual_popular_tables), 5)
    popular_tables = [
        PopularTable(database=t.database,
                     cluster=t.cluster,
                     schema=t.schema,
                     name=t.name,
                     description=t.description)
        for t in tables if t.name in expected_popular_tables
    ]
    self.assertEqual(sorted(actual_popular_tables), sorted(popular_tables))
def test_owner_rt(self) -> None:
    """add_owner/delete_owner round-trip: ownership appears, then is fully removed."""
    application = Fixtures.next_application()
    self.get_proxy().put_app(data=application)
    table = Fixtures.next_table(application=application)
    self.get_proxy().put_table(table=table)
    user = Fixtures.next_user()
    self.get_proxy().put_user(data=user)
    user_id: str = user.user_id or 'test'
    table_key = checkNotNone(table.key)

    self.get_proxy().add_owner(table_uri=table_key, owner=user_id)
    owned = self.get_proxy().get_table(table_uri=table_key)
    self.assertEqual([user_id], [u.user_id for u in owned.owners])

    self.get_proxy().delete_owner(table_uri=table_key, owner=user_id)
    no_owner_table: Table = self.get_proxy().get_table(table_uri=table_key)
    self.assertEqual([], no_owner_table.owners)
    # the underlying relationship should be gone as well
    relations = self.get_relationship(node_type1='User',
                                      node_key1=user_id,
                                      node_type2='Table',
                                      node_key2=table_key)
    self.assertEqual(0, len(relations))
def test_rt_table(self) -> None:
    """
    it'd be nice to check that the result could be deserialized as a
    client of the metadata_service would
    """
    expected = Fixtures.next_table()
    expected.description = '"hello!" said no one'
    expected.tags.sort()
    self.get_proxy().put_table(table=expected)

    round_tripped: Table = self.get_proxy().get_table(
        table_uri=checkNotNone(expected.key))
    # the timestamp is set server-side and tag ordering is unspecified,
    # so normalize both before comparing
    round_tripped.last_updated_timestamp = None
    round_tripped.tags.sort()
    self.assertEqual(expected, round_tripped)
def test_tag_rt(self) -> None:
    """Tags round-trip: add shows up in get_tags, delete removes tag and relationship."""
    table = Fixtures.next_table()
    self.get_proxy().put_table(table=table)
    test_tag_detail = TagDetail(tag_name='a', tag_count=1)
    table_key = checkNotNone(table.key)

    self.get_proxy().add_tag(id=table_key,
                             tag=test_tag_detail.tag_name,
                             tag_type='default',
                             resource_type=ResourceType.Table)
    self.assertIn(test_tag_detail, self.get_proxy().get_tags())

    self.get_proxy().delete_tag(id=table_key,
                                tag=test_tag_detail.tag_name,
                                tag_type='default',
                                resource_type=ResourceType.Table)
    self.assertNotIn(test_tag_detail, self.get_proxy().get_tags())
    # the Table -> Tag relationship must be gone too
    relations = self.get_relationship(node_type1='Table',
                                      node_key1=table_key,
                                      node_type2='Tag',
                                      node_key2=test_tag_detail.tag_name)
    self.assertEqual(0, len(relations))
def test_upsert_rt(self) -> None:
    """_upsert inserts a missing vertex and is idempotent on a repeat call."""
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'

    def count_matching():
        return self._get(label=VertexTypes.Database,
                         key=database_uri,
                         extra_traversal=__.count())

    # test that we will insert
    self.assertEqual(count_matching(), 0)
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    self.assertEqual(count_matching(), 1)

    # test that we will not insert (_get will explode if more than one vertex matches)
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    value_map = self._get(label=VertexTypes.Database,
                          key=database_uri,
                          extra_traversal=__.valueMap(),
                          get=FromResultSet.toList)
    self.assertIsNotNone(value_map)
def test_set_shard_works(self) -> None:
    """An explicitly-set shard value is what get_shard() subsequently returns."""
    value = Fixtures.next_string()
    shard_set_explicitly(value)
    self.assertEqual(value, get_shard())
def test_upsert_thrice(self) -> None:
    """Exercises _upsert three times against the same key and inspects the
    exact traversals issued: first call inserts, a no-op re-upsert issues only
    a read plus an id fetch, and an upsert that sets a property to None drops
    that property while updating the changed one.
    """
    # wrap the real executor so we can assert on the queries it is handed
    executor = mock.Mock(wraps=self.get_proxy().query_executor())
    # test that we will insert
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'
    # extend the stock Database vertex type with an extra 'foo' property
    vertex_type = VertexType(
        label=VertexTypes.Database.value.label,
        properties=VertexTypes.Database.value.properties +
        tuple([Property(name='foo', type=GremlinType.String)]))
    exists = self._get(label=vertex_type,
                       key=database_uri,
                       extra_traversal=__.count())
    self.assertEqual(exists, 0)
    _upsert(executor=executor,
            g=self.get_proxy().g,
            key_property_name=self.get_proxy().key_property_name,
            label=vertex_type,
            key=database_uri,
            name='test',
            foo='bar')
    exists = self._get(label=vertex_type,
                       key=database_uri,
                       extra_traversal=__.count())
    self.assertEqual(exists, 1)
    # NOTE: 'id' shadows the builtin; it holds the graph id of the vertex
    id = self._get(label=vertex_type,
                   key=database_uri,
                   extra_traversal=__.id())
    executor.reset_mock()
    # second upsert with identical values: should not write any properties
    _upsert(executor=executor,
            g=self.get_proxy().g,
            key_property_name=self.get_proxy().key_property_name,
            label=vertex_type,
            key=database_uri,
            name='test')
    exists = self._get(label=vertex_type,
                       key=database_uri,
                       extra_traversal=__.count())
    self.assertEqual(exists, 1)
    self.assertEqual(executor.call_count, 2)
    # first one is the get:
    self.assertEqual(executor.call_args_list[0][1]['query'].bytecode,
                     __.V(id).valueMap(True).bytecode)
    # the second one should be like
    self.assertEqual(executor.call_args_list[1][1]['query'].bytecode,
                     __.V(id).id().bytecode)
    executor.reset_mock()
    # third upsert: changes 'name' and passes foo=None, which should drop 'foo'
    _upsert(executor=executor,
            g=self.get_proxy().g,
            key_property_name=self.get_proxy().key_property_name,
            label=vertex_type,
            key=database_uri,
            name='test2',
            foo=None)
    exists = self._get(label=vertex_type,
                       key=database_uri,
                       extra_traversal=__.count())
    self.assertEqual(exists, 1)
    self.assertEqual(executor.call_count, 2)
    # first one is the get:
    self.assertEqual(executor.call_args_list[0][1]['query'].bytecode,
                     __.V(id).valueMap(True).bytecode)
    # the second one should be like
    self.assertEqual(
        executor.call_args_list[1][1]['query'].bytecode,
        __.V(id).sideEffect(__.properties('foo').drop()).property(
            Cardinality.single, 'name', 'test2').id().bytecode)
def test_link_rt(self) -> None:
    """Round-trip for _link: linking two vertices creates one edge carrying the
    given property, and re-linking with that property set to None removes the
    property from the existing edge (without creating a second edge).
    """
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'
    cluster_uri = f'{db_name}://acluster'
    # neither endpoint vertex exists yet
    self.assertEqual(
        self._get(label=VertexTypes.Database,
                  key=database_uri,
                  extra_traversal=__.count()), 0)
    self.assertEqual(
        self._get(label=VertexTypes.Cluster,
                  key=cluster_uri,
                  extra_traversal=__.count()), 0)
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    self._upsert(label=VertexTypes.Cluster, key=cluster_uri, name='acluster')
    self.assertEqual(
        self._get(label=VertexTypes.Database,
                  key=database_uri,
                  extra_traversal=__.count()), 1)
    self.assertEqual(
        self._get(label=VertexTypes.Cluster,
                  key=cluster_uri,
                  extra_traversal=__.count()), 1)
    # use non-standard EdgeType, so aproperty doesn't explode
    edge_type = EdgeType(label='CLUSTER',
                         properties=tuple([
                             Property(name='created',
                                      type=GremlinType.Date,
                                      required=True),
                             Property(name='aproperty',
                                      type=GremlinType.String)
                         ]))
    # link
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster_uri,
               edge_label=edge_type,
               aproperty='hi')
    rel = self.get_relationship(node_type1=VertexTypes.Database.value.label,
                                node_key1=database_uri,
                                node_type2=VertexTypes.Cluster.value.label,
                                node_key2=cluster_uri)
    # exactly one edge, carrying both 'created' and the supplied 'aproperty'
    self.assertEqual(len(rel), 1)
    self.assertEqual(set(rel[0].keys()), set(['created', 'aproperty']))
    self.assertEqual(rel[0].get('aproperty'), 'hi')
    # repeat but with aproperty unset (e.g. like we want to use _link with expired)
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster_uri,
               edge_label=edge_type,
               aproperty=None)
    rel = self.get_relationship(node_type1=VertexTypes.Database.value.label,
                                node_key1=database_uri,
                                node_type2=VertexTypes.Cluster.value.label,
                                node_key2=cluster_uri)
    # still a single edge, but 'aproperty' has been dropped
    self.assertEqual(len(rel), 1)
    self.assertEqual(set(rel[0].keys()), set(['created']))
    self.assertEqual(rel[0].get('aproperty'), None)
def test_edges(self) -> None:
    """Exhaustively exercises the filtering dimensions of _edges_from: by edge
    property value, by edge label, by far-vertex label/key, by far-vertex
    property values, and by far-vertex property *absence* (property=None).
    """
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'
    cluster1_uri = f'{db_name}://acluster'
    cluster2_uri = f'{db_name}://bcluster'
    self.assertNotEqual(cluster1_uri, cluster2_uri)
    # cluster vertices carry disjoint extra properties: cluster1 has 'b',
    # cluster2 has 'c' — used below to test filtering on property absence
    cluster_vertex_type = VertexType.construct_type(
        label=VertexTypes.Cluster.value.label,
        properties=tuple([
            Property(name='aproperty', type=GremlinType.String),
            Property(name='b', type=GremlinType.String),
            Property(name='c', type=GremlinType.String)
        ]))
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    self._upsert(label=cluster_vertex_type,
                 key=cluster1_uri,
                 aproperty='one',
                 b='b')
    self._upsert(label=cluster_vertex_type,
                 key=cluster2_uri,
                 aproperty='two',
                 c='c')
    cluster_edge_type = EdgeType(label=EdgeTypes.Cluster.value.label,
                                 properties=tuple([
                                     Property(name='created',
                                              type=GremlinType.Date,
                                              required=True),
                                     Property(name='aproperty',
                                              type=GremlinType.String),
                                     Property(name='b',
                                              type=GremlinType.String),
                                     Property(name='c',
                                              type=GremlinType.String)
                                 ]))
    # two edges out of the database: one per cluster, distinguishable by
    # their edge properties ('won'/'bee' vs 'too'/'sea')
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster1_uri,
               edge_label=cluster_edge_type,
               aproperty='won',
               b='bee')
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster2_uri,
               edge_label=cluster_edge_type,
               aproperty='too',
               c='sea')
    # get the one
    e1 = _edges_from(g=self.get_proxy().g,
                     vertex1_label=VertexTypes.Database,
                     vertex1_key=database_uri,
                     vertex2_label=None,
                     vertex2_key=None,
                     edge_label=None,
                     aproperty='won').id().toList()
    self.assertEqual(len(e1), 1)
    # get the other
    e2 = _edges_from(g=self.get_proxy().g,
                     vertex1_label=VertexTypes.Database,
                     vertex1_key=database_uri,
                     vertex2_label=None,
                     vertex2_key=None,
                     edge_label=None,
                     aproperty='too').id().toList()
    self.assertEqual(len(e2), 1)
    self.assertNotEqual(e1[0], e2[0])
    # get both edges
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    edge_label=EdgeTypes.Cluster).id().toList()
    # order is unspecified, so accept either ordering
    self.assertTrue(e == e1 + e2 or e == e2 + e1)
    # get both edges
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    edge_label=None,
                    aproperty=within('won', 'too')).id().toList()
    self.assertTrue(e == e1 + e2 or e == e2 + e1)
    # get the one (filter by far-vertex key)
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=VertexTypes.Cluster,
                    vertex2_key=cluster1_uri,
                    edge_label=None).id().toList()
    self.assertEqual(e, e1)
    # get the other
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=VertexTypes.Cluster,
                    vertex2_key=cluster2_uri,
                    edge_label=None).id().toList()
    self.assertEqual(e, e2)
    # get the one (filter by far-vertex property value)
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    vertex2_properties=dict(aproperty='one'),
                    edge_label=None).id().toList()
    self.assertEqual(e, e1)
    # get the other
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    vertex2_properties=dict(aproperty='two'),
                    edge_label=None).id().toList()
    self.assertEqual(e, e2)
    # get the one (None filters on far-vertex property *absence*:
    # cluster1 has no 'c')
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    vertex2_properties=dict(c=None),
                    edge_label=None).id().toList()
    self.assertEqual(e, e1)
    # get the other (cluster2 has no 'b')
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    vertex2_properties=dict(b=None),
                    edge_label=None).id().toList()
    self.assertEqual(e, e2)
    # get none (no edge with the Schema label exists)
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=None,
                    vertex2_key=None,
                    edge_label=EdgeTypes.Schema).id().toList()
    self.assertEqual(len(e), 0)
    # get none (no far vertex with the Schema label exists)
    e = _edges_from(g=self.get_proxy().g,
                    vertex1_label=VertexTypes.Database,
                    vertex1_key=database_uri,
                    vertex2_label=VertexTypes.Schema,
                    vertex2_key=None,
                    edge_label=None).id().toList()
    self.assertEqual(len(e), 0)
def test_expire_link(self) -> None:
    """_expire_link expires exactly the one targeted edge: with a 2x2 mesh of
    database->cluster links, expiring db1->cluster1 leaves db1->cluster2 and
    db2->cluster1 unexpired.
    """
    db_name = Fixtures.next_database()
    database_uri = f'database://{db_name}'
    database2_uri = f'database://{db_name}2'
    cluster_uri = f'{db_name}://acluster'
    cluster2_uri = f'{db_name}2://acluster'
    self._upsert(label=VertexTypes.Database, key=database_uri, name='test')
    self._upsert(label=VertexTypes.Database, key=database2_uri, name='test1')
    self._upsert(label=VertexTypes.Cluster, key=cluster_uri, name='test2')
    self._upsert(label=VertexTypes.Cluster, key=cluster2_uri, name='test3')
    # use non-standard EdgeType, so aproperty doesn't explode
    edge_type = EdgeType(label='CLUSTER',
                         properties=tuple([
                             Property(name='created',
                                      type=GremlinType.Date,
                                      required=True),
                             Property(name='aproperty',
                                      type=GremlinType.String)
                         ]))
    # link every database to every cluster (4 edges total)
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster_uri,
               edge_label=edge_type,
               aproperty='hi1')
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster2_uri,
               edge_label=edge_type,
               aproperty='hi2')
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database2_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster_uri,
               edge_label=edge_type,
               aproperty='hi3')
    self._link(vertex1_label=VertexTypes.Database,
               vertex1_key=database2_uri,
               vertex2_label=VertexTypes.Cluster,
               vertex2_key=cluster2_uri,
               edge_label=edge_type,
               aproperty='hi4')
    # NOTE(review): this uses self.key_property_name while sibling tests use
    # self.get_proxy().key_property_name — presumably equivalent; confirm.
    with self.get_proxy().query_executor() as executor:
        _expire_link(executor=executor,
                     g=self.get_proxy().g,
                     key_property_name=self.key_property_name,
                     edge_label=edge_type,
                     vertex1_label=VertexTypes.Database,
                     vertex1_key=database_uri,
                     vertex2_label=VertexTypes.Cluster,
                     vertex2_key=cluster_uri)
    # db -> cluster link was expired properly
    rel = self.get_relationship(node_type1=VertexTypes.Database.value.label,
                                node_key1=database_uri,
                                node_type2=VertexTypes.Cluster.value.label,
                                node_key2=cluster_uri)
    self.assertEqual(_safe_get(rel), None)
    # db is still linked to its other cluster (not expired)
    rel = self.get_relationship(node_type1=VertexTypes.Database.value.label,
                                node_key1=database_uri,
                                node_type2=VertexTypes.Cluster.value.label,
                                node_key2=cluster2_uri)
    self.assertIsNone(_safe_get(rel).get('expired'))
    # cluster is still linked to its other db (not expired)
    rel = self.get_relationship(node_type1=VertexTypes.Database.value.label,
                                node_key1=database2_uri,
                                node_type2=VertexTypes.Cluster.value.label,
                                node_key2=cluster_uri)
    self.assertIsNone(_safe_get(rel).get('expired'))