def test_retry_policy_does_not_mark_null_locations_unavailable(self):
    """Verify EndpointDiscoveryRetryPolicy.ShouldRetry does not mark an
    endpoint unavailable (for reads or writes) when the endpoint manager
    resolves the service endpoint to None.
    """
    # Patch GetDatabaseAccount so the client constructor makes no network call.
    self.original_get_database_account = cosmos_client_connection.CosmosClientConnection.GetDatabaseAccount
    cosmos_client_connection.CosmosClientConnection.GetDatabaseAccount = self.mock_get_database_account

    client = cosmos_client_connection.CosmosClientConnection(
        self.DEFAULT_ENDPOINT,
        {'masterKey': self.MASTER_KEY},
        None,
        documents.ConsistencyLevel.Eventual)
    endpoint_manager = global_endpoint_manager._GlobalEndpointManager(client)

    # Spy on the endpoint manager: count mark-unavailable calls and force
    # resolve_service_endpoint() to return None.
    self.original_mark_endpoint_unavailable_for_read_function = endpoint_manager.mark_endpoint_unavailable_for_read
    endpoint_manager.mark_endpoint_unavailable_for_read = self._mock_mark_endpoint_unavailable_for_read
    self.original_mark_endpoint_unavailable_for_write_function = endpoint_manager.mark_endpoint_unavailable_for_write
    endpoint_manager.mark_endpoint_unavailable_for_write = self._mock_mark_endpoint_unavailable_for_write
    self.original_resolve_service_endpoint = endpoint_manager.resolve_service_endpoint
    endpoint_manager.resolve_service_endpoint = self._mock_resolve_service_endpoint

    # Read and write counters count the number of times the endpoint manager's
    # mark_endpoint_unavailable_for_read() and mark_endpoint_unavailable_for_write()
    # functions were called. When a 'None' location is returned by
    # resolve_service_endpoint(), these functions should not be called.
    # (Fixed comment: the second function is the *write* variant, not "read" twice.)
    self._read_counter = 0
    self._write_counter = 0
    request = RequestObject(http_constants.ResourceType.Document,
                            documents._OperationType.Read)
    endpointDiscovery_retry_policy = _endpoint_discovery_retry_policy.EndpointDiscoveryRetryPolicy(
        documents.ConnectionPolicy(), endpoint_manager, request)
    endpointDiscovery_retry_policy.ShouldRetry(
        errors.CosmosHttpResponseError(status_code=http_constants.StatusCodes.FORBIDDEN))
    self.assertEqual(self._read_counter, 0)
    self.assertEqual(self._write_counter, 0)

    self._read_counter = 0
    self._write_counter = 0
    request = RequestObject(http_constants.ResourceType.Document,
                            documents._OperationType.Create)
    endpointDiscovery_retry_policy = _endpoint_discovery_retry_policy.EndpointDiscoveryRetryPolicy(
        documents.ConnectionPolicy(), endpoint_manager, request)
    endpointDiscovery_retry_policy.ShouldRetry(
        errors.CosmosHttpResponseError(status_code=http_constants.StatusCodes.FORBIDDEN))
    self.assertEqual(self._read_counter, 0)
    self.assertEqual(self._write_counter, 0)

    # Restore every patch. Fix: the original saved the resolve_service_endpoint
    # patch but never restored it; restore it alongside the others.
    endpoint_manager.mark_endpoint_unavailable_for_read = self.original_mark_endpoint_unavailable_for_read_function
    endpoint_manager.mark_endpoint_unavailable_for_write = self.original_mark_endpoint_unavailable_for_write_function
    endpoint_manager.resolve_service_endpoint = self.original_resolve_service_endpoint
    cosmos_client_connection.CosmosClientConnection.GetDatabaseAccount = self.original_get_database_account
def test_globaldb_preferred_locations(self):
    """ReadEndpoint honors PreferredLocations when endpoint discovery is enabled."""
    policy = documents.ConnectionPolicy()
    policy.EnableEndpointDiscovery = True

    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host,
        {'masterKey': Test_globaldb_tests.masterKey},
        policy)

    doc_def = {
        'id': 'doc',
        'name': 'sample document',
        'key': 'value'
    }
    created = client.CreateItem(self.test_coll['_self'], doc_def)
    self.assertEqual(created['id'], doc_def['id'])

    # Delay to get these resources replicated to read location due to Eventual consistency
    time.sleep(5)

    client.ReadItem(created['_self'])
    served_from = urlparse(str(client.last_response_headers[HttpHeaders.ContentLocation]))
    write_url = urlparse(Test_globaldb_tests.write_location_host)

    # If no preferred locations is set, we return the write endpoint as
    # ReadEndpoint for better latency performance.
    self.assertEqual(str(served_from.hostname), str(write_url.hostname))
    self.assertEqual(client.ReadEndpoint, Test_globaldb_tests.write_location_host)

    policy.PreferredLocations = [Test_globaldb_tests.read_location2]
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host,
        {'masterKey': Test_globaldb_tests.masterKey},
        policy)

    doc_def['id'] = 'doc2'
    created = client.CreateItem(self.test_coll['_self'], doc_def)

    # Delay to get these resources replicated to read location due to Eventual consistency
    time.sleep(5)

    client.ReadItem(created['_self'])
    served_from = urlparse(str(client.last_response_headers[HttpHeaders.ContentLocation]))
    read2_url = urlparse(Test_globaldb_tests.read_location2_host)

    # With a preferred location set, it becomes the ReadEndpoint instead of
    # the default write endpoint.
    self.assertEqual(str(served_from.hostname), str(read2_url.hostname))
    self.assertEqual(client.ReadEndpoint, Test_globaldb_tests.read_location2_host)
def test_globaldb_endpoint_discovery(self):
    """Writes through a read-region client fail unless endpoint discovery is on."""
    policy = documents.ConnectionPolicy()
    policy.EnableEndpointDiscovery = False

    reader = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.read_location_host,
        Test_globaldb_tests.masterKey,
        policy)

    doc_def = {
        'id': 'doc',
        'name': 'sample document',
        'key': 'value'
    }

    # Create Document will fail for the read location client since it has
    # EnableEndpointDiscovery set to false, and hence the request will directly
    # go to the endpoint that was used to create the client instance
    # (which happens to be a read endpoint).
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.FORBIDDEN,
        SubStatusCodes.WRITE_FORBIDDEN,
        reader.CreateItem,
        self.test_coll['_self'],
        doc_def)

    # Query databases will pass for the read location client as it's a GET operation.
    list(reader.QueryDatabases({
        'query': 'SELECT * FROM root r WHERE r.id=@id',
        'parameters': [
            {'name': '@id', 'value': self.test_db['id']}
        ]
    }))

    policy.EnableEndpointDiscovery = True
    reader = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.read_location_host,
        Test_globaldb_tests.masterKey,
        policy)

    # CreateDocument call will go to the WriteEndpoint as EnableEndpointDiscovery
    # is set to True and client will resolve the right endpoint based on the operation.
    created = reader.CreateItem(self.test_coll['_self'], doc_def)
    self.assertEqual(created['id'], doc_def['id'])
def test_streaming_failover(self):
    """On repeated connection errors, the retry policy alternates between the
    two mocked read endpoints until the retry budget is exhausted.
    """
    # Patch the execute function and the database-account lookup so no real
    # network calls are made.
    self.OriginalExecuteFunction = retry_utility._ExecuteFunction
    retry_utility._ExecuteFunction = self._MockExecuteFunctionEndpointDiscover

    connection_policy = documents.ConnectionPolicy()
    connection_policy.PreferredLocations = self.preferred_regional_endpoints
    connection_policy.DisableSSLVerification = True

    self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
    cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_get_database_account

    client = cosmos_client.CosmosClient(
        self.DEFAULT_ENDPOINT, {'masterKey': self.MASTER_KEY}, connection_policy)

    # Fix: removed an unused local `document_definition` (the request below
    # always sends {'id': 'new Doc'}) and the unused `err` exception binding.
    created_document = {}
    try:
        created_document = client.CreateItem("dbs/mydb/colls/mycoll", {'id': 'new Doc'})
        self.fail()  # the mocked endpoints must raise ConnectionError
    except ConnectionError:
        print("Connection error occurred as expected.")

    self.assertDictEqual(created_document, {})
    self.assertEqual(self.counter, 7)
    # Retries alternate between the two read endpoints.
    for i in range(0, 6):
        if i % 2 == 0:
            self.assertEqual(self.endpoint_sequence[i], self.READ_ENDPOINT1)
        else:
            self.assertEqual(self.endpoint_sequence[i], self.READ_ENDPOINT2)

    # Restore the patches.
    cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account
    retry_utility._ExecuteFunction = self.OriginalExecuteFunction
def __init__(self):
    """Wire up per-region multi-write clients, workers, and the shared conflict worker."""
    self.account_endpoint = Configurations.ENDPOINT
    self.account_key = Configurations.ACCOUNT_KEY
    self.regions = Configurations.REGIONS.split(';')
    self.database_name = Configurations.DATABASE_NAME
    self.manual_collection_name = Configurations.MANUAL_COLLECTION_NAME
    self.lww_collection_name = Configurations.LWW_COLLECTION_NAME
    self.udp_collection_name = Configurations.UDP_COLLECTION_NAME
    self.basic_collection_name = Configurations.BASIC_COLLECTION_NAME

    self.workers = []
    self.conflict_worker = ConflictWorker(
        self.database_name,
        self.basic_collection_name,
        self.manual_collection_name,
        self.lww_collection_name,
        self.udp_collection_name)
    # One pool slot per region so all regional workers can run concurrently.
    self.pool = ThreadPool(processes=len(self.regions))

    # Build one session-consistency, multi-write client pinned to each region.
    for region in self.regions:
        regional_policy = documents.ConnectionPolicy()
        regional_policy.UseMultipleWriteLocations = True
        regional_policy.PreferredLocations = [region]
        regional_client = cosmos_client.CosmosClient(
            self.account_endpoint,
            {'masterKey': self.account_key},
            regional_policy,
            documents.ConsistencyLevel.Session)
        self.workers.append(
            Worker(regional_client, self.database_name, self.basic_collection_name))
        self.conflict_worker.add_client(regional_client)
def setUpClass(cls):
    """Start the local test server and build a proxied connection policy.

    Both objects are published as module-level globals for the tests to use.
    """
    global server, connection_policy
    server = Server(cls.testDbName, cls.serverPort)
    server.start()
    connection_policy = documents.ConnectionPolicy()
    connection_policy.ProxyConfiguration = documents.ProxyConfiguration()
    # Route requests through the local proxy server started above.
    connection_policy.ProxyConfiguration.Host = 'http://127.0.0.1'
def ObtainClient():
    """Build a session-consistency CosmosClient with SSL verification disabled."""
    policy = documents.ConnectionPolicy()
    policy.SSLConfiguration = documents.SSLConfiguration()
    # Try to setup the cacert.pem
    # policy.SSLConfiguration.SSLCaCerts = CaCertPath
    # Else, disable verification
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    policy.SSLConfiguration.SSLCaCerts = False
    return cosmos_client.CosmosClient(HOST, MASTER_KEY, "Session", connection_policy=policy)
def test_globaldb_endpoint_discovery_retry_policy(self):
    """After a mocked create, the WriteEndpoint is re-resolved to the read location."""
    policy = documents.ConnectionPolicy()
    policy.EnableEndpointDiscovery = True
    writer = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_mock_tests.write_location_host,
        Test_globaldb_mock_tests.masterKey,
        policy)

    # Initially the write endpoint is the host the client was created with.
    self.assertEqual(writer._global_endpoint_manager.WriteEndpoint,
                     Test_globaldb_mock_tests.write_location_host)

    self.MockCreateDatabase(writer, {'id': 'mock database'})

    # The mocked create triggers the discovery retry policy, which flips
    # the write endpoint to the read location.
    self.assertEqual(writer._global_endpoint_manager.WriteEndpoint,
                     Test_globaldb_mock_tests.read_location_host)
def create_spy_client(self, use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty):
    """Build a client configured for the requested endpoint-discovery scenario."""
    self.preferred_locations = ["location1", "location2", "location3", "location4"]
    policy = documents.ConnectionPolicy()
    policy.DisableSSLVerification = True
    if is_preferred_locations_list_empty:
        policy.PreferredLocations = []
    else:
        policy.PreferredLocations = self.preferred_locations
    policy.EnableEndpointDiscovery = enable_endpoint_discovery
    policy.UseMultipleWriteLocations = use_multiple_write_locations
    return cosmos_client.CosmosClient(
        self.DEFAULT_ENDPOINT, {'masterKey': "SomeKeyValue"}, policy)
def initialize(self, use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty):
    """Set up the location cache and global endpoint manager under test."""
    self.database_account = self.create_database_account(use_multiple_write_locations)

    all_locations = ["location1", "location2", "location3"]
    self.preferred_locations = [] if is_preferred_locations_list_empty else all_locations

    self.location_cache = LocationCache(
        self.preferred_locations,
        self.DEFAULT_ENDPOINT,
        enable_endpoint_discovery,
        use_multiple_write_locations,
        self.REFRESH_TIME_INTERVAL_IN_MS)
    self.location_cache.perform_on_database_account_read(self.database_account)

    policy = documents.ConnectionPolicy()
    policy.PreferredLocations = self.preferred_locations
    # Borrow the endpoint manager from a throwaway client instance.
    throwaway = cosmos_client.CosmosClient("", {}, policy)
    self.global_endpoint_manager = throwaway._global_endpoint_manager
def test_globaldb_read_write_endpoints(self):
    """Verify Read/Write endpoint resolution with endpoint discovery off vs. on,
    using the ContentLocation response header to confirm which region served reads."""
    connection_policy = documents.ConnectionPolicy()
    connection_policy.EnableEndpointDiscovery = False

    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, Test_globaldb_tests.masterKey, connection_policy)

    document_definition = {
        'id': 'doc',
        'name': 'sample document',
        'key': 'value'
    }

    # When EnableEndpointDiscovery is False, WriteEndpoint is set to the
    # endpoint passed while creating the client instance.
    created_document = client.CreateItem(self.test_coll['_self'], document_definition)
    self.assertEqual(client.WriteEndpoint, Test_globaldb_tests.host)

    # Delay to get these resources replicated to read location due to Eventual consistency
    time.sleep(5)

    client.ReadItem(created_document['_self'])
    content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])

    content_location_url = urlparse(content_location)
    host_url = urlparse(Test_globaldb_tests.host)

    # When EnableEndpointDiscovery is False, ReadEndpoint is set to the
    # endpoint passed while creating the client instance.
    self.assertEqual(str(content_location_url.hostname), str(host_url.hostname))
    self.assertEqual(client.ReadEndpoint, Test_globaldb_tests.host)

    connection_policy.EnableEndpointDiscovery = True
    document_definition['id'] = 'doc2'

    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, Test_globaldb_tests.masterKey, connection_policy)

    # When EnableEndpointDiscovery is True, WriteEndpoint is set to the write endpoint.
    created_document = client.CreateItem(self.test_coll['_self'], document_definition)
    self.assertEqual(client.WriteEndpoint, Test_globaldb_tests.write_location_host)

    # Delay to get these resources replicated to read location due to Eventual consistency
    time.sleep(5)

    client.ReadItem(created_document['_self'])
    content_location = str(client.last_response_headers[HttpHeaders.ContentLocation])

    content_location_url = urlparse(content_location)
    write_location_url = urlparse(Test_globaldb_tests.write_location_host)

    # If no preferred locations is set, we return the write endpoint as
    # ReadEndpoint for better latency performance.
    self.assertEqual(str(content_location_url.hostname), str(write_location_url.hostname))
    self.assertEqual(client.ReadEndpoint, Test_globaldb_tests.write_location_host)
def test_globaldb_database_account_unavailable(self):
    """When the database-account lookup fails, endpoints fall back to the client host."""
    policy = documents.ConnectionPolicy()
    policy.EnableEndpointDiscovery = True
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_mock_tests.host, Test_globaldb_mock_tests.masterKey, policy)

    manager = client._global_endpoint_manager

    # With discovery working, both endpoints resolve to the write location.
    self.assertEqual(manager.WriteEndpoint, Test_globaldb_mock_tests.write_location_host)
    self.assertEqual(manager.ReadEndpoint, Test_globaldb_mock_tests.write_location_host)

    # Make the account lookup fail, then force an endpoint refresh.
    global_endpoint_manager._GlobalEndpointManager._GetDatabaseAccountStub = self.MockGetDatabaseAccountStub
    manager.DatabaseAccountAvailable = False
    manager.RefreshEndpointList()

    # Both endpoints now fall back to the host the client was created with.
    self.assertEqual(manager.WriteEndpoint, Test_globaldb_mock_tests.host)
    self.assertEqual(manager.ReadEndpoint, Test_globaldb_mock_tests.host)
class _test_config(object):
    """Shared test configuration: emulator defaults plus placeholders for
    the global-database endpoints."""

    # Cosmos DB Emulator well-known key and local endpoint (overridable via env).
    masterKey = os.getenv(
        'ACCOUNT_KEY',
        'C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw=='
    )
    host = os.getenv('ACCOUNT_HOST', 'https://localhost:443')

    # The emulator's certificate is self-signed.
    connectionPolicy = documents.ConnectionPolicy()
    connectionPolicy.DisableSSLVerification = True

    # Placeholders for the global-database tests; fill in before running them.
    global_host = '[YOUR_GLOBAL_ENDPOINT_HERE]'
    write_location_host = '[YOUR_WRITE_ENDPOINT_HERE]'
    read_location_host = '[YOUR_READ_ENDPOINT_HERE]'
    read_location2_host = '[YOUR_READ_ENDPOINT2_HERE]'
    global_masterKey = '[YOUR_KEY_HERE]'
    write_location = '[YOUR_WRITE_LOCATION_HERE]'
    read_location = '[YOUR_READ_LOCATION_HERE]'
    read_location2 = '[YOUR_READ_LOCATION2_HERE]'
async def test_passes_cosmos_client_options(self):
    """cosmos_client_options in the settings must be forwarded to the inner client."""
    settings_with_options = get_settings()
    policy = documents.ConnectionPolicy()
    policy.DisableSSLVerification = True
    settings_with_options.cosmos_client_options = {
        "connection_policy": policy,
        "consistency_level": documents.ConsistencyLevel.Eventual,
    }

    storage = CosmosDbPartitionedStorage(settings_with_options)
    await storage.initialize()

    # Both options should be visible on the underlying Cosmos client.
    assert storage.client.connection_policy.DisableSSLVerification is True
    assert (
        storage.client.default_headers["x-ms-consistency-level"]
        == documents.ConsistencyLevel.Eventual
    )
def __init__(self, cosmos_account_uri, cosmos_account_key, cosmos_database_id, cosmos_disable_tls=False):
    """Create a Cosmos client with connection retries; optionally skip TLS verification."""
    policy = documents.ConnectionPolicy()
    # Retry transient connection failures with a short backoff.
    policy.ConnectionRetryConfiguration = Retry(
        total=5,
        connect=5,
        backoff_factor=0.1,
    )
    if cosmos_disable_tls:
        # Intended for local/emulator use only.
        policy.SSLConfiguration = documents.SSLConfiguration()
        policy.SSLConfiguration.SSLCaCerts = False
    self.cosmos = cosmos_client.CosmosClient(
        cosmos_account_uri, {'masterKey': cosmos_account_key}, policy)
    self.database_id = cosmos_database_id
def test_streaming_failover(self):
    """Write requests flip between the two mocked write endpoints until the
    mock finally serves the read; verify the exact request sequence."""
    # Patch the execute function and database-account lookup so no real
    # network calls are made.
    self.OriginalExecuteFunction = retry_utility._ExecuteFunction
    retry_utility._ExecuteFunction = self._MockExecuteFunctionEndpointDiscover
    connection_policy = documents.ConnectionPolicy()
    connection_policy.PreferredLocations = self.preferred_regional_endpoints
    connection_policy.DisableSSLVerification = True
    self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
    cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_get_database_account
    client = cosmos_client.CosmosClient(
        self.DEFAULT_ENDPOINT,
        {'masterKey': self.MASTER_KEY},
        connection_policy,
        documents.ConsistencyLevel.Eventual)
    document_definition = {
        'id': 'doc',
        'name': 'sample document',
        'key': 'value'
    }
    created_document = {}
    created_document = client.CreateItem("dbs/mydb/colls/mycoll", document_definition)
    # The mock returns empty results and headers for the final response.
    self.assertDictEqual(created_document, {})
    self.assertDictEqual(client.last_response_headers, {})
    self.assertEqual(self.counter, 10)
    # First request is an initial read collection.
    # Next 8 requests hit forbidden write exceptions and the endpoint retry policy keeps
    # flipping the resolved endpoint between the 2 write endpoints.
    # The 10th request returns the actual read document.
    for i in range(0, 8):
        if i % 2 == 0:
            self.assertEqual(self.endpoint_sequence[i], self.WRITE_ENDPOINT1)
        else:
            self.assertEqual(self.endpoint_sequence[i], self.WRITE_ENDPOINT2)
    # Restore the patches.
    cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account
    retry_utility._ExecuteFunction = self.OriginalExecuteFunction
def test_globaldb_endpoint_assignments(self):
    """Endpoint selection across discovery and preferred-location settings."""
    policy = documents.ConnectionPolicy()
    policy.EnableEndpointDiscovery = False
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, Test_globaldb_tests.masterKey, policy)

    # Discovery off: both endpoints stay pinned to the host the client was
    # created with.
    self.assertEqual(client._global_endpoint_manager.WriteEndpoint, Test_globaldb_tests.host)
    self.assertEqual(client._global_endpoint_manager.ReadEndpoint, Test_globaldb_tests.host)

    policy.EnableEndpointDiscovery = True
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, Test_globaldb_tests.masterKey, policy)

    # Discovery on, no preferred locations: reads are served from the write
    # endpoint for better latency, and the write endpoint resolves normally.
    self.assertEqual(client._global_endpoint_manager.WriteEndpoint,
                     Test_globaldb_tests.write_location_host)
    self.assertEqual(client._global_endpoint_manager.ReadEndpoint,
                     Test_globaldb_tests.write_location_host)

    policy.PreferredLocations = [Test_globaldb_tests.read_location2]
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, Test_globaldb_tests.masterKey, policy)

    # A preferred location replaces the default write endpoint as ReadEndpoint.
    self.assertEqual(client._global_endpoint_manager.WriteEndpoint,
                     Test_globaldb_tests.write_location_host)
    self.assertEqual(client._global_endpoint_manager.ReadEndpoint,
                     Test_globaldb_tests.read_location2_host)
class _test_config(object):
    """Test configuration: emulator credentials plus lazily-created, cached
    database/collection fixtures for the CosmosClientConnection-style API."""

    #[SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Cosmos DB Emulator Key")]
    masterKey = os.getenv(
        'ACCOUNT_KEY',
        'C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw=='
    )
    host = os.getenv('ACCOUNT_HOST', 'https://localhost:443')
    connectionPolicy = documents.ConnectionPolicy()
    connectionPolicy.DisableSSLVerification = True
    # Placeholders for the global-database tests; fill in before running them.
    global_host = '[YOUR_GLOBAL_ENDPOINT_HERE]'
    write_location_host = '[YOUR_WRITE_ENDPOINT_HERE]'
    read_location_host = '[YOUR_READ_ENDPOINT_HERE]'
    read_location2_host = '[YOUR_READ_ENDPOINT2_HERE]'
    global_masterKey = '[YOUR_KEY_HERE]'
    write_location = '[YOUR_WRITE_LOCATION_HERE]'
    read_location = '[YOUR_READ_LOCATION_HERE]'
    read_location2 = '[YOUR_READ_LOCATION2_HERE]'
    THROUGHPUT_FOR_5_PARTITIONS = 30000
    THROUGHPUT_FOR_1_PARTITION = 400
    TEST_DATABASE_ID = "Python SDK Test Database " + str(uuid.uuid4())
    TEST_COLLECTION_SINGLE_PARTITION_ID = "Single Partition Test Collection"
    TEST_COLLECTION_MULTI_PARTITION_ID = "Multi Partition Test Collection"
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID = "Multi Partition Test Collection With Custom PK"
    TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY = "id"
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY = "pk"
    # Cached fixture handles (created on first use).
    TEST_DATABASE = None
    TEST_COLLECTION_SINGLE_PARTITION = None
    TEST_COLLECTION_MULTI_PARTITION = None
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK = None
    IS_MULTIMASTER_ENABLED = False

    @classmethod
    def create_database_if_not_exist(cls, client):
        # Create (and cache) the shared test database, recording whether the
        # account has multiple writable locations enabled.
        if cls.TEST_DATABASE is not None:
            return cls.TEST_DATABASE
        cls.try_delete_database(client)
        cls.TEST_DATABASE = client.CreateDatabase({'id': cls.TEST_DATABASE_ID})
        cls.IS_MULTIMASTER_ENABLED = client.GetDatabaseAccount()._EnableMultipleWritableLocations
        return cls.TEST_DATABASE

    @classmethod
    def try_delete_database(cls, client):
        # Best-effort delete; "not found" is fine, anything else re-raises.
        try:
            client.DeleteDatabase("dbs/" + cls.TEST_DATABASE_ID)
        except errors.HTTPFailure as e:
            if e.status_code != StatusCodes.NOT_FOUND:
                raise e

    @classmethod
    def create_single_partition_collection_if_not_exist(cls, client):
        # Create the collection once, then clear it on every call so each
        # test starts from an empty collection.
        if cls.TEST_COLLECTION_SINGLE_PARTITION is None:
            cls.TEST_COLLECTION_SINGLE_PARTITION = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_1_PARTITION, None)
        cls.remove_all_documents(client, cls.TEST_COLLECTION_SINGLE_PARTITION, None)
        return cls.TEST_COLLECTION_SINGLE_PARTITION

    @classmethod
    def create_multi_partition_collection_if_not_exist(cls, client):
        if cls.TEST_COLLECTION_MULTI_PARTITION is None:
            cls.TEST_COLLECTION_MULTI_PARTITION = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_5_PARTITIONS, True)
        cls.remove_all_documents(client, cls.TEST_COLLECTION_MULTI_PARTITION, True)
        return cls.TEST_COLLECTION_MULTI_PARTITION

    @classmethod
    def create_multi_partition_collection_with_custom_pk_if_not_exist(cls, client):
        if cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK is None:
            cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_5_PARTITIONS, False)
        cls.remove_all_documents(
            client, cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK, False)
        return cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK

    @classmethod
    def create_collection_with_required_throughput(cls, client, throughput, use_id_as_partition_key):
        # use_id_as_partition_key: None -> single-partition collection,
        # True -> partitioned on 'id', False -> partitioned on the custom PK.
        database = cls.create_database_if_not_exist(client)
        options = {'offerThroughput': throughput}
        document_collection = {}
        if throughput == cls.THROUGHPUT_FOR_1_PARTITION:
            collection_id = cls.TEST_COLLECTION_SINGLE_PARTITION_ID
        else:
            if use_id_as_partition_key:
                collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_ID
                partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY
            else:
                collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID
                partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY
            document_collection['partitionKey'] = {
                'paths': ['/' + partition_key],
                'kind': 'Hash'
            }
        document_collection['id'] = collection_id
        document_collection = client.CreateContainer(
            database['_self'], document_collection, options)
        return document_collection

    @classmethod
    def remove_all_documents(cls, client, document_collection, use_id_as_partition_key):
        # Delete every document, retrying the whole sweep on HTTP failures.
        while True:
            query_iterable = client.ReadItems(document_collection['_self'])
            read_documents = list(query_iterable)
            try:
                for document in read_documents:
                    options = {}
                    if use_id_as_partition_key is not None:
                        if use_id_as_partition_key:
                            options['partitionKey'] = document[
                                cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY]
                        else:
                            if cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY in document:
                                options['partitionKey'] = document[
                                    cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY]
                            else:
                                # Document has no PK value; use the empty-key sentinel.
                                options['partitionKey'] = {}
                    client.DeleteItem(document['_self'], options)
                if cls.IS_MULTIMASTER_ENABLED:
                    # sleep to ensure deletes are propagated for multimaster enabled accounts
                    time.sleep(2)
                break
            except errors.HTTPFailure as e:
                print("Error occurred while deleting documents:" + str(e) + " \nRetrying...")
def test_globaldb_update_locations_cache(self):
    """Exercise _GlobalEndpointManager.UpdateLocationsCache across combinations
    of writable/readable locations, preferred locations, and endpoint discovery."""
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host, {'masterKey': Test_globaldb_tests.masterKey})

    writable_locations = [{
        'name': Test_globaldb_tests.write_location,
        'databaseAccountEndpoint': Test_globaldb_tests.write_location_host
    }]
    readable_locations = [{
        'name': Test_globaldb_tests.read_location,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location_host
    }, {
        'name': Test_globaldb_tests.read_location2,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location2_host
    }]

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # If no preferred locations is set, we return the write endpoint as
    # ReadEndpoint for better latency performance; write endpoint is set as expected.
    self.assertEqual(write_endpoint, Test_globaldb_tests.write_location_host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.write_location_host)

    writable_locations = []
    readable_locations = []

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # If writable_locations and readable_locations are empty, both Read and
    # Write Endpoints point to endpoint passed while creating the client instance.
    self.assertEqual(write_endpoint, Test_globaldb_tests.host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.host)

    writable_locations = [{
        'name': Test_globaldb_tests.write_location,
        'databaseAccountEndpoint': Test_globaldb_tests.write_location_host
    }]
    readable_locations = []

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # If there are no readable_locations, we use the write endpoint as ReadEndpoint.
    self.assertEqual(write_endpoint, Test_globaldb_tests.write_location_host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.write_location_host)

    writable_locations = []
    readable_locations = [{
        'name': Test_globaldb_tests.read_location,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location_host
    }]

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # If there are no writable_locations, both Read and Write Endpoints point
    # to endpoint passed while creating the client instance.
    self.assertEqual(write_endpoint, Test_globaldb_tests.host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.host)

    writable_locations = [{
        'name': Test_globaldb_tests.write_location,
        'databaseAccountEndpoint': Test_globaldb_tests.write_location_host
    }]
    readable_locations = [{
        'name': Test_globaldb_tests.read_location,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location_host
    }, {
        'name': Test_globaldb_tests.read_location2,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location2_host
    }]

    connection_policy = documents.ConnectionPolicy()
    connection_policy.PreferredLocations = [
        Test_globaldb_tests.read_location2
    ]

    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host,
        {'masterKey': Test_globaldb_tests.masterKey},
        connection_policy)

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # Test that the preferred location is set as ReadEndpoint instead of
    # default write endpoint when no preference is set.
    self.assertEqual(write_endpoint, Test_globaldb_tests.write_location_host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.read_location2_host)

    writable_locations = [{
        'name': Test_globaldb_tests.write_location,
        'databaseAccountEndpoint': Test_globaldb_tests.write_location_host
    }, {
        'name': Test_globaldb_tests.read_location2,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location2_host
    }]
    readable_locations = [{
        'name': Test_globaldb_tests.read_location,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location_host
    }]

    connection_policy = documents.ConnectionPolicy()
    connection_policy.PreferredLocations = [
        Test_globaldb_tests.read_location2
    ]

    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host,
        {'masterKey': Test_globaldb_tests.masterKey},
        connection_policy)

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # Test that the preferred location is chosen from the WriteLocations if
    # it's not present in the ReadLocations.
    self.assertEqual(write_endpoint, Test_globaldb_tests.write_location_host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.read_location2_host)

    writable_locations = [{
        'name': Test_globaldb_tests.write_location,
        'databaseAccountEndpoint': Test_globaldb_tests.write_location_host
    }]
    readable_locations = [{
        'name': Test_globaldb_tests.read_location,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location_host
    }, {
        'name': Test_globaldb_tests.read_location2,
        'databaseAccountEndpoint': Test_globaldb_tests.read_location2_host
    }]

    connection_policy.EnableEndpointDiscovery = False
    client = cosmos_client_connection.CosmosClientConnection(
        Test_globaldb_tests.host,
        {'masterKey': Test_globaldb_tests.masterKey},
        connection_policy)

    write_endpoint, read_endpoint = client._global_endpoint_manager.UpdateLocationsCache(
        writable_locations, readable_locations)

    # If EnableEndpointDiscovery is False, both Read and Write Endpoints point
    # to endpoint passed while creating the client instance.
    self.assertEqual(write_endpoint, Test_globaldb_tests.host)
    self.assertEqual(read_endpoint, Test_globaldb_tests.host)
import json
import random

import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.documents as doc
from azure.cosmos import CosmosClient, PartitionKey, exceptions

# NOTE(review): credentials are hard-coded; prefer environment variables or a
# secrets store for anything beyond local experimentation.
config = {
    'ENDPOINT': 'https://cosmosdb2-team3.documents.azure.com:443/',
    'PRIMARYKEY': 'uDw5eCXYABEpiz16QTKvd07GhCt68D9g9pNye6shFbalntrmHS1gI9NXCY0m8TOHjGYwtqjY7X2j19mq3Wt7hQ==',
    'DATABASE': 'AbeTemp',
    'CONTAINER': 'Cart'
}

# Initialize the Cosmos client
connection_policy = doc.ConnectionPolicy()
# Disable in production.
# Fix: DisableSSLVerification is a boolean flag — the original assigned the
# string "true", which only worked because any non-empty string is truthy.
connection_policy.DisableSSLVerification = True
# NOTE(review): connection_policy is never passed to the client below, so this
# setting currently has no effect — confirm whether it should be wired in.

client = cosmos_client.CosmosClient(url=config['ENDPOINT'], credential=config['PRIMARYKEY'])

database_name = config['DATABASE']
database = client.get_database_client(database_name)

container_name = config['CONTAINER']
# Create the container if it is missing; otherwise reuse the existing one.
try:
    container = database.create_container(
        id=container_name, partition_key=PartitionKey(path="/UserId"))
except exceptions.CosmosResourceExistsError:
    container = database.get_container_client(container_name)
class _test_config(object):
    """Shared configuration and fixture helpers for the Cosmos DB SDK tests.

    Holds account credentials (overridable through environment variables),
    well-known test database/collection ids, and class-level caches of the
    created resources so each database/collection is provisioned at most once
    per test run.
    """

    #[SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Cosmos DB Emulator Key")]
    masterKey = os.getenv(
        'ACCOUNT_KEY',
        'C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw=='
    )
    host = os.getenv('ACCOUNT_HOST', 'https://localhost:443/')
    connection_str = os.getenv(
        'ACCOUNT_CONNECTION_STR',
        'AccountEndpoint={};AccountKey={};'.format(host, masterKey))

    # SSL verification is disabled because the default target is the local
    # emulator, which uses a self-signed certificate.
    connectionPolicy = documents.ConnectionPolicy()
    connectionPolicy.DisableSSLVerification = True

    # Placeholders for the multi-region (global database) tests; fill these in
    # before running those tests.
    global_host = '[YOUR_GLOBAL_ENDPOINT_HERE]'
    write_location_host = '[YOUR_WRITE_ENDPOINT_HERE]'
    read_location_host = '[YOUR_READ_ENDPOINT_HERE]'
    read_location2_host = '[YOUR_READ_ENDPOINT2_HERE]'
    global_masterKey = '[YOUR_KEY_HERE]'
    write_location = '[YOUR_WRITE_LOCATION_HERE]'
    read_location = '[YOUR_READ_LOCATION_HERE]'
    read_location2 = '[YOUR_READ_LOCATION2_HERE]'

    # Throughput values used to force the partition count of a new container.
    THROUGHPUT_FOR_5_PARTITIONS = 30000
    THROUGHPUT_FOR_1_PARTITION = 400

    # Random suffix so concurrent test runs do not collide on the database id.
    TEST_DATABASE_ID = "Python SDK Test Database " + str(uuid.uuid4())
    TEST_COLLECTION_SINGLE_PARTITION_ID = "Single Partition Test Collection"
    TEST_COLLECTION_MULTI_PARTITION_ID = "Multi Partition Test Collection"
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID = "Multi Partition Test Collection With Custom PK"
    TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY = "id"
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY = "pk"

    # Lazily-populated caches for the created resources (see the
    # *_if_not_exist helpers below).
    TEST_DATABASE = None
    TEST_COLLECTION_SINGLE_PARTITION = None
    TEST_COLLECTION_MULTI_PARTITION = None
    TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK = None
    # Set from the account properties the first time the database is created.
    IS_MULTIMASTER_ENABLED = False

    @classmethod
    def create_database_if_not_exist(cls, client):
        # type: (CosmosClient) -> Database
        """Return the cached test database, creating it on first use.

        Any pre-existing database with the same id is deleted first. Also
        records whether the account has multiple writable locations (read
        from a private SDK attribute) for use by remove_all_documents().
        """
        if cls.TEST_DATABASE is not None:
            return cls.TEST_DATABASE
        cls.try_delete_database(client)
        cls.TEST_DATABASE = client.create_database(cls.TEST_DATABASE_ID)
        cls.IS_MULTIMASTER_ENABLED = client.get_database_account(
        )._EnableMultipleWritableLocations
        return cls.TEST_DATABASE

    @classmethod
    def try_delete_database(cls, client):
        # type: (CosmosClient) -> None
        """Delete the test database, ignoring 'not found'; re-raise anything else."""
        try:
            client.delete_database(cls.TEST_DATABASE_ID)
        except exceptions.CosmosHttpResponseError as e:
            if e.status_code != StatusCodes.NOT_FOUND:
                raise e

    @classmethod
    def create_single_partition_collection_if_not_exist(cls, client):
        # type: (CosmosClient) -> Container
        """Return the cached single-partition collection, creating and emptying it on first use."""
        if cls.TEST_COLLECTION_SINGLE_PARTITION is None:
            cls.TEST_COLLECTION_SINGLE_PARTITION = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_1_PARTITION, False)
        cls.remove_all_documents(cls.TEST_COLLECTION_SINGLE_PARTITION, False)
        return cls.TEST_COLLECTION_SINGLE_PARTITION

    @classmethod
    def create_multi_partition_collection_if_not_exist(cls, client):
        # type: (CosmosClient) -> Container
        """Return the cached multi-partition collection (pk = 'id'), creating and emptying it on first use."""
        if cls.TEST_COLLECTION_MULTI_PARTITION is None:
            cls.TEST_COLLECTION_MULTI_PARTITION = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_5_PARTITIONS, False)
        cls.remove_all_documents(cls.TEST_COLLECTION_MULTI_PARTITION, False)
        return cls.TEST_COLLECTION_MULTI_PARTITION

    @classmethod
    def create_multi_partition_collection_with_custom_pk_if_not_exist(
            cls, client):
        # type: (CosmosClient) -> Container
        """Return the cached multi-partition collection with custom pk ('pk'), creating and emptying it on first use."""
        if cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK is None:
            cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK = cls.create_collection_with_required_throughput(
                client, cls.THROUGHPUT_FOR_5_PARTITIONS, True)
        cls.remove_all_documents(
            cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK, True)
        return cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK

    @classmethod
    def create_collection_with_required_throughput(cls, client, throughput,
                                                   use_custom_partition_key):
        # type: (CosmosClient, int, boolean) -> Container
        """Create a collection in the test database sized by *throughput*.

        THROUGHPUT_FOR_1_PARTITION selects the single-partition collection
        id (its partition key path is still '/id'); any other throughput
        selects one of the multi-partition collections, with
        *use_custom_partition_key* choosing between '/id' and '/pk'.
        """
        database = cls.create_database_if_not_exist(client)
        if throughput == cls.THROUGHPUT_FOR_1_PARTITION:
            collection_id = cls.TEST_COLLECTION_SINGLE_PARTITION_ID
            partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY
        else:
            if use_custom_partition_key:
                collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID
                partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY
            else:
                collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_ID
                partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY
        document_collection = database.create_container(
            id=collection_id,
            partition_key=PartitionKey(path='/' + partition_key, kind='Hash'),
            offer_throughput=throughput)
        return document_collection

    @classmethod
    def remove_all_documents(cls, document_collection,
                             use_custom_partition_key):
        # type: (Container, boolean) -> None
        """Delete every document in *document_collection*, retrying on HTTP errors.

        For custom-pk collections, documents without the 'pk' property are
        deleted with NonePartitionKeyValue. Loops until a full pass succeeds;
        on error it prints and retries the whole pass.
        """
        while True:
            query_iterable = document_collection.query_items(
                query="Select * from c", enable_cross_partition_query=True)
            read_documents = list(query_iterable)
            try:
                for document in read_documents:
                    partition_key = 'dummy_pk'
                    if not use_custom_partition_key:
                        partition_key = document[
                            cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY]
                    else:
                        if cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY in document:
                            partition_key = document[
                                cls.
                                TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY]
                        else:
                            partition_key = NonePartitionKeyValue
                    document_collection.delete_item(
                        item=document, partition_key=partition_key)
                if cls.IS_MULTIMASTER_ENABLED:
                    # sleep to ensure deletes are propagated for multimaster enabled accounts
                    time.sleep(2)
                break
            except exceptions.CosmosHttpResponseError as e:
                print("Error occurred while deleting documents:" + str(e) +
                      " \nRetrying...")