def test_collection_and_document_ttl_values(self):
    """Validate that a positive defaultTtl round-trips on container create, and that
    invalid ttl values (0, None, -10) are rejected with 400 Bad Request.

    Valid TTL values are -1 (never expire) or a non-zero positive 32-bit integer.
    """
    ttl = 5
    created_collection = self.created_db.create_container(
        id='test_collection_and_document_ttl_values1' + str(uuid.uuid4()),
        default_ttl=ttl,
        partition_key=PartitionKey(path='/id', kind='Hash')
    )
    # The service should echo the defaultTtl we supplied.
    created_collection_properties = created_collection.read()
    self.assertEqual(created_collection_properties['defaultTtl'], ttl)

    collection_id = 'test_collection_and_document_ttl_values4' + str(uuid.uuid4())
    ttl = -10
    # -10 is an unsupported value for defaultTtl. Valid values are -1 or a
    # non-zero positive 32-bit integer value.
    # Positional args map to create_container(id, partition_key, indexing_policy, default_ttl).
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        self.created_db.create_container,
        collection_id,
        PartitionKey(path='/id', kind='Hash'),
        None,
        ttl)

    document_definition = {
        'id': 'doc1' + str(uuid.uuid4()),
        'name': 'sample document',
        'key': 'value',
        'ttl': 0}

    # 0 is an unsupported value for ttl. Valid values are -1 or a non-zero
    # positive 32-bit integer value.
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        created_collection.create_item,
        document_definition)

    document_definition['id'] = 'doc2' + str(uuid.uuid4())
    document_definition['ttl'] = None
    # None is an unsupported value for ttl. Valid values are -1 or a non-zero
    # positive 32-bit integer value.
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        created_collection.create_item,
        document_definition)

    document_definition['id'] = 'doc3' + str(uuid.uuid4())
    document_definition['ttl'] = -10
    # -10 is an unsupported value for ttl. Valid values are -1 or a non-zero
    # positive 32-bit integer value.
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.BAD_REQUEST,
        created_collection.create_item,
        document_definition)

    self.created_db.delete_container(container=created_collection)
def create_collection(self, client, created_db):
    # type: (CosmosClient, Database) -> Container
    """Provision a hash-partitioned (on /id) container for the order-by tests,
    indexed with Range indexes over both Number and String at the root path."""
    range_indexes = [
        {'kind': 'Range', 'dataType': 'Number'},
        {'kind': 'Range', 'dataType': 'String'},
    ]
    policy = {'includedPaths': [{'path': '/', 'indexes': range_indexes}]}
    return created_db.create_container(
        id='orderby_tests collection ' + str(uuid.uuid4()),
        indexing_policy=policy,
        partition_key=PartitionKey(path='/id', kind='Hash'),
        offer_throughput=30000
    )
def run_sample():
    """Run the item-management walkthrough end to end.

    Provisions the sample database/container (idempotently), performs every
    item operation against the 'SalesOrder1' document, then deletes the
    database on a best-effort basis.
    """
    client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY})
    try:
        db = client.create_database_if_not_exists(id=DATABASE_ID)
        container = db.create_container_if_not_exists(
            id=CONTAINER_ID,
            partition_key=PartitionKey(path='/id', kind='Hash'))

        # Drive each operation in order via a dispatch table.
        steps = (
            (create_items, ()),
            (read_item, ('SalesOrder1',)),
            (read_items, ()),
            (query_items, ('SalesOrder1',)),
            (replace_item, ('SalesOrder1',)),
            (upsert_item, ('SalesOrder1',)),
            (delete_item, ('SalesOrder1',)),
        )
        for step, extra_args in steps:
            step(container, *extra_args)

        # Best-effort cleanup: the database may already be gone.
        try:
            client.delete_database(db)
        except exceptions.CosmosResourceNotFoundError:
            pass
    except exceptions.CosmosHttpResponseError as error:
        print('\nrun_sample has caught an error. {0}'.format(error.message))
    finally:
        print("\nrun_sample done")
def _create_collection(cls, created_db):
    # type: (Database) -> Container
    """Provision the container used by the aggregate tests: Range indexes over
    Number and String at the root, partitioned on the configured key path."""
    index_specs = [
        {'kind': 'Range', 'dataType': data_type}
        for data_type in ('Number', 'String')
    ]
    return created_db.create_container(
        id='aggregate tests collection ' + str(uuid.uuid4()),
        indexing_policy={'includedPaths': [{'path': '/', 'indexes': index_specs}]},
        partition_key=PartitionKey(
            path='/{}'.format(_config.PARTITION_KEY),
            kind=documents.PartitionKind.Hash,
        ),
        offer_throughput=10100
    )
def create_collection(cls, created_db):
    """Provision a hash-partitioned container (on /id) for the query
    execution context tests; id is randomized to avoid collisions."""
    container_id = 'query_execution_context_tests collection ' + str(uuid.uuid4())
    return created_db.create_container(
        id=container_id,
        partition_key=PartitionKey(path='/id', kind='Hash'))
def create_collection_if_not_exist_no_custom_throughput(cls, client):
    # type: (CosmosClient) -> Container
    """Get-or-create the shared single-partition test container (partitioned
    on /id) inside the shared test database, using default throughput."""
    database = cls.create_database_if_not_exist(client)
    return database.create_container_if_not_exists(
        id=cls.TEST_COLLECTION_SINGLE_PARTITION_ID,
        partition_key=PartitionKey(path="/id"))
def get_cosmos_container(database, container_id):
    """Get-or-create the container with ``container_id`` (partitioned on /id).

    Returns the container client, or None when the service call fails.

    Note: ``create_container_if_not_exists`` already handles the
    "already exists" case without raising, so reaching the handler means the
    request itself failed — the original code printed a misleading
    "was found" message here and fell off the end returning None implicitly.
    """
    try:
        return database.create_container_if_not_exists(
            id=container_id,
            partition_key=PartitionKey(path='/id', kind='Hash'))
    except exceptions.CosmosHttpResponseError as e:
        # Report the real failure instead of claiming the container was found.
        print('Failed to get container \'{0}\': {1}'.format(container_id, e))
        return None
def run_sample():
    """Pre-4.0-style sample: create the database and container (tolerating
    409 Conflict for pre-existing resources), run the ItemManagement
    operations, then delete the database (tolerating 404)."""
    with IDisposable(
            cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY})) as client:
        try:
            # setup database for this sample
            try:
                db = client.create_database(id=DATABASE_ID)
            except errors.HTTPFailure as e:
                if e.status_code == 409:
                    # NOTE(review): on this path `db` is never bound, so the
                    # container setup below would raise NameError whenever the
                    # database already exists — confirm the sample always runs
                    # against a fresh DATABASE_ID.
                    pass
                else:
                    raise errors.HTTPFailure(e.status_code)

            # setup container for this sample
            try:
                container = db.create_container(id=CONTAINER_ID,
                                                partition_key=PartitionKey(
                                                    path='/id', kind='Hash'))
                print('Container with id \'{0}\' created'.format(CONTAINER_ID))
            except errors.HTTPFailure as e:
                if e.status_code == 409:
                    # NOTE(review): `container` is likewise unbound on this
                    # path, yet it is used below — verify before relying on
                    # the "was found" branch.
                    print('Container with id \'{0}\' was found'.format(
                        CONTAINER_ID))
                else:
                    raise errors.HTTPFailure(e.status_code)

            # Exercise each item operation against the same sample order.
            ItemManagement.CreateItems(container)
            ItemManagement.ReadItem(container, 'SalesOrder1')
            ItemManagement.ReadItems(container)
            ItemManagement.QueryItems(container, 'SalesOrder1')
            ItemManagement.ReplaceItem(container, 'SalesOrder1')
            ItemManagement.UpsertItem(container, 'SalesOrder1')
            ItemManagement.DeleteItem(container, 'SalesOrder1')

            # cleanup database after sample (404 means it is already gone)
            try:
                client.delete_database(db)
            except errors.CosmosError as e:
                if e.status_code == 404:
                    pass
                else:
                    raise errors.HTTPFailure(e.status_code)
        except errors.HTTPFailure as e:
            print('\nrun_sample has caught an error. {0}'.format(e.message))
        finally:
            print("\nrun_sample done")
def test_document_ttl_with_negative_one_defaultTtl(self):
    """With defaultTtl == -1, documents never expire unless they carry their
    own positive 'ttl' value, which overrides the container default."""
    created_collection = self.created_db.create_container(
        id='test_document_ttl_with_negative_one_defaultTtl collection' + str(uuid.uuid4()),
        default_ttl=-1,
        partition_key=PartitionKey(path='/id', kind='Hash'))

    document_definition = {
        'id': 'doc1' + str(uuid.uuid4()),
        'name': 'sample document',
        'key': 'value'
    }

    # the created document's ttl value would be -1 inherited from the
    # collection's defaultTtl and this document will never expire
    created_document1 = created_collection.create_item(
        body=document_definition)

    # This document is also set to never expire explicitly
    document_definition['id'] = 'doc2' + str(uuid.uuid4())
    document_definition['ttl'] = -1
    created_document2 = created_collection.create_item(
        body=document_definition)

    # This document expires after 2 seconds despite the container default.
    document_definition['id'] = 'doc3' + str(uuid.uuid4())
    document_definition['ttl'] = 2
    created_document3 = created_collection.create_item(
        body=document_definition)

    # Sleep past doc3's 2-second ttl.
    time.sleep(4)

    # the created document should be gone now as its ttl value is set to 2
    # which overrides the collection's defaultTtl value (-1)
    self.__AssertHTTPFailureWithStatus(StatusCodes.NOT_FOUND,
                                       created_collection.read_item,
                                       created_document3['id'],
                                       created_document3['id'])

    # The documents with id doc1 and doc2 will never expire
    read_document = created_collection.read_item(
        item=created_document1['id'],
        partition_key=created_document1['id'])
    self.assertEqual(created_document1['id'], read_document['id'])
    read_document = created_collection.read_item(
        item=created_document2['id'],
        partition_key=created_document2['id'])
    self.assertEqual(created_document2['id'], read_document['id'])

    self.created_db.delete_container(container=created_collection)
def connect(self):
    """Open the Cosmos DB connection and cache the client, database and
    container handles on this instance (all get-or-create)."""
    credentials = {'masterKey': self.MASTER_KEY}
    self.client = cosmos_client.CosmosClient(
        self.HOST,
        credentials,
        user_agent="CosmosDB",
        user_agent_overwrite=True)
    self.database = self.client.create_database_if_not_exists(
        id=self.database_name)
    self.container = self.database.create_container_if_not_exists(
        id=self.container_name,
        partition_key=PartitionKey(path="/patient_details/image_name"),
        offer_throughput=400)
def test_document_ttl_with_no_defaultTtl(self):
    """A per-document 'ttl' has no effect when the container defines no
    defaultTtl: TTL is disabled at the container level, so the document
    survives past its own ttl value."""
    # Fix: the original bound created_collection twice in a single chained
    # assignment (`created_collection = created_collection = ...`).
    created_collection = self.created_db.create_container(
        id='test_document_ttl_with_no_defaultTtl collection' + str(uuid.uuid4()),
        partition_key=PartitionKey(path='/id', kind='Hash')
    )

    document_definition = {
        'id': 'doc1' + str(uuid.uuid4()),
        'name': 'sample document',
        'key': 'value',
        'ttl': 5}

    created_document = created_collection.create_item(body=document_definition)

    # Wait past the document's nominal 5-second ttl.
    time.sleep(7)

    # Created document still exists even after ttl time has passed since the
    # TTL is disabled at collection level (no defaultTtl property defined).
    read_document = created_collection.read_item(
        item=created_document['id'],
        partition_key=created_document['id'])
    self.assertEqual(created_document['id'], read_document['id'])

    self.created_db.delete_container(container=created_collection)
def create_collection_with_required_throughput(cls, client, throughput, use_custom_partition_key):
    # type: (CosmosClient, int, boolean) -> Container
    """Create a test container sized to ``throughput``.

    Single-partition throughput selects the single-partition container id;
    otherwise ``use_custom_partition_key`` picks between the custom-PK and
    default multi-partition container ids.
    """
    database = cls.create_database_if_not_exist(client)

    if throughput == cls.THROUGHPUT_FOR_1_PARTITION:
        # NOTE(review): the single-partition container reuses the
        # multi-partition partition-key constant — confirm this is intended.
        collection_id = cls.TEST_COLLECTION_SINGLE_PARTITION_ID
        partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY
    elif use_custom_partition_key:
        collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_ID
        partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_WITH_CUSTOM_PK_PARTITION_KEY
    else:
        collection_id = cls.TEST_COLLECTION_MULTI_PARTITION_ID
        partition_key = cls.TEST_COLLECTION_MULTI_PARTITION_PARTITION_KEY

    return database.create_container(
        id=collection_id,
        partition_key=PartitionKey(path='/' + partition_key, kind='Hash'),
        offer_throughput=throughput)
def create_db():
    """Create the sample database and container, falling back to the existing
    resources on conflict. Returns a (database, container) pair."""
    client = cosmos_client.CosmosClient(
        HOST,
        {'masterKey': MASTER_KEY},
        user_agent="CosmosDBDotnetQuickstart",
        user_agent_overwrite=True)

    # EAFP: attempt the create and fall back to a plain client on conflict.
    try:
        database = client.create_database(id=DATABASE_ID)
    except exceptions.CosmosResourceExistsError:
        print("Database {} already exists".format(DATABASE_ID))
        database = client.get_database_client(DATABASE_ID)

    try:
        container = database.create_container(
            id=CONTAINER_ID,
            partition_key=PartitionKey(path='/user_id'),
            offer_throughput=400)
        print("Container with id \'{0}\' created".format(CONTAINER_ID))
    except exceptions.CosmosResourceExistsError:
        print('Container with id \'{0}\' was found'.format(CONTAINER_ID))
        container = database.get_container_client(CONTAINER_ID)

    return database, container
def run_sample():
    """Quickstart walkthrough: provision the database and container, run each
    item operation (scale, CRUD, query), then delete the database."""
    client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY},
                                        user_agent="CosmosDBDotnetQuickstart",
                                        user_agent_overwrite=True)
    try:
        # setup database for this sample
        try:
            db = client.create_database(id=DATABASE_ID)
        except exceptions.CosmosResourceExistsError:
            # Fix: the original `pass` left `db` unbound, so the very next
            # statement raised NameError whenever the database already
            # existed. Bind the existing database instead (same pattern as
            # create_db elsewhere in this file).
            db = client.get_database_client(DATABASE_ID)

        # setup container for this sample
        try:
            container = db.create_container(
                id=CONTAINER_ID,
                partition_key=PartitionKey(path='/account_number'),
                offer_throughput=400)
            print('Container with id \'{0}\' created'.format(CONTAINER_ID))
        except exceptions.CosmosResourceExistsError:
            print('Container with id \'{0}\' was found'.format(CONTAINER_ID))
            # Fix: bind the existing container so the operations below work.
            container = db.get_container_client(CONTAINER_ID)

        scale_container(container)
        create_items(container)
        read_item(container, 'SalesOrder1', 'Account1')
        read_items(container)
        query_items(container, 'Account1')
        replace_item(container, 'SalesOrder1', 'Account1')
        upsert_item(container, 'SalesOrder1', 'Account1')
        delete_item(container, 'SalesOrder1', 'Account1')

        # cleanup database after sample (ignore if already deleted)
        try:
            client.delete_database(db)
        except exceptions.CosmosResourceNotFoundError:
            pass
    except exceptions.CosmosHttpResponseError as e:
        print('\nrun_sample has caught an error. {0}'.format(e.message))
    finally:
        print("\nrun_sample done")
def initContainer(self, partition_path):
    """Get-or-create the sample database and container and return the
    container client.

    :param partition_path: partition key path for the container, e.g. '/id'.
    """
    client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY},
                                        user_agent="CosmosDBDotnetQuickstart",
                                        user_agent_overwrite=True)
    try:
        # setup database for this sample
        try:
            db = client.create_database_if_not_exists(id=DATABASE_ID)
        except exceptions.CosmosResourceExistsError:
            # NOTE(review): create_database_if_not_exists does not raise this
            # error for an existing database, so this handler looks
            # unreachable — confirm and simplify.
            pass
        try:
            container = db.create_container_if_not_exists(
                id=CONTAINER_ID,
                partition_key=PartitionKey(path=partition_path),
                offer_throughput=400)
            print('Container with id \'{0}\' created'.format(CONTAINER_ID))
        except exceptions.CosmosResourceExistsError:
            # NOTE(review): likewise unreachable for an if-not-exists call;
            # if it ever fired, `container` would be unbound below.
            print('Container with id \'{0}\' was found'.format(CONTAINER_ID))
        scale_container(container)
        # cleanup database after sample
        # try:
        #     client.delete_database(db)
        #
        # except exceptions.CosmosResourceNotFoundError:
        #     pass
    except exceptions.CosmosHttpResponseError as e:
        print('\nrun_sample has caught an error. {0}'.format(e.message))
    finally:
        print("\nrun_sample done")
    return container
# ----------------------------------------------------------------------------------------------------------
# Sample - how to get and use resource token that allows restricted access to data
# ----------------------------------------------------------------------------------------------------------
# Note:
#
# This sample creates a Container to your database account.
# Each time a Container is created the account will be billed for 1 hour of usage based on
# the provisioned throughput (RU/s) of that account.
# ----------------------------------------------------------------------------------------------------------
HOST = config.settings["host"]
MASTER_KEY = config.settings["master_key"]
DATABASE_ID = config.settings["database_id"]
CONTAINER_ID = config.settings["container_id"]
PARTITION_KEY = PartitionKey(path="/username")

# User that you want to give access to
USERNAME, USERNAME_2 = "user", "user2"

CONTAINER_ALL_PERMISSION = "CONTAINER_ALL_PERMISSION"
PARTITION_READ_PERMISSION = "PARTITION_READ_PERMISSION"
DOCUMENT_ALL_PERMISSION = "DOCUMENT_ALL_PERMISSION"


def create_user_if_not_exists(db, username):
    """Return the user with ``username``, creating it on first use.

    Fix: the original computed ``user`` on both paths but never returned it,
    so callers always received None despite the get-or-create name.
    """
    try:
        user = db.create_user(body={"id": username})
    except exceptions.CosmosResourceExistsError:
        user = db.get_user_client(username)
    return user
def create_Container(db, id):
    """ Execute the most basic Create of container.
    This will create a container with 400 RUs throughput and default indexing policy.

    Fix: every 409 handler previously built its message from `container`,
    which is unbound when the create call itself raised (and section 2.2 even
    subscripted it as `container['id']`). The handlers now use the id that was
    attempted.
    """
    partition_key = PartitionKey(path='/id', kind='Hash')

    print("\n2.1 Create Container - Basic")
    try:
        db.create_container(id=id, partition_key=partition_key)
        print('Container with id \'{0}\' created'.format(id))
    except errors.HTTPFailure as e:
        if e.status_code == 409:
            print('A container with id \'{0}\' already exists'.format(id))
        else:
            raise errors.HTTPFailure(e.status_code)

    print("\n2.2 Create Container - With custom index policy")
    try:
        coll = {
            "id": "container_custom_index_policy",
            "indexingPolicy": {
                "indexingMode": "lazy",
                "automatic": False
            }
        }
        container = db.create_container(
            id=coll['id'],
            partition_key=partition_key,
            indexing_policy=coll['indexingPolicy'])
        print('Container with id \'{0}\' created'.format(container.id))
        print('IndexPolicy Mode - \'{0}\''.format(
            container.properties['indexingPolicy']['indexingMode']))
        print('IndexPolicy Automatic - \'{0}\''.format(
            container.properties['indexingPolicy']['automatic']))
    except errors.CosmosError as e:
        if e.status_code == 409:
            # was container['id']: unbound here, and not subscriptable anyway
            print('A container with id \'{0}\' already exists'.format(
                coll['id']))
        else:
            raise errors.HTTPFailure(e.status_code)

    print("\n2.3 Create Container - With custom offer throughput")
    try:
        coll = {"id": "container_custom_throughput"}
        container = db.create_container(id=coll['id'],
                                        partition_key=partition_key,
                                        offer_throughput=400)
        print('Container with id \'{0}\' created'.format(container.id))
    except errors.HTTPFailure as e:
        if e.status_code == 409:
            # was container.id: unbound when the create call raised
            print('A container with id \'{0}\' already exists'.format(
                coll['id']))
        else:
            raise errors.HTTPFailure(e.status_code)

    print("\n2.4 Create Container - With Unique keys")
    try:
        container = db.create_container(
            id="container_unique_keys",
            partition_key=partition_key,
            unique_key_policy={
                'uniqueKeys': [{
                    'paths': ['/field1/field2', '/field3']
                }]
            })
        unique_key_paths = container.properties['uniqueKeyPolicy'][
            'uniqueKeys'][0]['paths']
        print('Container with id \'{0}\' created'.format(container.id))
        print('Unique Key Paths - \'{0}\', \'{1}\''.format(
            unique_key_paths[0], unique_key_paths[1]))
    except errors.HTTPFailure as e:
        if e.status_code == 409:
            print('A container with id \'{0}\' already exists'.format(
                "container_unique_keys"))
        else:
            raise errors.HTTPFailure(e.status_code)

    print("\n2.5 Create Collection - With Partition key V2 (Default)")
    try:
        container = db.create_container(id="collection_partition_key_v2",
                                        partition_key=PartitionKey(
                                            path='/id', kind='Hash'))
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(
            container.properties['partitionKey']))
    except errors.CosmosError as e:
        if e.status_code == 409:
            print('A container with id \'{0}\' already exists'.format(
                "collection_partition_key_v2"))
        else:
            raise errors.HTTPFailure(e.status_code)

    print("\n2.6 Create Collection - With Partition key V1")
    try:
        container = db.create_container(id="collection_partition_key_v1",
                                        partition_key=PartitionKey(
                                            path='/id', kind='Hash',
                                            version=1))
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(
            container.properties['partitionKey']))
    except errors.CosmosError as e:
        if e.status_code == 409:
            print('A container with id \'{0}\' already exists'.format(
                "collection_partition_key_v1"))
        else:
            raise errors.HTTPFailure(e.status_code)
def test_multi_orderby_queries(self):
    """For every composite index, generate ORDER BY queries in every
    combination of (inverted order, TOP, filter) and check the service
    results against a locally computed expected ordering.

    Fix: removed the dead `number_of_items = 4` assignment that was
    immediately overwritten by `number_of_items = 5`.
    """
    indexingPolicy = {
        "indexingMode": "consistent",
        "automatic": True,
        "includedPaths": [{
            "path": "/*",
            "indexes": []
        }],
        "excludedPaths": [{
            "path": "/\"_etag\"/?"
        }],
        "compositeIndexes": [[{
            "path": "/numberField",
            "order": "ascending"
        }, {
            "path": "/stringField",
            "order": "descending"
        }], [{
            "path": "/numberField",
            "order": "descending"
        }, {
            "path": "/stringField",
            "order": "ascending"
        }, {
            "path": "/numberField2",
            "order": "descending"
        }, {
            "path": "/stringField2",
            "order": "ascending"
        }], [{
            "path": "/numberField",
            "order": "descending"
        }, {
            "path": "/stringField",
            "order": "ascending"
        }, {
            "path": "/boolField",
            "order": "descending"
        }, {
            "path": "/nullField",
            "order": "ascending"
        }], [{
            "path": "/stringField",
            "order": "ascending"
        }, {
            "path": "/shortStringField",
            "order": "ascending"
        }, {
            "path": "/mediumStringField",
            "order": "ascending"
        }, {
            "path": "/longStringField",
            "order": "ascending"
        }]]
    }
    options = {'offerThroughput': 25100}
    created_container = self.database.create_container(
        id='multi_orderby_container' + str(uuid.uuid4()),
        indexing_policy=indexingPolicy,
        partition_key=PartitionKey(path='/pk', kind='Hash'),
        request_options=options)
    number_of_items = 5
    self.create_random_items(created_container, number_of_items,
                             number_of_items)
    bool_vals = [True, False]
    composite_indexes = indexingPolicy['compositeIndexes']
    for composite_index in composite_indexes:
        # for every order
        for invert in bool_vals:
            # for normal and inverted order
            for has_top in bool_vals:
                # with and without top
                for has_filter in bool_vals:
                    # with and without filter
                    # Generate a multi order by from that index
                    orderby_items = []
                    select_items = []
                    for composite_path in composite_index:
                        is_desc = True if composite_path[
                            'order'] == "descending" else False
                        if invert:
                            is_desc = not is_desc
                        is_desc_string = "DESC" if is_desc else "ASC"
                        composite_path_name = composite_path[
                            'path'].replace("/", "")
                        orderby_items_string = "root." \
                            + composite_path_name + " " + is_desc_string
                        select_items_string = "root." + composite_path_name
                        orderby_items.append(orderby_items_string)
                        select_items.append(select_items_string)

                    top_count = 10
                    # Join the projection / order-by fragments with commas.
                    select_item_builder = ""
                    for select_item in select_items:
                        select_item_builder += select_item + ","
                    select_item_builder = select_item_builder[:-1]
                    orderby_item_builder = ""
                    for orderby_item in orderby_items:
                        orderby_item_builder += orderby_item + ","
                    orderby_item_builder = orderby_item_builder[:-1]

                    top_string = "TOP " + str(top_count) if has_top else ""
                    where_string = "WHERE root." + self.NUMBER_FIELD + \
                        " % 2 = 0" if has_filter else ""
                    query = "SELECT " + top_string + " [" + select_item_builder + "] " + \
                            "FROM root " + where_string + " " + \
                            "ORDER BY " + orderby_item_builder  #nosec

                    # Compute the expected ordering locally, then compare.
                    expected_ordered_list = self.top(
                        self.sort(self.filter(self.items, has_filter),
                                  composite_index, invert), has_top,
                        top_count)

                    result_ordered_list = list(
                        created_container.query_items(
                            query=query,
                            enable_cross_partition_query=True))

                    self.validate_results(expected_ordered_list,
                                          result_ordered_list,
                                          composite_index)
def create_container(db, id):
    """ Execute basic container creation.
    This will create containers with 400 RUs with different indexing,
    partitioning, and storage options.

    Fix: several "already exists" messages reported the wrong id — section
    2.3 printed the stale `coll['id']` from section 2.2, and sections
    2.4–2.7 printed hard-coded names missing the `id` prefix actually used.
    Each handler now reports the id it attempted to create.
    """
    partition_key = PartitionKey(path='/id', kind='Hash')

    print("\n2.1 Create Container - Basic")
    try:
        db.create_container(id=id, partition_key=partition_key)
        print('Container with id \'{0}\' created'.format(id))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(id))

    print("\n2.2 Create Container - With custom index policy")
    try:
        coll = {
            "id": id + "_container_custom_index_policy",
            "indexingPolicy": {
                "automatic": False
            }
        }
        container = db.create_container(
            id=coll['id'],
            partition_key=partition_key,
            indexing_policy=coll['indexingPolicy'])
        properties = container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('IndexPolicy Mode - \'{0}\''.format(
            properties['indexingPolicy']['indexingMode']))
        print('IndexPolicy Automatic - \'{0}\''.format(
            properties['indexingPolicy']['automatic']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(coll['id']))

    print("\n2.3 Create Container - With custom provisioned throughput")
    try:
        container = db.create_container(
            id=id + "_container_custom_throughput",
            partition_key=partition_key,
            offer_throughput=400)
        print('Container with id \'{0}\' created'.format(container.id))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(
            id + "_container_custom_throughput"))

    print("\n2.4 Create Container - With Unique keys")
    try:
        container = db.create_container(
            id=id + "_container_unique_keys",
            partition_key=partition_key,
            unique_key_policy={
                'uniqueKeys': [{
                    'paths': ['/field1/field2', '/field3']
                }]
            })
        properties = container.read()
        unique_key_paths = properties['uniqueKeyPolicy']['uniqueKeys'][0][
            'paths']
        print('Container with id \'{0}\' created'.format(container.id))
        print('Unique Key Paths - \'{0}\', \'{1}\''.format(
            unique_key_paths[0], unique_key_paths[1]))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(
            id + "_container_unique_keys"))

    print("\n2.5 Create Container - With Partition key V2 (Default)")
    try:
        container = db.create_container(
            id=id + "_container_partition_key_v2",
            partition_key=PartitionKey(path='/id', kind='Hash'))
        properties = container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(properties['partitionKey']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(
            id + "_container_partition_key_v2"))

    print("\n2.6 Create Container - With Partition key V1")
    try:
        container = db.create_container(
            id=id + "_container_partition_key_v1",
            partition_key=PartitionKey(path='/id', kind='Hash', version=1))
        properties = container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(properties['partitionKey']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(
            id + "_container_partition_key_v1"))

    print("\n2.7 Create Container - With analytical store enabled")
    try:
        container = db.create_container(
            id=id + "_container_analytical_store",
            partition_key=PartitionKey(path='/id', kind='Hash'),
            analytical_storage_ttl=-1)
        properties = container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(properties['partitionKey']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(
            id + "_container_analytical_store"))
def test_document_ttl_misc(self):
    """Exercise TTL edge cases: id reuse after expiry, ttl reset on upsert,
    and disabling defaultTtl via replace_container.

    Fixes: removed the duplicated chained assignment
    (`created_collection = created_collection = ...`), dropped the unused
    `replaced_collection` local, and renamed the misspelled
    `upserted_docment` local.
    """
    created_collection = self.created_db.create_container(
        id='test_document_ttl_with_no_defaultTtl collection' + str(uuid.uuid4()),
        partition_key=PartitionKey(path='/id', kind='Hash'),
        default_ttl=8
    )

    document_definition = {
        'id': 'doc1' + str(uuid.uuid4()),
        'name': 'sample document',
        'key': 'value'}

    created_document = created_collection.create_item(body=document_definition)

    time.sleep(10)

    # the created document should already be gone now (defaultTtl is 8 secs)
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_item,
        created_document['id'],
        created_document['id']
    )

    # We can create a document with the same id after the ttl time has expired
    created_document = created_collection.create_item(body=document_definition)
    self.assertEqual(created_document['id'], document_definition['id'])

    time.sleep(3)

    # Upsert the document after 3 secs to reset the document's ttl
    document_definition['key'] = 'value2'
    upserted_document = created_collection.upsert_item(body=document_definition)

    time.sleep(7)

    # Upserted document still exists after 10 secs from document creation time
    # (with collection's defaultTtl set to 8) since its ttl was reset after
    # 3 secs by upserting it
    read_document = created_collection.read_item(
        item=upserted_document['id'],
        partition_key=upserted_document['id'])
    self.assertEqual(upserted_document['id'], read_document['id'])

    time.sleep(3)

    # the upserted document should be gone now after 10 secs from the last
    # write (upsert) of the document
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_item,
        upserted_document['id'],
        upserted_document['id']
    )

    documents = list(created_collection.query_items(
        query='SELECT * FROM root r',
        enable_cross_partition_query=True
    ))
    self.assertEqual(0, len(documents))

    # Removes defaultTtl property from collection to disable ttl at
    # collection level (return value intentionally unused).
    self.created_db.replace_container(
        container=created_collection,
        partition_key=PartitionKey(path='/id', kind='Hash'),
        default_ttl=None
    )

    document_definition['id'] = 'doc2' + str(uuid.uuid4())
    created_document = created_collection.create_item(body=document_definition)

    time.sleep(5)

    # Created document still exists even after ttl time has passed since the
    # TTL is disabled at collection level
    read_document = created_collection.read_item(
        item=created_document['id'],
        partition_key=created_document['id'])
    self.assertEqual(created_document['id'], read_document['id'])

    self.created_db.delete_container(container=created_collection)
# Container create and delete util for Cosmos DB CONFIG = { "ENDPOINT": "https://cosmos-ml.documents.azure.com:443/", "PRIMARYKEY": "Xk2aRRmk45Ix6CJH72ZgzcbV0uQn4Ln2gYnAfdPY4gxi65X2odyA9BdIxlCWBkiWquodWSyHY7mFce1L5X9Nzg==", "DATABASE": "pipeline", # Prolly looks more like a name to you "CONTAINER": "custom_od" # Prolly looks more like a name to you # "CONTAINER": "object_detection" # Prolly looks more like a name to you } url = CONFIG['ENDPOINT'] key = CONFIG['PRIMARYKEY'] client = cosmos_client.CosmosClient(url, {'masterKey': key}) database_id = CONFIG["DATABASE"] container_id = CONFIG["CONTAINER"] # partition_key=PartitionKey(path='/language') database = client.get_database_client(database_id) containers = database.list_containers() #database.delete_container(CONFIG['CONTAINER']) print("done") database.create_container_if_not_exists( id='custom_od', partition_key=PartitionKey(path="/category"))
def test_document_ttl_with_positive_defaultTtl(self):
    """With defaultTtl == 5, a document without its own ttl expires at 5s;
    a per-document ttl overrides the default in both directions (-1 never
    expires, 2 expires sooner, 8 expires later)."""
    created_collection = self.created_db.create_container(
        id='test_document_ttl_with_positive_defaultTtl collection' + str(uuid.uuid4()),
        default_ttl=5,
        partition_key=PartitionKey(path='/id', kind='Hash')
    )

    document_definition = {
        'id': 'doc1' + str(uuid.uuid4()),
        'name': 'sample document',
        'key': 'value'}

    created_document = created_collection.create_item(body=document_definition)

    time.sleep(7)

    # the created document should be gone now as its ttl value would be same
    # as defaultTtl value of the collection
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_item,
        document_definition['id'],
        document_definition['id']
    )

    document_definition['id'] = 'doc2' + str(uuid.uuid4())
    document_definition['ttl'] = -1
    created_document = created_collection.create_item(body=document_definition)

    time.sleep(5)

    # the created document should NOT be gone as its ttl value is set to -1
    # (never expire) which overrides the collection's defaultTtl value
    read_document = created_collection.read_item(
        item=document_definition['id'],
        partition_key=document_definition['id'])
    self.assertEqual(created_document['id'], read_document['id'])

    document_definition['id'] = 'doc3' + str(uuid.uuid4())
    document_definition['ttl'] = 2
    created_document = created_collection.create_item(body=document_definition)

    time.sleep(4)

    # the created document should be gone now as its ttl value is set to 2
    # which overrides the collection's defaultTtl value (5)
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_item,
        created_document['id'],
        created_document['id']
    )

    document_definition['id'] = 'doc4' + str(uuid.uuid4())
    document_definition['ttl'] = 8
    created_document = created_collection.create_item(body=document_definition)

    time.sleep(6)

    # the created document should NOT be gone as its ttl value is set to 8
    # which overrides the collection's defaultTtl value (5)
    read_document = created_collection.read_item(
        item=created_document['id'],
        partition_key=created_document['id'])
    self.assertEqual(created_document['id'], read_document['id'])

    time.sleep(4)

    # the created document should be gone now as we have waited for (6+4)
    # secs which is greater than the document's ttl value of 8
    self.__AssertHTTPFailureWithStatus(
        StatusCodes.NOT_FOUND,
        created_collection.read_item,
        created_document['id'],
        created_document['id']
    )

    self.created_db.delete_container(container=created_collection)
import os

# Fix: the original chained assignment
#   url = key = client = cosmos_client.CosmosClient(url, {'masterKey': key})
# referenced `url` and `key` before they had values, failing with NameError at
# import time. The endpoint and key now come from the environment instead of
# being committed to source.
url = os.environ.get('COSMOS_ENDPOINT', '')
key = os.environ.get('COSMOS_KEY', '')
client = cosmos_client.CosmosClient(url, {'masterKey': key})

database = client.get_database_client('amazon-jobs')

# NOTE(review): this definition is never used; create_container_if_not_exists
# below supplies its own id and partition key. Kept for reference.
container_definition = {
    'id': 'indeed_jobs',
    'partitionKey': {
        'paths': ['/job_id']
    }
}

container = database.create_container_if_not_exists(
    id='indeed',
    partition_key=PartitionKey(path="/id"),
    offer_throughput=400
)

database_id = 'amazon-jobs'
container_id = 'indeed'
container_client = database.get_container_client(container_id)

# Seed nine sample widget items (item1..item9); upsert keeps reruns idempotent.
for i in range(1, 10):
    container_client.upsert_item({
        'id': 'item{0}'.format(i),
        'productName': 'Widget',
        'productModel': 'Model {0}'.format(i)
    })
def create_container(self, db):
    """Create the 'Planets' collection in ``db`` (hash-partitioned on /id)
    and return the new container client."""
    collectionName = "Planets"
    print("Creating '{0}' collection...".format(collectionName))
    partition_key = PartitionKey(path="/id", kind="Hash")
    # Fix: reuse collectionName instead of repeating the "Planets" literal,
    # so the printed name can never drift from the created container's id.
    return db.create_container(id=collectionName, partition_key=partition_key)
import azure.cosmos.documents as documents
import azure.cosmos.aio.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
import urllib3
from requests.utils import DEFAULT_CA_BUNDLE_PATH as CaCertPath
import asyncio
import config

# Account settings come from the local config module.
HOST = config.settings['host']
MASTER_KEY = config.settings['master_key']
DATABASE_ID = config.settings['database_id']
CONTAINER_ID = "index-samples"
PARTITION_KEY = PartitionKey(path='/id', kind='Hash')

# A typical container has the following properties within its indexingPolicy property
#     indexingMode
#     automatic
#     includedPaths
#     excludedPaths
#
# We can toggle 'automatic' to either be True or False depending upon whether
# we want to have indexing over all columns by default or not.
#
# We can provide options while creating documents. indexingDirective is one such,
# by which we can tell whether it should be included or excluded in the index of
# the parent container. indexingDirective can be either 'Include', 'Exclude' or
# 'Default'.

# To run this Demo, please provide your own CA certs file or download one from
#     http://curl.haxx.se/docs/caextract.html
# Setup the certificate file in .pem format.
import azure.cosmos.documents as documents
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
import datetime
import config

# Account settings come from the local config module.
HOST = config.settings['host']
MASTER_KEY = config.settings['master_key']
DATABASE_ID = config.settings['database_id']
CONTAINER_ID = config.settings['container_id']

# NOTE: everything below runs at import time (module-level side effects).
client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY},
                                    user_agent="CosmosDBDotnetQuickstart",
                                    user_agent_overwrite=True)

# NOTE(review): DATABASE_ID / CONTAINER_ID above are never used — the names
# below are hard-coded. Confirm which pair is intended.
database_name = 'trainingdb'
database = client.create_database_if_not_exists(id=database_name)

container_name = 'TrainingContainer'
container = database.create_container_if_not_exists(
    id=container_name,
    partition_key=PartitionKey(path="/value"),
    offer_throughput=400)

# Insert a single key/value style item.
kv_item = {'id': 'Lastname', 'value': 'Kukreja'}
container.create_item(body=kv_item)
def _validate_tentative_write_headers(self):
    """Exercise one operation of each kind against a multi-master account and
    verify the allow-tentative-writes request header was sent on each request
    exactly when multiple writable locations are enabled.

    The retry pipeline is monkey-patched with self._MockExecuteFunction, which
    is expected to append each request's header value to self.last_headers.
    The patch is restored in a finally block so a failing assertion cannot
    leak it into other tests.
    """
    self.OriginalExecuteFunction = retry_utility._ExecuteFunction
    retry_utility._ExecuteFunction = self._MockExecuteFunction
    try:
        connectionPolicy = MultiMasterTests.connectionPolicy
        connectionPolicy.UseMultipleWriteLocations = True
        client = cosmos_client.CosmosClient(MultiMasterTests.host,
                                            {'masterKey': MultiMasterTests.masterKey},
                                            "Session",
                                            connectionPolicy)

        created_db = client.create_database(id='multi_master_tests ' + str(uuid.uuid4()))

        created_collection = created_db.create_container(
            id='test_db',
            partition_key=PartitionKey(path='/pk', kind='Hash'))

        document_definition = {'id': 'doc' + str(uuid.uuid4()),
                               'pk': 'pk',
                               'name': 'sample document',
                               'operation': 'insertion'}
        created_document = created_collection.create_item(body=document_definition)

        sproc_definition = {
            'id': 'sample sproc' + str(uuid.uuid4()),
            'serverScript': 'function() {var x = 10;}'
        }
        sproc = created_collection.scripts.create_stored_procedure(body=sproc_definition)

        created_collection.scripts.execute_stored_procedure(sproc=sproc['id'],
                                                            partition_key='pk')

        created_collection.read_item(item=created_document, partition_key='pk')

        created_document['operation'] = 'replace'
        replaced_document = created_collection.replace_item(item=created_document['id'],
                                                            body=created_document)

        replaced_document['operation'] = 'upsert'
        upserted_document = created_collection.upsert_item(body=replaced_document)

        created_collection.delete_item(item=upserted_document, partition_key='pk')

        client.delete_database(created_db)

        # Strict '== True' is kept deliberately: the recorded header values are
        # compared with assertEqual below, so the expectation must be an exact
        # bool, not merely truthy.
        is_allow_tentative_writes_set = self.EnableMultipleWritableLocations == True

        # One recorded header per request, in execution order. 'Create
        # Document' makes an extra initial call to fetch the collection,
        # hence two entries for it.
        expected_operations = [
            'Create Database',
            'Create Container',
            'Create Document - initial collection fetch',
            'Create Document',
            'Create Stored procedure',
            'Execute Stored procedure',
            'Read Document',
            'Replace Document',
            'Upsert Document',
            'Delete Document',
            'Delete Database',
        ]
        self.assertEqual(len(self.last_headers), len(expected_operations))
        for index, operation in enumerate(expected_operations):
            self.assertEqual(self.last_headers[index],
                             is_allow_tentative_writes_set,
                             operation)
    finally:
        # Always undo the monkey-patch, even when an assertion above fails.
        retry_utility._ExecuteFunction = self.OriginalExecuteFunction
def test_distinct(self):
    """Validate DISTINCT queries (with and without ORDER BY, with and without
    VALUE) against a container with composite indexes on (pk, distinct_field)
    in both orders.

    Expected results are computed client-side from the created documents via
    _get_distinct_docs/_get_order_by_docs and compared by _validate_distinct.
    """
    created_database = self.config.create_database_if_not_exist(self.client)
    distinct_field = 'distinct_field'
    pk_field = "pk"
    # A field no document has, to test DISTINCT over a missing property.
    different_field = "different_field"

    # Composite indexes in both column orders are required for the
    # two-column ORDER BY queries below.
    created_collection = created_database.create_container(
        id='collection with composite index ' + str(uuid.uuid4()),
        partition_key=PartitionKey(path="/pk", kind="Hash"),
        indexing_policy={
            "compositeIndexes": [[{"path": "/" + pk_field, "order": "ascending"},
                                  {"path": "/" + distinct_field, "order": "ascending"}],
                                 [{"path": "/" + distinct_field, "order": "ascending"},
                                  {"path": "/" + pk_field, "order": "ascending"}]]
        })

    # For each partition value i, create documents for j = i, i-1, ..., i-4:
    # two identical (pk, distinct_field) pairs (deliberate duplicates so
    # DISTINCT has something to collapse) plus one document that lacks
    # distinct_field entirely.
    documents = []
    for i in range(5):
        j = i
        while j > i - 5:
            document_definition = {pk_field: i, 'id': str(uuid.uuid4()), distinct_field: j}
            documents.append(created_collection.create_item(body=document_definition))
            document_definition = {pk_field: i, 'id': str(uuid.uuid4()), distinct_field: j}
            documents.append(created_collection.create_item(body=document_definition))
            document_definition = {pk_field: i, 'id': str(uuid.uuid4())}
            documents.append(created_collection.create_item(body=document_definition))
            j -= 1

    # Normalize documents missing distinct_field so expected-result
    # computation can treat the field uniformly.
    padded_docs = self._pad_with_none(documents, distinct_field)

    # DISTINCT single field with ORDER BY on the same field.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s from c ORDER BY c.%s' % (distinct_field, distinct_field),  #nosec
                            results=self._get_distinct_docs(
                                self._get_order_by_docs(padded_docs, distinct_field, None),
                                distinct_field, None, True),
                            is_select=False,
                            fields=[distinct_field])

    # DISTINCT two fields, ORDER BY pk first.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s, c.%s from c ORDER BY c.%s, c.%s' % (distinct_field, pk_field, pk_field, distinct_field),  #nosec
                            results=self._get_distinct_docs(
                                self._get_order_by_docs(padded_docs, pk_field, distinct_field),
                                distinct_field, pk_field, True),
                            is_select=False,
                            fields=[distinct_field, pk_field])

    # DISTINCT two fields, ORDER BY distinct_field first.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s, c.%s from c ORDER BY c.%s, c.%s' % (distinct_field, pk_field, distinct_field, pk_field),  #nosec
                            results=self._get_distinct_docs(
                                self._get_order_by_docs(padded_docs, distinct_field, pk_field),
                                distinct_field, pk_field, True),
                            is_select=False,
                            fields=[distinct_field])

    # DISTINCT VALUE with ORDER BY.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct value c.%s from c ORDER BY c.%s' % (distinct_field, distinct_field),  #nosec
                            results=self._get_distinct_docs(
                                self._get_order_by_docs(padded_docs, distinct_field, None),
                                distinct_field, None, True),
                            is_select=False,
                            fields=[distinct_field])

    # DISTINCT without ORDER BY: single field, two fields, and VALUE form.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s from c' % (distinct_field),  #nosec
                            results=self._get_distinct_docs(padded_docs, distinct_field, None, False),
                            is_select=True,
                            fields=[distinct_field])

    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s, c.%s from c' % (distinct_field, pk_field),  #nosec
                            results=self._get_distinct_docs(padded_docs, distinct_field, pk_field, False),
                            is_select=True,
                            fields=[distinct_field, pk_field])

    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct value c.%s from c' % (distinct_field),  #nosec
                            results=self._get_distinct_docs(padded_docs, distinct_field, None, True),
                            is_select=True,
                            fields=[distinct_field])

    # DISTINCT on a non-existent field: empty with ORDER BY, a single
    # 'None' entry without it.
    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s from c ORDER BY c.%s' % (different_field, different_field),  #nosec
                            results=[],
                            is_select=True,
                            fields=[different_field])

    self._validate_distinct(created_collection=created_collection,
                            query='SELECT distinct c.%s from c' % (different_field),  #nosec
                            results=['None'],
                            is_select=True,
                            fields=[different_field])

    created_database.delete_container(created_collection.id)
async def create_container(db, id):
    """ Execute basic container creation.
        This will create containers with 400 RUs with different indexing, partitioning, and storage options

        :param db: async DatabaseProxy the sample containers are created in.
        :param id: base container id; the other sample containers derive
            their ids from it via a suffix.
    """
    partition_key = PartitionKey(path='/id', kind='Hash')

    print("\n2.1 Create Container - Basic")

    try:
        await db.create_container(id=id, partition_key=partition_key)
        print('Container with id \'{0}\' created'.format(id))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(id))

    # Alternatively, you can also use the create_container_if_not_exists method to avoid using a try catch
    # This method attempts to read the container first, and based on the result either creates or returns
    # the existing container. Due to the additional overhead from attempting a read, it is recommended
    # to use the create_container() method if you know the container doesn't already exist.
    await db.create_container_if_not_exists(id=id, partition_key=partition_key)

    print("\n2.2 Create Container - With custom index policy")

    coll = {
        "id": id + "_container_custom_index_policy",
        "indexingPolicy": {
            "automatic": False
        }
    }
    container = await db.create_container_if_not_exists(
        id=coll['id'],
        partition_key=partition_key,
        indexing_policy=coll['indexingPolicy']
    )
    properties = await container.read()
    print('Container with id \'{0}\' created'.format(container.id))
    print('IndexPolicy Mode - \'{0}\''.format(properties['indexingPolicy']['indexingMode']))
    print('IndexPolicy Automatic - \'{0}\''.format(properties['indexingPolicy']['automatic']))

    print("\n2.3 Create Container - With custom provisioned throughput")

    throughput_container_id = id + "_container_custom_throughput"
    try:
        container = await db.create_container(
            id=throughput_container_id,
            partition_key=partition_key,
            offer_throughput=400
        )
        print('Container with id \'{0}\' created'.format(container.id))
    except exceptions.CosmosResourceExistsError:
        # Fixed: previously printed coll['id'] (the 2.2 container) here.
        print('A container with id \'{0}\' already exists'.format(throughput_container_id))

    print("\n2.4 Create Container - With Unique keys")

    unique_keys_container_id = id + "_container_unique_keys"
    try:
        container = await db.create_container(
            id=unique_keys_container_id,
            partition_key=partition_key,
            unique_key_policy={'uniqueKeys': [{'paths': ['/field1/field2', '/field3']}]}
        )
        properties = await container.read()
        unique_key_paths = properties['uniqueKeyPolicy']['uniqueKeys'][0]['paths']
        print('Container with id \'{0}\' created'.format(container.id))
        print('Unique Key Paths - \'{0}\', \'{1}\''.format(unique_key_paths[0], unique_key_paths[1]))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(unique_keys_container_id))

    print("\n2.5 Create Container - With Partition key V2 (Default)")

    pk_v2_container_id = id + "_container_partition_key_v2"
    try:
        container = await db.create_container(
            id=pk_v2_container_id,
            partition_key=PartitionKey(path='/id', kind='Hash')
        )
        properties = await container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(properties['partitionKey']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(pk_v2_container_id))

    print("\n2.6 Create Container - With Partition key V1")

    pk_v1_container_id = id + "_container_partition_key_v1"
    try:
        container = await db.create_container(
            id=pk_v1_container_id,
            partition_key=PartitionKey(path='/id', kind='Hash', version=1)
        )
        properties = await container.read()
        print('Container with id \'{0}\' created'.format(container.id))
        print('Partition Key - \'{0}\''.format(properties['partitionKey']))
    except exceptions.CosmosResourceExistsError:
        print('A container with id \'{0}\' already exists'.format(pk_v1_container_id))
    # Fixed: a stray 'except Exception' printing a Synapse Link message was
    # attached to this try block; Synapse Link is only relevant to 2.7,
    # which has its own generic handler below.

    print("\n2.7 Create Container - With analytical store enabled")

    if 'localhost:8081' in HOST:
        print("Skipping step since emulator does not support this yet")
    else:
        try:
            container = await db.create_container(
                id=id + "_container_analytical_store",
                partition_key=PartitionKey(path='/id', kind='Hash'),
                analytical_storage_ttl=-1
            )
            properties = await container.read()
            print('Container with id \'{0}\' created'.format(container.id))
            print('Partition Key - \'{0}\''.format(properties['partitionKey']))
        except exceptions.CosmosResourceExistsError:
            print('A container with id \'_container_analytical_store\' already exists')
        except Exception:
            print('Creating container with analytical storage can only happen in synapse link activated accounts, skipping step')