Ejemplo n.º 1
0
    def test_create_partition_limit(self, connect, collection, args):
        """
        target: test create partitions, check status returned
        method: call function: create_partition for 4097 times
        expected: exception raised
        """
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        num_workers = 8
        share = ut.max_partition_num // num_workers

        # Each worker creates its share of the maximum partition count
        # through its own client connection.
        def worker(client, count):
            for _ in range(count):
                client.create_partition(collection, ut.gen_unique_str())

        workers = []
        for _ in range(num_workers):
            client = ut.get_milvus(host=args["ip"],
                                   port=args["port"],
                                   handler=args["handler"])
            thread = threading.Thread(target=worker, args=(client, share))
            workers.append(thread)
            thread.start()
        for thread in workers:
            thread.join()
        # One partition beyond the limit must be rejected.
        with pytest.raises(Exception) as e:
            connect.create_partition(collection, ut.gen_unique_str())
Ejemplo n.º 2
0
 def test_create_partition_insert_same_tags_two_collections(
         self, connect, collection):
     """
     target: test create two partitions, and insert vectors with the same tag to
             each collection, check status returned
     method: call function: create_partition
     expected: status ok, collection length is correct
     """
     # The same partition tag in two different collections must not clash.
     connect.create_partition(collection, default_tag)
     second_collection = ut.gen_unique_str()
     connect.create_collection(second_collection, default_fields)
     connect.create_partition(second_collection, default_tag)
     # Insert the same batch into the same-named partition of each collection.
     for name in (collection, second_collection):
         inserted = connect.insert(name,
                                   default_entities,
                                   partition_name=default_tag)
         assert len(inserted.primary_keys) == default_nb
     connect.flush([collection, second_collection])
     # Both collections must report the full row count independently.
     for name in (collection, second_collection):
         stats = connect.get_collection_stats(name)
         assert stats["row_count"] == default_nb
Ejemplo n.º 3
0
 def test_insert_ids_fields(self, connect, get_filter_field,
                            get_vector_field):
     """
     target: test create normal collection with different fields, insert entities into id with ids
     method: create collection with diff fields: metric/field_type/..., insert, and count
     expected: row count correct
     """
     nb = 5
     collection_name = ut.gen_unique_str("test_collection")
     # Schema with an explicit (non-auto) primary key plus the
     # parametrized filter and vector fields.
     schema = {
         "fields": [ut.gen_primary_field(), get_filter_field,
                    get_vector_field],
         "auto_id": False
     }
     connect.create_collection(collection_name, schema)
     ids = list(range(nb))
     entities = ut.gen_entities_by_fields(schema["fields"], nb,
                                          ut.default_dim, ids)
     log.info(entities)
     result = connect.insert(collection_name, entities)
     # With auto_id disabled, the supplied ids come back unchanged.
     assert result.primary_keys == ids
     connect.flush([collection_name])
     stats = connect.get_collection_stats(collection_name)
     assert stats[row_count] == nb
Ejemplo n.º 4
0
 def _test_insert_during_flushing_multi_partitions(self, connect, collection, args):
     """
     target: flushing will recover
     method: call function: create collection/partition, then insert/flushing, restart server and assert row count
     expected: row count equals 0
     """
     # NOTE(review): leading underscore disables this test (pytest will not
     # collect it); it exercises a server restart in the middle of an async
     # flush, which needs a controllable deployment.
     # disable_autoflush()
     partitions_num = 2
     partitions = []
     for i in range(partitions_num):
         tag_tmp = gen_unique_str()
         partitions.append(tag_tmp)
         connect.create_partition(collection, tag_tmp)
         ids = connect.bulk_insert(collection, big_entities, partition_name=tag_tmp)
     # Kick off a non-blocking flush so the restart can race it.
     connect.flush([collection], _async=True)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     if res_count < big_nb:
         # restart server
         assert restart_server(args["service_name"])
         # assert row count again
         new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) 
         res_count_2 = new_connect.count_entities(collection)
         logging.getLogger().info(res_count_2)
         # Poll up to `timeout` seconds for the post-restart recovery to
         # surface both partitions' rows (2 partitions * big_nb each).
         timeout = 300
         start_time = time.time()
         while new_connect.count_entities(collection) != big_nb * 2 and (time.time() - start_time < timeout):
             time.sleep(10)
             logging.getLogger().info(new_connect.count_entities(collection))
         res_count_3 = new_connect.count_entities(collection)
         logging.getLogger().info(res_count_3)
         assert res_count_3 == big_nb * 2
Ejemplo n.º 5
0
 def test_insert_collection_not_existed(self, connect):
     """
     target: test insert, with collection not existed
     method: insert entity into a random named collection
     expected: raise a BaseException
     """
     # A random, never-created collection name must make insert fail.
     missing_name = ut.gen_unique_str(uid)
     with pytest.raises(BaseException) as e:
         connect.insert(missing_name, default_entities)
Ejemplo n.º 6
0
 def test_insert_partition_not_existed(self, connect, collection):
     """
     target: test insert entities in collection created before
     method: create collection and insert entities in it, with the not existed partition_name param
     expected: error raised
     """
     # The collection exists, but the partition tag does not.
     missing_tag = ut.gen_unique_str()
     with pytest.raises(Exception) as e:
         connect.insert(collection, default_entities,
                        partition_name=missing_tag)
Ejemplo n.º 7
0
 def test_create_different_partition_names(self, connect, collection):
     """
     target: test create partition twice with different names
     method: call function: create_partition, and again
     expected: status ok
     """
     second_tag = ut.gen_unique_str()
     for tag in (default_tag, second_tag):
         connect.create_partition(collection, tag)
     # Both created tags plus the implicit '_default' partition are listed.
     listed = connect.list_partitions(collection)
     assert ut.compare_list_elements(listed,
                                     [default_tag, second_tag, '_default'])
Ejemplo n.º 8
0
 def test_insert_async_invalid_params(self, connect):
     """
     target: test insert vectors with different length of vectors
     method: set different vectors as insert method params
     expected: length of ids is equal to the length of vectors
     """
     # Async insert into a never-created collection: the error only
     # surfaces when the future's result is requested.
     missing_collection = ut.gen_unique_str()
     future = connect.insert(missing_collection, default_entities,
                             _async=True)
     future.done()
     with pytest.raises(Exception) as e:
         result = future.result()
Ejemplo n.º 9
0
 def test_show_multi_partitions(self, connect, collection):
     """
     target: test show partitions, check status and partitions returned
     method: create partitions first, then call function: list_partitions
     expected: status ok, partitions correct
     """
     extra_tag = ut.gen_unique_str()
     for tag in (default_tag, extra_tag):
         connect.create_partition(collection, tag)
     # list_partitions must report both new tags plus '_default'.
     listed = connect.list_partitions(collection)
     assert ut.compare_list_elements(listed,
                                     [default_tag, extra_tag, '_default'])
Ejemplo n.º 10
0
 def test_drop_collection_insert_entity_another(self, connect, collection):
     """
     target: test insert vector to collection_1 after collection_2 deleted
     method: delete collection_2 and insert vector to collection_1
     expected: row count equals the length of entities inserted
     """
     survivor = ut.gen_unique_str(uid)
     connect.create_collection(survivor, default_fields)
     # Dropping the fixture collection must not affect the new one.
     connect.drop_collection(collection)
     result = connect.insert(survivor, default_entity)
     connect.flush([survivor])
     assert len(result.primary_keys) == 1
Ejemplo n.º 11
0
 def test_insert_entity_sleep_search_entity_another(self, connect,
                                                    collection):
     """
     target: test insert entity to collection_1 after search collection_2 a while
     method: search collection, sleep, and insert entity
     expected: status ok
     """
     # NOTE(review): despite the name and docstring, no sleep happens here;
     # the entity goes into `collection` while the search runs against the
     # freshly created, empty `collection_name` — confirm the intent.
     collection_name = ut.gen_unique_str(uid)
     connect.create_collection(collection_name, default_fields)
     result = connect.insert(collection, default_entity)
     connect.flush([collection])
     connect.load_collection(collection_name)
     res = connect.search(collection_name, **default_single_query)
     # The searched collection holds no entities, so no hits come back.
     assert len(res[0]) == 0
Ejemplo n.º 12
0
 def test_insert_entity_search_entity_another(self, connect, collection):
     """
     target: test insert entity to collection_1 after search collection_2
     method: search collection and insert entity
     expected: status ok
     """
     other = ut.gen_unique_str(uid)
     connect.create_collection(other, default_fields)
     result = connect.insert(collection, default_entity)
     connect.flush([collection])
     # Searching the empty sibling collection must not disturb the insert.
     connect.load_collection(other)
     res = connect.search(other, **default_single_query)
     stats = connect.get_collection_stats(collection)
     assert stats[row_count] == 1
Ejemplo n.º 13
0
 def test_insert_entity_sleep_create_index_another(self, connect,
                                                   collection,
                                                   get_simple_index):
     """
     target: test insert vector to collection_2 after build index for collection_1 for a while
     method: build index and insert vector
     expected: status ok
     """
     other = ut.gen_unique_str(uid)
     connect.create_collection(other, default_fields)
     result = connect.insert(collection, default_entity)
     connect.flush([collection])
     # Building an index on the sibling collection must not disturb the
     # row count of the collection that received the insert.
     connect.create_index(other, field_name, get_simple_index)
     stats = connect.get_collection_stats(collection)
     assert stats[row_count] == 1
Ejemplo n.º 14
0
def binary_id_collection(request, connect):
    """Fixture: create a binary-vector collection with explicit ids.

    The collection name is derived from the requesting module's
    ``collection_id`` attribute (falling back to "test") and made unique.
    A finalizer drops the collection after the test. Returns the name.
    """
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    try:
        schema = gen_binary_default_fields(auto_id=False)
        connect.create_collection(collection_name, schema,
                                  consistency_level=CONSISTENCY_STRONG)
    except Exception as e:
        # Collection creation failing means the server is unusable;
        # abort the whole session rather than fail every test.
        pytest.exit(str(e))

    def cleanup():
        if connect.has_collection(collection_name):
            connect.drop_collection(collection_name, timeout=delete_timeout)

    request.addfinalizer(cleanup)
    assert connect.has_collection(collection_name)
    return collection_name
Ejemplo n.º 15
0
def collection(request, connect):
    """Fixture: create a default collection and yield its unique name.

    The name is derived from the requesting module's ``collection_id``
    attribute (falling back to "test"). A finalizer drops the collection
    after the test.
    """
    base_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(base_name)
    try:
        # Local name deliberately differs from the module-level
        # `default_fields` constant to avoid shadowing it.
        schema = gen_default_fields()
        connect.create_collection(collection_name, schema)
    except Exception as e:
        # Collection creation failing means the server is unusable;
        # abort the whole session rather than fail every test.
        pytest.exit(str(e))

    def cleanup():
        if connect.has_collection(collection_name):
            connect.drop_collection(collection_name, timeout=delete_timeout)

    request.addfinalizer(cleanup)
    assert connect.has_collection(collection_name)
    return collection_name
Ejemplo n.º 16
0
 def test_create_index_insert_entity_another(self, connect, collection,
                                             get_simple_index):
     """
     target: test insert vector to collection_2 after build index for collection_1
     method: build index and insert vector
     expected: status ok
     """
     other = ut.gen_unique_str(uid)
     connect.create_collection(other, default_fields)
     # Index the fixture collection, then insert into the sibling.
     connect.create_index(collection, field_name, get_simple_index)
     result = connect.insert(other, default_entity)
     assert len(result.primary_keys) == 1
     if get_simple_index["index_type"] != "FLAT":
         index = connect.describe_index(collection, "")
         ut.create_target_index(get_simple_index, field_name)
         assert index == get_simple_index
     connect.drop_collection(other)
Ejemplo n.º 17
0
 def test_create_partition_collection_not_existed(self, connect):
     """
     target: verify the response when creating a partition with a non_existing collection
     method: create a partition with a non_existing collection
     expected: raise an exception
     """
     collection_name = ut.gen_unique_str()
     try:
         connect.create_partition(collection_name, default_tag)
     except Exception as e:
         code = getattr(
             e, 'code', "The exception does not contain the field of code.")
         assert code == 1
         message = getattr(
             e, 'message',
             "The exception does not contain the field of message.")
         assert message == "CreatePartition failed: can't find collection: %s" % collection_name
     else:
         # Bug fix: the test previously passed silently when no exception
         # was raised at all, contradicting "expected: raise an exception".
         pytest.fail("create_partition did not raise for a non-existing collection")
Ejemplo n.º 18
0
 def test_drop_partition_name_not_existed_A(self, connect, collection):
     """
     target: test drop partition, but collection not existed
     method: create partitions first, then call function: drop_partition
     expected: status not ok
     """
     connect.create_partition(collection, default_tag)
     new_collection = ut.gen_unique_str()
     try:
         connect.drop_partition(new_collection, default_tag)
     except Exception as e:
         code = getattr(
             e, 'code', "The exception does not contain the field of code.")
         assert code == 1
         message = getattr(
             e, 'message',
             "The exception does not contain the field of message.")
         assert message == "DropPartition failed: can't find collection: %s" % new_collection
     else:
         # Bug fix: the test previously passed silently when no exception
         # was raised at all, contradicting "expected: status not ok".
         pytest.fail("drop_partition did not raise for a non-existing collection")
Ejemplo n.º 19
0
 def test_insert_entity_create_index_another(self, connect, collection,
                                             get_simple_index):
     """
     target: test insert vector to collection_2 after build index for collection_1
     method: build index and insert vector
     expected: status ok
     """
     other = ut.gen_unique_str(uid)
     connect.create_collection(other, default_fields)
     result = connect.insert(collection, default_entity)
     connect.flush([collection])
     # Build the index on the sibling collection after the insert.
     connect.create_index(other, field_name, get_simple_index)
     if get_simple_index["index_type"] != "FLAT":
         index = connect.describe_index(other, "")
         ut.create_target_index(get_simple_index, field_name)
         assert index == get_simple_index
     stats = connect.get_collection_stats(collection)
     assert stats[row_count] == 1
Ejemplo n.º 20
0
 def test_insert_entity_multi_collections(self, connect):
     """
     target: test insert entities
     method: create 10 collections and insert entities into them in turn
     expected: row count
     """
     num_collections = 10
     names = []
     # Create, insert into, and verify each collection in turn.
     for _ in range(num_collections):
         name = ut.gen_unique_str(uid)
         names.append(name)
         connect.create_collection(name, default_fields)
         result = connect.insert(name, default_entities)
         connect.flush([name])
         assert len(result.primary_keys) == default_nb
         stats = connect.get_collection_stats(name)
         assert stats[row_count] == default_nb
     for name in names:
         connect.drop_collection(name)
Ejemplo n.º 21
0
 def _test_insert_during_flushing_multi_collections(self, connect, args):
     """
     target: flushing will recover
     method: call function: create collections, then insert/flushing, restart server and assert row count
     expected: row count equals 0
     """
     # NOTE(review): leading underscore disables this test (pytest will not
     # collect it); it restarts the server mid-flush, which needs a
     # controllable deployment.
     # disable_autoflush()
     collection_num = 2
     collection_list = []
     for i in range(collection_num):
         collection_name = gen_unique_str(uid)
         collection_list.append(collection_name)
         connect.create_collection(collection_name, default_fields)
         ids = connect.bulk_insert(collection_name, big_entities)
     # Kick off a non-blocking flush so the restart can race it.
     connect.flush(collection_list, _async=True)
     res_count = connect.count_entities(collection_list[-1])
     logging.getLogger().info(res_count)
     if res_count < big_nb:
         # restart server
         assert restart_server(args["service_name"])
         # assert row count again
         new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) 
         res_count_2 = new_connect.count_entities(collection_list[-1])
         logging.getLogger().info(res_count_2)
         # Poll up to `timeout` seconds until every collection reports the
         # full row count after recovery.
         timeout = 300
         start_time = time.time()
         while time.time() - start_time < timeout:
             count_list = []
             break_flag = True
             for index, name in enumerate(collection_list):
                 tmp_count = new_connect.count_entities(name)
                 count_list.append(tmp_count)
                 logging.getLogger().info(count_list)
                 # Any collection still short of big_nb keeps us polling.
                 if tmp_count != big_nb:
                     break_flag = False
                     break
             if break_flag == True:
                 break
             time.sleep(10)
         for name in collection_list:
             assert new_connect.count_entities(name) == big_nb
Ejemplo n.º 22
0
 def create(connect, threads_num):
     """Worker: create this thread's share of the maximum partition count.

     NOTE(review): `collection` is a free variable resolved from an
     enclosing scope not visible in this excerpt — this helper is meant to
     run inside a test that defines it.
     """
     for i in range(ut.max_partition_num // threads_num):
         tag_tmp = ut.gen_unique_str()
         connect.create_partition(collection, tag_tmp)