Example #1
 def test_create_partition_insert_same_tags_two_collections(
         self, connect, collection):
     """
     target: test creating a partition with the same tag in two collections, then
             inserting entities into each and checking the status returned
     method: call create_partition on both collections, insert into each with the same tag, then flush
     expected: status ok, row count of each collection equals the number of inserted entities
     """
     connect.create_partition(collection, default_tag)
     collection_new = ut.gen_unique_str()
     connect.create_collection(collection_new, default_fields)
     connect.create_partition(collection_new, default_tag)
     result = connect.insert(collection,
                             default_entities,
                             partition_name=default_tag)
     assert len(result.primary_keys) == default_nb
     new_result = connect.insert(collection_new,
                                 default_entities,
                                 partition_name=default_tag)
     assert len(new_result.primary_keys) == default_nb
     connect.flush([collection, collection_new])
     res = connect.get_collection_stats(collection)
     assert res["row_count"] == default_nb
     res = connect.get_collection_stats(collection_new)
     assert res["row_count"] == default_nb
Example #2
    def test_create_partition_limit(self, connect, collection, args):
        """
        target: test create partitions, check status returned
        method: create partitions concurrently until max_partition_num is reached, then create one more
        expected: the final create_partition call raises an exception
        """
        threads_num = 8
        threads = []
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

        def create(connect, threads_num):
            for i in range(ut.max_partition_num // threads_num):
                tag_tmp = ut.gen_unique_str()
                connect.create_partition(collection, tag_tmp)

        for i in range(threads_num):
            m = ut.get_milvus(host=args["ip"],
                              port=args["port"],
                              handler=args["handler"])
            t = threading.Thread(target=create, args=(m, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        tag_tmp = ut.gen_unique_str()
        with pytest.raises(Exception) as e:
            connect.create_partition(collection, tag_tmp)
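Each thread above opens its own client via ut.get_milvus because a single client connection is generally not shared safely across threads. For reference, a simpler single-threaded sketch of the same limit check, assuming the same fixtures and ut helpers as above (slower, but easier to follow):

    def test_create_partition_limit_single_thread(self, connect, collection):
        # Sketch only: create partitions up to the assumed per-collection limit,
        # then expect the next create_partition call to fail.
        for _ in range(ut.max_partition_num):
            connect.create_partition(collection, ut.gen_unique_str())
        with pytest.raises(Exception):
            connect.create_partition(collection, ut.gen_unique_str())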
Example #3
 def test_query_not_existed_partition(self, connect, collection):
     """
     target: test query on a partition that does not exist
     method: query with a partition name that was never created
     expected: raise exception
     """
     connect.load_partitions(collection, [ut.default_partition_name])
     tag = ut.gen_unique_str()
     with pytest.raises(Exception):
         connect.query(collection, default_term_expr, partition_names=[tag])
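`default_term_expr` comes from the suite's utils module; it is a boolean filter over a scalar field. A hypothetical value, only to show the expected shape of such an expression:

default_term_expr = "int64 in [1, 2]"  # assumed example: match entities whose int64 field is 1 or 2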
Example #4
 def test_create_different_partition_names(self, connect, collection):
     """
     target: test create partition twice with different names
     method: call create_partition twice, each time with a different tag name
     expected: status ok
     """
     connect.create_partition(collection, default_tag)
     tag_name = ut.gen_unique_str()
     connect.create_partition(collection, tag_name)
     assert ut.compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
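`ut.compare_list_elements` is a suite helper; the assertions in this and the next example only require that both lists contain the same partition names, regardless of order. A minimal sketch of what such a helper might look like (an assumption, not the suite's actual code):

def compare_list_elements(left, right):
    # Order-insensitive comparison: True when both lists hold exactly the same elements.
    return sorted(left) == sorted(right)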
Example #5
 def test_show_multi_partitions(self, connect, collection):
     """
     target: test show partitions, check status and partitions returned
     method: create partitions first, then call function: list_partitions
     expected: status ok, partitions correct
     """
     tag_new = ut.gen_unique_str()
     connect.create_partition(collection, default_tag)
     connect.create_partition(collection, tag_new)
     res = connect.list_partitions(collection)
     assert ut.compare_list_elements(res, [default_tag, tag_new, '_default'])
Example #6
 def test_create_partition_collection_not_existed(self, connect):
     """
     target: verify the response when creating a partition in a non-existent collection
     method: create a partition in a non-existent collection
     expected: raise an exception
     """
     collection_name = ut.gen_unique_str()
     try:
         connect.create_partition(collection_name, default_tag)
     except Exception as e:
         code = getattr(e, 'code', "The exception does not contain the field of code.")
         assert code == 1
         message = getattr(e, 'message', "The exception does not contain the field of message.")
         assert message == "create partition failed: can't find collection: %s" % collection_name
Example #7
 def test_drop_partition_name_not_existed_A(self, connect, collection):
     """
     target: test drop partition on a collection that does not exist
     method: create a partition first, then call drop_partition with a non-existent collection name
     expected: status not ok
     """
     connect.create_partition(collection, default_tag)
     new_collection = ut.gen_unique_str()
     try:
         connect.drop_partition(new_collection, default_tag)
     except Exception as e:
         code = getattr(e, 'code', "The exception does not contain the field of code.")
         assert code == 1
         message = getattr(e, 'message', "The exception does not contain the field of message.")
         assert message == "DropPartition failed: can't find collection: %s" % new_collection
Example #8
File: conftest.py  Project: haojunyu/milvus
@pytest.fixture(scope="function")
def binary_id_collection(request, connect):
    ori_collection_name = getattr(request.module, "collection_id", "test")
    collection_name = gen_unique_str(ori_collection_name)
    try:
        fields = gen_binary_default_fields(auto_id=False)
        connect.create_collection(collection_name, fields)
    except Exception as e:
        pytest.exit(str(e))

    def teardown():
        if connect.has_collection(collection_name):
            connect.drop_collection(collection_name, timeout=delete_timeout)

    request.addfinalizer(teardown)
    assert connect.has_collection(collection_name)
    return collection_name
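A test consumes this fixture by declaring it as a parameter; pytest creates the collection before the test body runs, and the registered finalizer drops it afterwards. A minimal hypothetical usage:

def test_binary_id_collection_exists(connect, binary_id_collection):
    # binary_id_collection is the name of the freshly created collection
    # with binary vector fields and auto_id disabled (see the fixture above).
    assert connect.has_collection(binary_id_collection)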
Example #9
 def _test_insert_during_flushing_multi_collections(self, connect, args):
     """
     target: an in-progress flush recovers after a server restart
     method: create several collections, insert and flush asynchronously, restart the server, then check row counts
     expected: after recovery, the row count of every collection equals big_nb
     """
     # disable_autoflush()
     collection_num = 2
     collection_list = []
     for i in range(collection_num):
         collection_name = gen_unique_str(uid)
         collection_list.append(collection_name)
         connect.create_collection(collection_name, default_fields)
         ids = connect.bulk_insert(collection_name, big_entities)
     connect.flush(collection_list, _async=True)
     res_count = connect.count_entities(collection_list[-1])
     logging.getLogger().info(res_count)
     if res_count < big_nb:
         # restart server
         assert restart_server(args["service_name"])
         # assert row count again
         new_connect = get_milvus(args["ip"],
                                  args["port"],
                                  handler=args["handler"])
         res_count_2 = new_connect.count_entities(collection_list[-1])
         logging.getLogger().info(res_count_2)
         timeout = 300
         start_time = time.time()
         while time.time() - start_time < timeout:
             count_list = []
             break_flag = True
             for index, name in enumerate(collection_list):
                 tmp_count = new_connect.count_entities(name)
                 count_list.append(tmp_count)
                 logging.getLogger().info(count_list)
                 if tmp_count != big_nb:
                     break_flag = False
                     break
             if break_flag:
                 break
             time.sleep(10)
         for name in collection_list:
             assert new_connect.count_entities(name) == big_nb
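Both recovery tests repeat the same pattern: poll count_entities until every collection reaches big_nb or a timeout expires. A hypothetical helper that captures that loop (name and defaults are assumptions, not part of the suite):

def wait_for_row_count(client, collection_names, expected, timeout=300, interval=10):
    # Poll count_entities for each collection until all of them reach `expected`,
    # returning True on success or False once the timeout elapses.
    start_time = time.time()
    while time.time() - start_time < timeout:
        counts = [client.count_entities(name) for name in collection_names]
        logging.getLogger().info(counts)
        if all(count == expected for count in counts):
            return True
        time.sleep(interval)
    return False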
Example #10
 def _test_insert_during_flushing_multi_partitions(self, connect,
                                                   collection, args):
     """
     target: an in-progress flush recovers after a server restart
     method: create a collection with several partitions, insert and flush asynchronously, restart the server, then check the row count
     expected: after recovery, the collection's row count equals big_nb * 2
     """
     # disable_autoflush()
     partitions_num = 2
     partitions = []
     for i in range(partitions_num):
         tag_tmp = gen_unique_str()
         partitions.append(tag_tmp)
         connect.create_partition(collection, tag_tmp)
         ids = connect.bulk_insert(collection,
                                   big_entities,
                                   partition_name=tag_tmp)
     connect.flush([collection], _async=True)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     if res_count < big_nb:
         # restart server
         assert restart_server(args["service_name"])
         # assert row count again
         new_connect = get_milvus(args["ip"],
                                  args["port"],
                                  handler=args["handler"])
         res_count_2 = new_connect.count_entities(collection)
         logging.getLogger().info(res_count_2)
         timeout = 300
         start_time = time.time()
         while new_connect.count_entities(collection) != big_nb * 2 and (
                 time.time() - start_time < timeout):
             time.sleep(10)
             logging.getLogger().info(
                 new_connect.count_entities(collection))
         res_count_3 = new_connect.count_entities(collection)
         logging.getLogger().info(res_count_3)
         assert res_count_3 == big_nb * 2
Example #11
 def create(connect, threads_num):
     # Worker used by Example #2's partition-limit test: `collection` is captured
     # from the enclosing test's scope, so this function is not standalone.
     for i in range(ut.max_partition_num // threads_num):
         tag_tmp = ut.gen_unique_str()
         connect.create_partition(collection, tag_tmp)