Example #1
0
    def test_collection_insert_rows_count_multi_threading(
            self, args, collection):
        """
        target: test collection rows_count is correct or not with multi threading
        method: create collection and insert entities in it(idmap),
                assert the value returned by count_entities method is equal to length of entities
        expected: the count is equal to the length of entities
        """
        if args["handler"] == "HTTP":
            pytest.skip("Skip test in http mode")
        thread_num = 8
        threads = []
        # try_connect=False: the shared client connects lazily inside the workers.
        milvus = ut.get_milvus(host=args["ip"],
                               port=args["port"],
                               handler=args["handler"],
                               try_connect=False)

        def insert(thread_i):
            log.info("In thread-%d" % thread_i)
            # Return value intentionally ignored; only the final row count matters.
            milvus.insert(collection, default_entities)
            milvus.flush([collection])

        for i in range(thread_num):
            x = threading.Thread(target=insert, args=(i,))
            threads.append(x)
            x.start()
        for th in threads:
            th.join()
        stats = milvus.get_collection_stats(collection)
        # Each of the thread_num workers inserted default_nb entities.
        assert stats[row_count] == thread_num * default_nb
Example #2
0
 def _test_during_indexing(self, connect, collection, args):
     """
     target: flushing will recover
     method: call function: create collection, then indexing, restart server and assert row count
     expected: row count equals nb, server continue to build index after restart
     """
     # disable_autoflush()
     loop = 5
     # Insert big_entities `loop` times; the returned ids are not needed.
     for i in range(loop):
         connect.bulk_insert(collection, big_entities)
     connect.flush([collection])
     # Build the index asynchronously so the restart below interrupts it.
     connect.create_index(collection, field_name, default_index, _async=True)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     connect.get_collection_stats(collection)
     # restart server
     assert restart_server(args["service_name"])
     # assert row count again through a fresh connection
     new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
     res_count_2 = new_connect.count_entities(collection)
     logging.getLogger().info(res_count_2)
     assert res_count_2 == loop * big_nb
     # Server should resume building the index after restart.
     status = new_connect._cmd("status")
     assert json.loads(status)["indexing"] is True
Example #3
0
 def _test_insert_during_flushing(self, connect, collection, args):
     """
     target: flushing will recover
     method: call function: create collection, then insert/flushing, restart server and assert row count
     expected: row count equals 0
     """
     # disable_autoflush()
     connect.bulk_insert(collection, big_entities)
     connect.flush([collection], _async=True)
     count_before = connect.count_entities(collection)
     logging.getLogger().info(count_before)
     if count_before >= big_nb:
         # The async flush already completed; nothing left to verify here.
         return
     # Restart the server while the flush may still be in progress.
     assert restart_server(args["service_name"])
     # Reconnect and wait for the row count to recover.
     fresh = get_milvus(args["ip"], args["port"], handler=args["handler"])
     logging.getLogger().info(fresh.count_entities(collection))
     deadline = time.time() + 300
     while fresh.count_entities(collection) != big_nb and time.time() < deadline:
         time.sleep(10)
         logging.getLogger().info(fresh.count_entities(collection))
     final_count = fresh.count_entities(collection)
     logging.getLogger().info(final_count)
     assert final_count == big_nb
Example #4
0
 def _test_delete_during_flushing(self, connect, collection, args):
     """
     target: flushing will recover
     method: call function: create collection, then delete/flushing, restart server and assert row count
     expected: row count equals (nb - delete_length)
     """
     # disable_autoflush()
     ids = connect.bulk_insert(collection, big_entities)
     connect.flush([collection])
     delete_length = 1000
     # Delete a contiguous slice taken from the middle of the inserted ids.
     delete_ids = ids[big_nb//4:big_nb//4+delete_length]
     delete_res = connect.delete_entity_by_id(collection, delete_ids)
     connect.flush([collection], _async=True)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     # restart server
     assert restart_server(args["service_name"])
     # assert row count again through a fresh connection
     new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
     res_count_2 = new_connect.count_entities(collection)
     logging.getLogger().info(res_count_2)
     expected = big_nb - delete_length
     timeout = 100
     start_time = time.time()
     while new_connect.count_entities(collection) != expected and (time.time() - start_time < timeout):
         time.sleep(10)
         logging.getLogger().info(new_connect.count_entities(collection))
     # BUGFIX: the final assertion used to be guarded by
     # `if count == expected:`, so a timeout let the test pass silently.
     # Assert unconditionally after the wait loop.
     time.sleep(10)
     res_count_3 = new_connect.count_entities(collection)
     logging.getLogger().info(res_count_3)
     assert res_count_3 == expected
Example #5
0
    def test_create_partition_limit(self, connect, collection, args):
        """
        target: test create partitions, check status returned
        method: call function: create_partition for 4097 times
        expected: exception raised
        """
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        worker_count = 8
        workers = []

        def fill_partitions(client, total_workers):
            # Each worker creates its share of the partition quota.
            for _ in range(ut.max_partition_num // total_workers):
                client.create_partition(collection, ut.gen_unique_str())

        for _ in range(worker_count):
            client = ut.get_milvus(host=args["ip"],
                                   port=args["port"],
                                   handler=args["handler"])
            thread = threading.Thread(target=fill_partitions,
                                      args=(client, worker_count))
            workers.append(thread)
            thread.start()
        for thread in workers:
            thread.join()
        # One partition beyond the limit must be rejected.
        with pytest.raises(Exception):
            connect.create_partition(collection, ut.gen_unique_str())
Example #6
0
 def _test_during_indexed(self, connect, collection, args):
     """
     target: flushing will recover
     method: call function: create collection, then indexed, restart server and assert row count
     expected: row count equals nb
     """
     # disable_autoflush()
     connect.bulk_insert(collection, big_entities)
     connect.flush([collection])
     # Synchronous index build: completes before the restart below.
     connect.create_index(collection, field_name, default_index)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     connect.get_collection_stats(collection)
     # restart server
     assert restart_server(args["service_name"])
     # assert row count again through a fresh connection
     new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
     assert new_connect.count_entities(collection) == big_nb
     stats = connect.get_collection_stats(collection)
     for file in stats["partitions"][0]["segments"][0]["files"]:
         if file["field"] == field_name and file["name"] != "_raw":
             assert file["data_size"] > 0
             # Simplified from the old `assert False` / `assert True`
             # branches: the index type must survive the restart.
             assert file["index_type"] == default_index["index_type"]
Example #7
0
def milvus(request):
    """Pytest fixture: build a Milvus client from command-line options."""
    opt = request.config.getoption
    host = opt("--host")
    port = opt("--port")
    http_port = opt("--http_port")
    handler = opt("--handler")
    # The HTTP handler talks to the http port instead of the gRPC one.
    port = http_port if handler == "HTTP" else port
    return get_milvus(host=host, port=port, handler=handler)
Example #8
0
 def test_connect_with_invalid_uri(self, get_invalid_uri, args):
     """
     target: test uri connect with invalid uri value
     method: set port in gen_invalid_uris
     expected: connected is False
     """
     # Connecting with a malformed uri must raise.
     with pytest.raises(Exception):
         get_milvus(uri=get_invalid_uri, handler=args["handler"])
Example #9
0
 def test_connect_with_invalid_port(self, args, get_invalid_port):
     """
     target: test ip:port connect with invalid port value
     method: set port in gen_invalid_ports
     expected: connected is False
     """
     # Connecting with an invalid port must raise.
     with pytest.raises(Exception):
         get_milvus(args["ip"], get_invalid_port, args["handler"])
Example #10
0
 def test_connect_uri_null(self, args):
     """
     target: test connect with null uri
     method: uri set null
     expected: connected is True
     """
     empty_uri = ""
     if not self.local_ip(args):
         # Remote host: an empty uri cannot be resolved, so expect failure.
         with pytest.raises(Exception):
             get_milvus(None, None, uri=empty_uri, handler=args["handler"])
     else:
         # Local server: the client falls back to defaults and connects.
         get_milvus(None, None, uri=empty_uri, handler=args["handler"])
Example #11
0
 def test_connect_uri(self, args):
     """
     target: test connect with correct uri
     method: uri format and value are both correct
     expected: connected is True
     """
     # Build a well-formed tcp uri from the configured endpoint.
     uri = "tcp://{}:{}".format(args["ip"], args["port"])
     get_milvus(args["ip"], args["port"], uri=uri, handler=args["handler"])
Example #12
0
def dis_connect(request):
    """Pytest fixture: return a Milvus client whose connection is already
    closed, for tests that exercise calls on a disconnected client.

    Note: the unused `--service` option read was removed (dead local).
    """
    host = request.config.getoption("--host")
    port = request.config.getoption("--port")
    http_port = request.config.getoption("--http_port")
    handler = request.config.getoption("--handler")
    if handler == "HTTP":
        port = http_port
    milvus = get_milvus(host=host, port=port, handler=handler)
    milvus.close()
    return milvus
Example #13
0
 def _test_insert_during_flushing_multi_collections(self, connect, args):
     """
     target: flushing will recover
     method: call function: create collections, then insert/flushing, restart server and assert row count
     expected: row count equals 0
     """
     # disable_autoflush()
     collection_num = 2
     collection_list = []
     for i in range(collection_num):
         collection_name = gen_unique_str(uid)
         collection_list.append(collection_name)
         connect.create_collection(collection_name, default_fields)
         # Returned ids are not needed afterwards.
         connect.bulk_insert(collection_name, big_entities)
     connect.flush(collection_list, _async=True)
     res_count = connect.count_entities(collection_list[-1])
     logging.getLogger().info(res_count)
     if res_count < big_nb:
         # restart server while the async flush may still be in progress
         assert restart_server(args["service_name"])
         # assert row count again through a fresh connection
         new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
         res_count_2 = new_connect.count_entities(collection_list[-1])
         logging.getLogger().info(res_count_2)
         timeout = 300
         start_time = time.time()
         while time.time() - start_time < timeout:
             count_list = []
             all_flushed = True
             for name in collection_list:
                 tmp_count = new_connect.count_entities(name)
                 count_list.append(tmp_count)
                 logging.getLogger().info(count_list)
                 if tmp_count != big_nb:
                     all_flushed = False
                     break
             if all_flushed:
                 break
             time.sleep(10)
         for name in collection_list:
             assert new_connect.count_entities(name) == big_nb
Example #14
0
 def _test_insert_flush(self, connect, collection, args):
     """
     target: return the same row count after server restart
     method: call function: create collection, then insert/flush, restart server and assert row count
     expected: row count keep the same
     """
     logger = logging.getLogger()
     expected = 2 * default_nb
     # Two insert/flush rounds, default_nb entities each.
     for _ in range(2):
         connect.bulk_insert(collection, default_entities)
         connect.flush([collection])
     count_before = connect.count_entities(collection)
     logger.info(count_before)
     assert count_before == expected
     # Restart and verify the count survives via a fresh connection.
     logger.info("Start restart server")
     assert restart_server(args["service_name"])
     fresh = get_milvus(args["ip"], args["port"], handler=args["handler"])
     count_after = fresh.count_entities(collection)
     logger.info(count_after)
     assert count_after == expected
Example #15
0
 def _test_kill_mysql_during_index(self, connect, collection, args):
     """
     Verify the row count stays intact, read through a fresh connection,
     while an IVF_PQ index is being built asynchronously.
     """
     entity_count = 20000
     pq_param = {"nlist": 1024, "m": 16}
     vectors = gen_vectors(entity_count, default_dim)
     status, ids = connect.bulk_insert(collection, vectors, ids=list(range(entity_count)))
     status = connect.flush([collection])
     assert status.OK()
     status, count = connect.count_entities(collection)
     logging.getLogger().info(count)
     assert status.OK()
     assert count == entity_count
     logging.getLogger().info("Start create index async")
     status = connect.create_index(collection, IndexType.IVF_PQ, pq_param, _async=True)
     time.sleep(2)
     logging.getLogger().info("Start play mysql failure")
     # pass
     fresh = get_milvus(args["ip"], args["port"], handler=args["handler"])
     status, count = fresh.count_entities(collection)
     assert status.OK()
     assert count == entity_count
Example #16
0
 def _test_delete_flush_during_compacting(self, connect, collection, args):
     """
     target: verify server work after restart during compaction
     method: call function: create collection, then delete/flush/compacting, restart server and assert row count
         call `compact` again, compact pass
     expected: row count equals (nb - delete_length)
     """
     # disable_autoflush()
     ids = connect.bulk_insert(collection, big_entities)
     connect.flush([collection])
     delete_length = 1000
     loop = 10
     # Delete `loop` disjoint slices of `delete_length` ids each
     # (delete_length * loop entities removed in total).
     for i in range(loop):
         delete_ids = ids[i*delete_length:(i+1)*delete_length]
         delete_res = connect.delete_entity_by_id(collection, delete_ids)
     connect.flush([collection])
     # Kick off compaction asynchronously so the restart below can interrupt it.
     connect.compact(collection, _async=True)
     res_count = connect.count_entities(collection)
     logging.getLogger().info(res_count)
     assert res_count == big_nb - delete_length*loop
     info = connect.get_collection_stats(collection)
     # Segment size before the restart, logged for comparison only.
     size_old = info["partitions"][0]["segments"][0]["data_size"]
     logging.getLogger().info(size_old)
     # restart server
     assert restart_server(args["service_name"])
     # assert row count again
     new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) 
     res_count_2 = new_connect.count_entities(collection)
     logging.getLogger().info(res_count_2)
     assert res_count_2 == big_nb - delete_length*loop
     info = connect.get_collection_stats(collection)
     size_before = info["partitions"][0]["segments"][0]["data_size"]
     # A synchronous compact after the restart must succeed and shrink
     # the segment (deleted rows are reclaimed).
     status = connect.compact(collection)
     assert status.OK()
     info = connect.get_collection_stats(collection)
     size_after = info["partitions"][0]["segments"][0]["data_size"]
     assert size_before > size_after
Example #17
0
def connect(request):
    """Pytest fixture: return a connected Milvus client.

    Exits the whole pytest session if the server cannot be reached; the
    connection is closed best-effort in a finalizer.

    Note: the unused `--service` option read was removed (dead local).
    """
    host = request.config.getoption("--host")
    port = request.config.getoption("--port")
    http_port = request.config.getoption("--http_port")
    handler = request.config.getoption("--handler")
    if handler == "HTTP":
        port = http_port
    try:
        milvus = get_milvus(host=host, port=port, handler=handler)
        # reset_build_index_threshold(milvus)
    except Exception as e:
        logging.getLogger().error(str(e))
        pytest.exit("Milvus server can not connected, exit pytest ...")

    def fin():
        # Best-effort close; a failure here must not fail the test run.
        try:
            milvus.close()
        except Exception as e:
            logging.getLogger().info(str(e))

    request.addfinalizer(fin)
    return milvus
Example #18
0
 def connect():
     # Connecting to the configured endpoint must yield a truthy client.
     client = get_milvus(args["ip"], args["port"], handler=args["handler"])
     assert client
Example #19
0
 def test_connect_with_invalid_ip(self, args, get_invalid_ip):
     """Connecting with an invalid ip value must raise."""
     with pytest.raises(Exception):
         get_milvus(get_invalid_ip, args["port"], args["handler"])