def e2e_milvus(host, c_name):
    # connect
    connections.add_connection(default={"host": host, "port": 19530})
    connections.connect(alias='default')

    # create
    # c_name = cf.gen_unique_str(prefix)
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())
    # collection_w.init_collection(name=c_name)

    # insert
    data = cf.gen_default_list_data(ct.default_nb)
    mutation_res, _ = collection_w.insert(data)
    assert mutation_res.insert_count == ct.default_nb

    # create index
    collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
    assert collection_w.has_index()[0]
    assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
                                            ct.default_index)

    # search
    collection_w.load()
    search_res, _ = collection_w.search(data[-1][:ct.default_nq], ct.default_float_vec_field_name,
                                        ct.default_search_params, ct.default_limit)
    assert len(search_res[0]) == ct.default_limit

    # query
    ids = search_res[0].ids[0]
    term_expr = f'{ct.default_int64_field_name} in [{ids}]'
    query_res, _ = collection_w.query(term_expr, output_fields=["*", "%"])
    assert query_res[0][ct.default_int64_field_name] == ids
def test_chaos_memory_stress_querynode(self, connection, chaos_yaml):
    """
    target: explore query node behavior after memory stress chaos injected and recovered
    method: 1. create a collection, insert some data
            2. inject memory stress chaos
            3. load collection and search, query
            4. todo (verify query node response)
            5. delete chaos or chaos finished
            6. release and reload collection, verify search and query are available
    expected: after chaos is deleted, load, search and query are all available
    """
    c_name = 'chaos_memory_nx6DNW4q'
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(c_name)
    log.debug(collection_w.schema)
    log.debug(collection_w._shards_num)

    # apply memory stress
    # apply_memory_stress(chaos_yaml)

    # wait memory stress
    # sleep(constants.WAIT_PER_OP * 2)

    # query
    collection_w.release()
    collection_w.load()
    term_expr = f'{ct.default_int64_field_name} in [0, 1, 999, 99]'
    for i in range(4):
        t0_query = datetime.datetime.now()
        query_res, _ = collection_w.query(term_expr)
        tt_query = datetime.datetime.now() - t0_query
        log.info(f"{i} query cost: {tt_query}")
        assert len(query_res) == 4
def test_chaos_memory_stress_querynode(self, connection, chaos_yaml):
    """
    target: explore query node behavior after memory stress chaos injected and recovered
    method: 1. create a collection, insert some data
            2. inject memory stress chaos
            3. start a thread to load, search and query
            4. after the chaos duration, check query and search success rate
            5. delete the chaos, or let it finish, in the finally block
    expected: 1. if memory is insufficient, the querynode is OOMKilled and available again after restart
              2. if memory is sufficient, the success rates of query and search are both 1.0
    """
    c_name = 'chaos_memory_nx6DNW4q'
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(c_name)
    log.debug(collection_w.schema)
    log.debug(collection_w._shards_num)

    # apply memory stress chaos
    chaos_config = gen_experiment_config(chaos_yaml)
    log.debug(chaos_config)
    chaos_res = CusResource(kind=chaos_config['kind'],
                            group=constants.CHAOS_GROUP,
                            version=constants.CHAOS_VERSION,
                            namespace=constants.CHAOS_NAMESPACE)
    chaos_res.create(chaos_config)
    log.debug("chaos injected")
    duration = chaos_config.get('spec').get('duration')
    duration = duration.replace('h', '*3600+').replace('m', '*60+').replace('s', '*1+') + '+0'
    meta_name = chaos_config.get('metadata').get('name')

    # wait for the memory stress to build up
    sleep(constants.WAIT_PER_OP * 2)

    # try to do release, load, query and search in a loop for the chaos duration
    try:
        start = time.time()
        while time.time() - start < eval(duration):
            collection_w.release()
            collection_w.load()
            term_expr = f'{ct.default_int64_field_name} in {[random.randint(0, 100)]}'
            query_res, _ = collection_w.query(term_expr)
            assert len(query_res) == 1
            search_res, _ = collection_w.search(cf.gen_vectors(1, ct.default_dim),
                                                ct.default_float_vec_field_name,
                                                ct.default_search_params, ct.default_limit)
            log.debug(search_res[0].ids)
            assert len(search_res[0].ids) == ct.default_limit
    except Exception as e:
        raise Exception(str(e))
    finally:
        chaos_res.delete(meta_name)
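# Note: the eval()-based duration arithmetic above works ('1h5m30s' becomes
# '1*3600+5*60+30*1++0', which evaluates to 3930), but a small parser avoids
# eval entirely. A minimal sketch; parse_duration is a hypothetical helper,
# not part of this test repo.
import re

def parse_duration(duration: str) -> int:
    """Convert a chaos duration string like '1h5m30s' to seconds (hypothetical sketch)."""
    units = {'h': 3600, 'm': 60, 's': 1}
    return sum(int(value) * units[unit]
               for value, unit in re.findall(r'(\d+)([hms])', duration))

assert parse_duration('5m30s') == 330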
def e2e_milvus(host, c_name):
    """ e2e milvus """
    log.debug(f'pid: {os.getpid()}')

    # connect
    connections.add_connection(default={"host": host, "port": 19530})
    connections.connect(alias='default')

    # create
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())

    # insert
    df = cf.gen_default_dataframe_data()
    mutation_res, _ = collection_w.insert(df)
    assert mutation_res.insert_count == ct.default_nb
    log.debug(collection_w.num_entities)

    # create index
    collection_w.create_index(ct.default_float_vec_field_name, ct.default_index)
    assert collection_w.has_index()[0]
    assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
                                            ct.default_index)

    # search
    collection_w.load()
    search_res, _ = collection_w.search(cf.gen_vectors(1, dim=ct.default_dim),
                                        ct.default_float_vec_field_name,
                                        ct.default_search_params, ct.default_limit)
    assert len(search_res[0]) == ct.default_limit
    log.debug(search_res[0].ids)

    # query
    ids = search_res[0].ids[0]
    term_expr = f'{ct.default_int64_field_name} in [{ids}]'
    query_res, _ = collection_w.query(term_expr, output_fields=["*", "%"])
    assert query_res[0][ct.default_int64_field_name] == ids
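# The pid logging above suggests e2e_milvus is meant to be driven from several
# processes at once. A minimal usage sketch under that assumption; the host
# value and the collection-name prefix are placeholders, not repo conventions.
from multiprocessing import Process

if __name__ == '__main__':
    host = '127.0.0.1'  # placeholder Milvus host
    procs = [Process(target=e2e_milvus, args=(host, f'e2e_proc_{i}')) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()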
def test_simd_compat_e2e(self, request, simd):
    log.info(f"start to e2e verification: {simd}")

    # parse the connection alias from results cached by previous steps
    results = request.config.cache.get(simd, None)
    alias = results.get('alias', simd)
    conn = connections.connect(alias=alias)
    assert conn is not None
    simd_cache = request.config.cache.get(simd, None)
    log.info(f"simd_cache: {simd_cache}")

    # create
    name = cf.gen_unique_str("compat")
    t0 = time.time()
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=name,
                                 schema=cf.gen_default_collection_schema(),
                                 using=alias, timeout=40)
    tt = time.time() - t0
    assert collection_w.name == name
    entities = collection_w.num_entities
    log.info(f"assert create collection: {tt}, init_entities: {entities}")

    # insert
    data = cf.gen_default_list_data()
    t0 = time.time()
    _, res = collection_w.insert(data)
    tt = time.time() - t0
    log.info(f"assert insert: {tt}")
    assert res

    # flush (reading num_entities implicitly flushes the collection)
    t0 = time.time()
    assert collection_w.num_entities == len(data[0]) + entities
    tt = time.time() - t0
    entities = collection_w.num_entities
    log.info(f"assert flush: {tt}, entities: {entities}")

    # search
    collection_w.load()
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")
    assert len(res_1) == 1
    collection_w.release()

    # index
    d = cf.gen_default_list_data()
    collection_w.insert(d)
    log.info(f"assert index entities: {collection_w.num_entities}")
    _index_params = {"index_type": "IVF_SQ8", "params": {"nlist": 64}, "metric_type": "L2"}
    t0 = time.time()
    index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                         index_params=_index_params,
                                         name=cf.gen_unique_str())
    tt = time.time() - t0
    log.info(f"assert index: {tt}")
    assert len(collection_w.indexes) == 1

    # search
    t0 = time.time()
    collection_w.load()
    tt = time.time() - t0
    log.info(f"assert load: {tt}")
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")

    # query
    term_expr = f'{ct.default_int64_field_name} in [1001,1201,4999,2999]'
    t0 = time.time()
    res, _ = collection_w.query(term_expr)
    tt = time.time() - t0
    log.info(f"assert query result {len(res)}: {tt}")
def test_simd_compat_e2e(self, simd_id):
    """
    steps
    1. [test_milvus_install]: set up milvus with customized simd configured
    2. [test_simd_compat_e2e]: verify milvus is working well
    3. [test_milvus_cleanup]: delete milvus instances in teardown
    """
    simd = supported_simd_types[simd_id]
    log.info(f"start to install milvus with simd {simd}")
    release_name, host, port = _install_milvus(simd)
    self.release_name = release_name
    assert host is not None
    conn = connections.connect("default", host=host, port=port)
    assert conn is not None
    mil = MilvusSys(alias="default")
    log.info(f"milvus build version: {mil.build_version}")
    log.info(f"milvus simdType: {mil.simd_type}")
    assert str(mil.simd_type).lower() in [simd_type.lower()
                                          for simd_type in supported_simd_types[simd_id:]]

    log.info(f"start to e2e verification: {simd}")
    # create
    name = cf.gen_unique_str("compat")
    t0 = time.time()
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=name,
                                 schema=cf.gen_default_collection_schema(),
                                 timeout=40)
    tt = time.time() - t0
    assert collection_w.name == name
    entities = collection_w.num_entities
    log.info(f"assert create collection: {tt}, init_entities: {entities}")

    # insert
    data = cf.gen_default_list_data()
    t0 = time.time()
    _, res = collection_w.insert(data)
    tt = time.time() - t0
    log.info(f"assert insert: {tt}")
    assert res

    # flush
    t0 = time.time()
    assert collection_w.num_entities == len(data[0]) + entities
    tt = time.time() - t0
    entities = collection_w.num_entities
    log.info(f"assert flush: {tt}, entities: {entities}")

    # search
    collection_w.load()
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")
    assert len(res_1) == 1
    collection_w.release()

    # index
    d = cf.gen_default_list_data()
    collection_w.insert(d)
    log.info(f"assert index entities: {collection_w.num_entities}")
    _index_params = {"index_type": "IVF_SQ8", "params": {"nlist": 64}, "metric_type": "L2"}
    t0 = time.time()
    index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                         index_params=_index_params,
                                         name=cf.gen_unique_str())
    tt = time.time() - t0
    log.info(f"assert index: {tt}")
    assert len(collection_w.indexes) == 1

    # search
    t0 = time.time()
    collection_w.load()
    tt = time.time() - t0
    log.info(f"assert load: {tt}")
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")

    # query
    term_expr = f'{ct.default_int64_field_name} in [1001,1201,4999,2999]'
    t0 = time.time()
    res, _ = collection_w.query(term_expr)
    tt = time.time() - t0
    log.info(f"assert query result {len(res)}: {tt}")
def test_chaos_data_consist(self, connection, chaos_yaml):
    """
    target: verify data consistency after chaos injected and recovered
    method: 1. create a collection, insert some data, search and query
            2. inject a chaos object
            3. reconnect to the service
            4. verify a) data entities persist, index persists,
                      b) search and query results persist
    expected: collection data and results persist
    """
    c_name = cf.gen_unique_str('chaos_collection_')
    nb = 5000
    i_name = cf.gen_unique_str('chaos_index_')
    index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}

    # create
    t0 = datetime.datetime.now()
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())
    tt = datetime.datetime.now() - t0
    log.info(f"assert create: {tt}")
    assert collection_w.name == c_name

    # insert
    data = cf.gen_default_list_data(nb=nb)
    t0 = datetime.datetime.now()
    _, res = collection_w.insert(data)
    tt = datetime.datetime.now() - t0
    log.info(f"assert insert: {tt}")
    assert res

    # flush
    t0 = datetime.datetime.now()
    assert collection_w.num_entities == nb
    tt = datetime.datetime.now() - t0
    log.info(f"assert flush: {tt}")

    # search
    collection_w.load()
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    t0 = datetime.datetime.now()
    search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
    search_res, _ = collection_w.search(data=search_vectors,
                                        anns_field=ct.default_float_vec_field_name,
                                        param=search_params, limit=1)
    tt = datetime.datetime.now() - t0
    log.info(f"assert search: {tt}")
    assert len(search_res) == 1

    # index
    t0 = datetime.datetime.now()
    index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                         index_params=index_params,
                                         name=i_name)
    tt = datetime.datetime.now() - t0
    log.info(f"assert index: {tt}")
    assert len(collection_w.indexes) == 1

    # query
    term_expr = f'{ct.default_int64_field_name} in [1001,1201,999,99]'
    t0 = datetime.datetime.now()
    query_res, _ = collection_w.query(term_expr)
    tt = datetime.datetime.now() - t0
    log.info(f"assert query: {tt}")
    assert len(query_res) == 4

    # reboot a pod
    reboot_pod(chaos_yaml)

    # parse chaos object
    chaos_config = cc.gen_experiment_config(chaos_yaml)
    meta_name = chaos_config.get('metadata', None).get('name', None)

    # wait all pods ready
    log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label app.kubernetes.io/instance={meta_name}")
    wait_pods_ready(constants.CHAOS_NAMESPACE, f"app.kubernetes.io/instance={meta_name}")
    log.info(f"wait for pods in namespace {constants.CHAOS_NAMESPACE} with label release={meta_name}")
    wait_pods_ready(constants.CHAOS_NAMESPACE, f"release={meta_name}")
    log.info("all pods are ready")

    # reconnect if needed
    sleep(constants.WAIT_PER_OP * 3)
    reconnect(connections, alias='default')

    # verify collection persists
    assert utility.has_collection(c_name)
    log.info("assert collection persists")
    collection_w2 = ApiCollectionWrapper()
    collection_w2.init_collection(c_name)

    # verify data persist
    assert collection_w2.num_entities == nb
    log.info("assert data persists")

    # verify index persists
    assert collection_w2.has_index(i_name)
    log.info("assert index persists")

    # verify search results persist
    collection_w2.load()
    t0 = datetime.datetime.now()  # reset the timer; the previous t0 belongs to the query step
    search_res, _ = collection_w2.search(data=search_vectors,
                                         anns_field=ct.default_float_vec_field_name,
                                         param=search_params, limit=1)
    tt = datetime.datetime.now() - t0
    log.info(f"assert search: {tt}")
    assert len(search_res) == 1

    # verify query results persist
    query_res2, _ = collection_w2.query(term_expr)
    assert len(query_res2) == len(query_res)
    log.info("assert query result persists")
def test_customize_segment_size(self, seg_size, seg_count):
    """
    steps: install milvus with a customized segment size, insert ~2 million
    entities, then verify the segment count and basic search/index/query
    """
    log.info(f"start to install milvus with segment size {seg_size}")
    release_name, host, port = _install_milvus(seg_size)
    self.release_name = release_name
    assert host is not None
    conn = connections.connect("default", host=host, port=port)
    assert conn is not None
    mil = MilvusSys(alias="default")
    log.info(f"milvus build version: {mil.build_version}")
    log.info(f"start to e2e verification: {seg_size}")

    # create
    name = cf.gen_unique_str("segsiz")
    t0 = time.time()
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=name,
                                 schema=cf.gen_default_collection_schema(),
                                 timeout=40)
    tt = time.time() - t0
    assert collection_w.name == name
    entities = collection_w.num_entities
    log.info(f"assert create collection: {tt}, init_entities: {entities}")

    # insert
    nb = 50000
    data = cf.gen_default_list_data(nb=nb)
    t0 = time.time()
    _, res = collection_w.insert(data)
    tt = time.time() - t0
    log.info(f"assert insert: {tt}")
    assert res

    # insert 2 million entities in total
    rounds = 40
    for _ in range(rounds - 1):
        _, res = collection_w.insert(data)
    entities = collection_w.num_entities
    assert entities == nb * rounds

    # load
    collection_w.load()
    utility_wrap = ApiUtilityWrapper()
    segs, _ = utility_wrap.get_query_segment_info(collection_w.name)
    log.info(f"assert segments: {len(segs)}")
    assert len(segs) == seg_count

    # search
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1, timeout=30)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")
    assert len(res_1) == 1
    collection_w.release()

    # index
    d = cf.gen_default_list_data()
    collection_w.insert(d)
    log.info(f"assert index entities: {collection_w.num_entities}")
    _index_params = {"index_type": "IVF_SQ8", "params": {"nlist": 64}, "metric_type": "L2"}
    t0 = time.time()
    index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                         index_params=_index_params,
                                         name=cf.gen_unique_str(), timeout=120)
    tt = time.time() - t0
    log.info(f"assert index: {tt}")
    assert len(collection_w.indexes) == 1

    # search
    t0 = time.time()
    collection_w.load()
    tt = time.time() - t0
    log.info(f"assert load: {tt}")
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    t0 = time.time()
    res_1, _ = collection_w.search(data=search_vectors,
                                   anns_field=ct.default_float_vec_field_name,
                                   param=search_params, limit=1, timeout=30)
    tt = time.time() - t0
    log.info(f"assert search: {tt}")

    # query
    term_expr = f'{ct.default_int64_field_name} in [1001,1201,4999,2999]'
    t0 = time.time()
    res, _ = collection_w.query(term_expr, timeout=30)
    tt = time.time() - t0
    log.info(f"assert query result {len(res)}: {tt}")
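# The expected seg_count presumably follows from total data volume divided by
# the configured segment size. A back-of-the-envelope sketch, assuming each row
# is dominated by a dim-sized float32 vector and ignoring scalar fields and
# segment overhead; expected_segment_count is a hypothetical helper, not used
# by the test above.
import math

def expected_segment_count(nb_total: int, dim: int, seg_size_mb: int) -> int:
    """Rough estimate of sealed segments: total vector bytes / segment size (sketch)."""
    row_bytes = dim * 4  # float32 vector only
    total_mb = nb_total * row_bytes / (1024 * 1024)
    return math.ceil(total_mb / seg_size_mb)

# e.g. 2,000,000 rows of dim-128 vectors is ~976 MB, so a 256 MB
# segment size yields roughly 4 segments
print(expected_segment_count(2_000_000, 128, 256))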
def test_chaos_data_consist(self, connection, chaos_yaml):
    c_name = cf.gen_unique_str('chaos_collection_')
    nb = 5000
    i_name = cf.gen_unique_str('chaos_index_')
    index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}

    # create
    t0 = datetime.datetime.now()
    collection_w = ApiCollectionWrapper()
    collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())
    tt = datetime.datetime.now() - t0
    log.debug(f"assert create: {tt}")
    assert collection_w.name == c_name

    # insert
    data = cf.gen_default_list_data(nb=nb)
    t0 = datetime.datetime.now()
    _, res = collection_w.insert(data)
    tt = datetime.datetime.now() - t0
    log.debug(f"assert insert: {tt}")
    assert res

    # flush
    t0 = datetime.datetime.now()
    assert collection_w.num_entities == nb
    tt = datetime.datetime.now() - t0
    log.debug(f"assert flush: {tt}")

    # search
    collection_w.load()
    search_vectors = cf.gen_vectors(1, ct.default_dim)
    search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
    t0 = datetime.datetime.now()
    search_res, _ = collection_w.search(data=search_vectors,
                                        anns_field=ct.default_float_vec_field_name,
                                        param=search_params, limit=1)
    tt = datetime.datetime.now() - t0
    log.debug(f"assert search: {tt}")
    assert len(search_res) == 1

    # index
    t0 = datetime.datetime.now()
    index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                         index_params=index_params,
                                         name=i_name)
    tt = datetime.datetime.now() - t0
    log.debug(f"assert index: {tt}")
    assert len(collection_w.indexes) == 1

    # query
    term_expr = f'{ct.default_int64_field_name} in [3001,4001,4999,2999]'
    t0 = datetime.datetime.now()
    query_res, _ = collection_w.query(term_expr)
    tt = datetime.datetime.now() - t0
    log.debug(f"assert query: {tt}")
    assert len(query_res) == 4

    # reboot a pod
    reboot_pod(chaos_yaml)

    # reconnect if needed
    sleep(constants.WAIT_PER_OP * 4)
    reconnect(connections, self.host, self.port)

    # verify collection persists
    assert utility.has_collection(c_name)
    log.debug("assert collection persists")
    collection_w2 = ApiCollectionWrapper()
    collection_w2.init_collection(c_name)

    # verify data persist
    assert collection_w2.num_entities == nb
    log.debug("assert data persists")

    # verify index persists
    assert collection_w2.has_index(i_name)
    log.debug("assert index persists")

    # verify search results persist
    collection_w2.load()
    search_res2, _ = collection_w2.search(data=search_vectors,
                                          anns_field=ct.default_float_vec_field_name,
                                          param=search_params, limit=1)
    assert len(search_res2) == 1
    log.debug("assert search result persists")

    # verify query results persist
    query_res2, _ = collection_w2.query(term_expr)
    assert query_res2 == query_res
    log.debug("assert query result persists")