Example #1
def assert_statistic(checkers, expectations=None):
    # avoid a mutable default argument
    expectations = expectations or {}
    for k in checkers.keys():
        # expect succ if no expectations
        succ_rate = checkers[k].succ_rate()
        if expectations.get(k, '') == constants.FAIL:
            log.debug(f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {checkers[k].total()}")
            delayed_assert.expect(succ_rate < 0.49)
        else:
            log.debug(f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {checkers[k].total()}")
            delayed_assert.expect(succ_rate > 0.90)
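
A minimal usage sketch (hedged; health_checkers, Op, and constants.FAIL are assumed from the surrounding suite):

# expect every op to succeed
assert_statistic(health_checkers)
# expect search to fail while a chaos experiment is active
assert_statistic(health_checkers, expectations={Op.search: constants.FAIL})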
Example #2
 def teardown(self):
     chaos_opt = ChaosOpt(self._chaos_config['kind'])
     meta_name = self._chaos_config.get('metadata', {}).get('name')
     chaos_opt.delete_chaos_object(meta_name, raise_ex=False)
     for k, ch in self.health_checkers.items():
         ch.terminate()
         log.debug(f"tear down: checker {k} terminated")
     sleep(2)
     for k, t in self.checker_threads.items():
         log.debug(f"Thread {k} is_alive(): {t.is_alive()}")
Example #3
    def test_chaos(self, chaos_yaml):
        # start the monitor threads to check the milvus ops
        start_monitor_threads(self.health_checkers)

        # parse chaos object
        print("test.start")
        chaos_config = gen_experiment_config(chaos_yaml)
        log.debug(chaos_config)

        # parse the test expectations in testcases.yaml
        self.parser_testcase_config(chaos_yaml)

        # wait a while for the op stats to accumulate (120s in full runs)
        sleep(1)

        # assert statistic: all ops succeed
        assert_statistic(self.health_checkers)

        # reset counting
        reset_counting(self.health_checkers)

        # apply chaos object
        # chaos_opt = ChaosOpt(chaos_config['kind'])
        # chaos_opt.create_chaos_object(chaos_config)

        # wait again (120s in full runs)
        sleep(1)

        # assert statistic
        assert_statistic(self.health_checkers,
                         expectations={
                             Op.create: self.expect_create,
                             Op.insert: self.expect_insert,
                             Op.flush: self.expect_flush,
                             Op.index: self.expect_index,
                             Op.search: self.expect_search,
                             Op.query: self.expect_query
                         })
        # delete chaos
        # meta_name = chaos_config.get('metadata', None).get('name', None)
        # chaos_opt.delete_chaos_object(meta_name)

        # reset counting again
        reset_counting(self.health_checkers)

        # wait for recovery (300s in full runs; varies by feature)
        sleep(1)

        # assert statistic: all ops succeed again
        assert_statistic(self.health_checkers)

        # terminate thread
        for ch in self.health_checkers.values():
            ch.terminate()
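
gen_experiment_config, used above, presumably just parses the chaos YAML into a dict; a minimal sketch assuming PyYAML:

import yaml

def gen_experiment_config(yaml_file):
    """sketch: load a chaos experiment definition from a yaml file"""
    with open(yaml_file) as f:
        return yaml.safe_load(f)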
Example #4
    def test_delete_insert_same_id_sealed(self, to_query):
        """
        target: test insert same id entity after delete from sealed data
        method: 1.create and insert with flush
                2.load and query with the id
                3.delete the id entity
                4.insert new entity with the same id and flush
                5.query the id
        expected: Verify that the query gets the newly inserted entity
        """
        # init collection
        collection_w = self.init_collection_wrap(
            name=cf.gen_unique_str(prefix))

        # insert
        df = cf.gen_default_dataframe_data(1000)
        collection_w.insert(df)
        log.debug(collection_w.num_entities)

        # load and query
        collection_w.load()
        res = df.iloc[:1, :1].to_dict('records')
        default_search_params = {"metric_type": "L2", "params": {"nprobe": 16}}
        collection_w.search(data=[df[ct.default_float_vec_field_name][0]],
                            anns_field=ct.default_float_vec_field_name,
                            param=default_search_params,
                            limit=1)
        collection_w.query(tmp_expr,
                           check_task=CheckTasks.check_query_results,
                           check_items={'exp_res': res})

        # delete
        collection_w.delete(tmp_expr)
        if to_query:
            collection_w.query(tmp_expr,
                               check_task=CheckTasks.check_query_empty)

        # re-insert
        df_new = cf.gen_default_dataframe_data(nb=1)
        collection_w.insert(df_new)
        log.debug(collection_w.num_entities)

        # re-query
        res = df_new.iloc[[0], [0, 2]].to_dict('records')
        collection_w.query(tmp_expr,
                           output_fields=[ct.default_float_vec_field_name],
                           check_task=CheckTasks.check_query_results,
                           check_items={
                               'exp_res': res,
                               'with_vec': True
                           })
        collection_w.search(data=[df_new[ct.default_float_vec_field_name][0]],
                            anns_field=ct.default_float_vec_field_name,
                            param=default_search_params,
                            limit=1)
Example #5
    def test_chaos_memory_stress_querynode(self, connection, chaos_yaml):
        """
        target: explore query node behavior after memory stress chaos injected and recovered
        method: 1. Create a collection, insert some data
                2. Inject memory stress chaos
                3. Start a thread to load, search and query
                4. After chaos duration, check query search success rate
                5. Delete the chaos object at the end, or let it finish
        expected: 1.If memory is insufficient, querynode is OOMKilled and becomes available again after restart
                  2.If memory is sufficient, the succ rate of query and search are both 1.0
        """
        c_name = 'chaos_memory_nx6DNW4q'
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(c_name)
        log.debug(collection_w.schema)
        log.debug(collection_w._shards_num)

        # apply memory stress chaos
        chaos_config = gen_experiment_config(chaos_yaml)
        log.debug(chaos_config)
        chaos_res = CusResource(kind=chaos_config['kind'],
                                group=constants.CHAOS_GROUP,
                                version=constants.CHAOS_VERSION,
                                namespace=constants.CHAOS_NAMESPACE)
        chaos_res.create(chaos_config)
        log.debug("chaos injected")
        duration = chaos_config.get('spec').get('duration')
        duration = duration.replace('h', '*3600+').replace('m', '*60+').replace('s', '*1+') + '+0'
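        # e.g. '90s' -> '90*1+' + '+0' == '90*1++0'; eval() reads the trailing
        # '+0' as a unary plus, so eval(duration) below yields the seconds as an int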
        meta_name = chaos_config.get('metadata').get('name')
        # wait for the memory stress to take effect
        sleep(constants.WAIT_PER_OP * 2)

        # try release, load, query and search in a loop for the chaos duration
        try:
            start = time.time()
            while time.time() - start < eval(duration):
                collection_w.release()
                collection_w.load()

                term_expr = f'{ct.default_int64_field_name} in {[random.randint(0, 100)]}'
                query_res, _ = collection_w.query(term_expr)
                assert len(query_res) == 1

                search_res, _ = collection_w.search(cf.gen_vectors(1, ct.default_dim),
                                                    ct.default_float_vec_field_name,
                                                    ct.default_search_params, ct.default_limit)
                log.debug(search_res[0].ids)
                assert len(search_res[0].ids) == ct.default_limit


        finally:
            chaos_res.delete(meta_name)
Example #6
    def test_expand_index_node(self):
        """
        target: test expand indexNode from 1 to 2
        method: 1.deploy cluster with one indexNode
                2.create index and record the cost
                3.expand indexNode from 1 to 2
                4.create index again and record the cost
        expected: the cost with one indexNode is about twice the cost with two indexNodes
        """
        release_name = "scale-index"
        env = HelmEnv(release_name=release_name)
        host = env.helm_install_cluster_milvus()

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')

        data = cf.gen_default_dataframe_data(nb)

        # create
        c_name = "index_scale_one"
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        # insert
        loop = 10
        for i in range(loop):
            collection_w.insert(data)
        assert collection_w.num_entities == nb * loop

        # create index with one indexNode and record the cost
        start = datetime.datetime.now()
        collection_w.create_index(ct.default_float_vec_field_name,
                                  default_index_params)
        assert collection_w.has_index()[0]
        t0 = datetime.datetime.now() - start

        log.debug(f't0: {t0}')

        collection_w.drop_index()
        assert not collection_w.has_index()[0]

        # expand indexNode from 1 to 2
        env.helm_upgrade_cluster_milvus(indexNode=2)

        start = datetime.datetime.now()
        collection_w.create_index(ct.default_float_vec_field_name,
                                  default_index_params)
        assert collection_w.has_index()[0]
        t1 = datetime.datetime.now() - start

        log.debug(f't1: {t1}')
        assert round(t0 / t1) == 2
Example #7
    def test_memory_stress_replicas_cross_group_load_balance(self, prepare_collection):
        """
        target: test that memory stress on one replica group does not trigger load balance across groups
        method: 1.Limit all querynodes memory 6Gi
                2.Create and insert 1,000,000 entities
                3.Load collection with two replicas
                4.Apply 80% memory stress on one group
        expected: Verify that load balancing across groups is not occurring
        """
        collection_w = prepare_collection
        utility_w = ApiUtilityWrapper()
        release_name = "mic-memory"

        # load and search
        collection_w.load(replica_number=2)
        progress, _ = utility_w.loading_progress(collection_w.name)
        assert progress["loading_progress"] == "100%"
        seg_info_before, _ = utility_w.get_query_segment_info(collection_w.name)

        # get the replica and random chaos querynode
        replicas, _ = collection_w.get_replicas()
        group_nodes = list(replicas.groups[0].group_nodes)
        label = f"app.kubernetes.io/instance={release_name}, app.kubernetes.io/component=querynode"
        querynode_id_pod_pair = get_querynode_id_pod_pairs("chaos-testing", label)
        group_nodes_pod = [querynode_id_pod_pair[node_id] for node_id in group_nodes]

        # apply memory stress
        chaos_config = gen_experiment_config("./chaos_objects/memory_stress/chaos_replicas_memory_stress_pods.yaml")
        chaos_config['spec']['selector']['pods']['chaos-testing'] = group_nodes_pod
        log.debug(chaos_config)
        chaos_res = CusResource(kind=chaos_config['kind'],
                                group=constants.CHAOS_GROUP,
                                version=constants.CHAOS_VERSION,
                                namespace=constants.CHAOS_NAMESPACE)
        chaos_res.create(chaos_config)
        log.debug(f"Apply memory stress on querynode {group_nodes}, pod {group_nodes_pod}")

        duration = chaos_config.get('spec').get('duration')
        duration = duration.replace('h', '*3600+').replace('m', '*60+').replace('s', '*1+') + '+0'
        sleep(eval(duration))

        chaos_res.delete(metadata_name=chaos_config.get('metadata', {}).get('name'))

        # verify no load balance happened across replica groups
        seg_info_after, _ = utility_w.get_query_segment_info(collection_w.name)
        seg_distribution_before = cf.get_segment_distribution(seg_info_before)
        seg_distribution_after = cf.get_segment_distribution(seg_info_after)
        for node_id in group_nodes:
            assert len(seg_distribution_before[node_id]) == len(seg_distribution_after[node_id])

        search_res, _ = collection_w.search(cf.gen_vectors(1, dim=self.dim),
                                            ct.default_float_vec_field_name, ct.default_search_params,
                                            ct.default_limit, timeout=120)
        assert 1 == len(search_res) and ct.default_limit == len(search_res[0])
Example #8
def construct_from_data(collection_name,
                        h5_path='./testdata/random_data_10000.h5'):
    import pandas as pd
    df = pd.read_hdf(h5_path, key='df')
    collection_w = ApiCollectionWrapper()
    collection_w.construct_from_dataframe(
        collection_name,
        dataframe=df,
        primary_field=ct.default_int64_field_name)
    log.debug(collection_w.num_entities)
    return collection_w
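
A usage sketch (the collection name is illustrative; the h5 path defaults to the fixture above):

collection_w = construct_from_data("bulk_from_h5_demo")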
Example #9
 def test_query_expr_non_constant_array_term(self, constant):
     """
     target: test query with non-constant array term expr
     method: query with non-constant array expr
     expected: raise exception
     """
     collection_w, vectors, _ = self.init_collection_general(prefix, insert_data=True)
     term_expr = f'{ct.default_int64_field_name} in [{constant}]'
     log.debug(term_expr)
     error = {ct.err_code: 1, ct.err_msg: "unsupported leaf node"}
     collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
Example #10
 def inner_wrapper(*args, **kwargs):
     try:
         res = func(*args, **kwargs)
         log.debug("(api_response) Response : %s " %
                   str(res)[0:log_row_length])
         return res, True
     except Exception as e:
         log.error(traceback.format_exc())
         log.error("(api_response) [Milvus API Exception]%s: %s" %
                   (str(func), str(e)[0:log_row_length]))
         return Error(e), False
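
inner_wrapper closes over func and log_row_length, so it presumably lives inside a decorator; a minimal sketch of that assumed outer layer (Error comes from the same module):

def api_request_catch(log_row_length=1024):
    """hypothetical decorator factory guarding Milvus API calls"""
    def wrapper(func):
        def inner_wrapper(*args, **kwargs):
            # as in the example above: (result, True) on success, (Error(e), False) on failure
            try:
                return func(*args, **kwargs), True
            except Exception as e:
                return Error(e), False
        return inner_wrapper
    return wrapper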
Example #11
 def run_task(self):
     if self.recheck_failed_task and self.failed_tasks:
         self.c_name = self.failed_tasks.pop(0)
         log.debug(f"check failed task: {self.c_name}")
     else:
         self.c_name = cf.gen_unique_str("BulkLoadChecker_")
     self.c_wrap.init_collection(name=self.c_name, schema=self.schema)
     # import data
     task_ids, completed = self.bulk_load()
     if not completed:
         self.failed_tasks.append(self.c_name)
     return task_ids, completed
Example #12
 def test_query_output_binary_vec_field(self):
     """
     target: test query with binary vec output field
     method: specify binary vec field as output field
     expected: return primary field and binary vec field
     """
     collection_w, vectors = self.init_collection_general(prefix, insert_data=True, is_binary=True)[0:2]
     log.debug(collection_w.schema)
     fields = [[ct.default_binary_vec_field_name], [ct.default_int64_field_name, ct.default_binary_vec_field_name]]
     for output_fields in fields:
         res, _ = collection_w.query(default_term_expr, output_fields=output_fields)
         assert list(res[0].keys()) == fields[-1]
Example #13
    def test_load_default(self):
        # create 50 collections
        for i in range(50):
            name = f"load_collection2_{i}"
            self.init_collection_wrap(name=name)
        log.debug(f"total collections: {len(utility.list_collections())}")
Example #14
def api_request(_list, **kwargs):
    if isinstance(_list, list):
        func = _list[0]
        if callable(func):
            arg = _list[1:]
            log.debug("(api_request) Request: [%s] args: %s, kwargs: %s" %
                      (str(func), str(arg)[0:log_row_length], str(kwargs)))
            return func(*arg, **kwargs)
    return False, False
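
A usage sketch (collection and df are hypothetical; api_request forwards kwargs to the wrapped call):

res = api_request([collection.insert, df], timeout=10)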
Example #15
 def test_delete_expr_none(self):
     """
     target: test delete with None expr
     method: delete with None expr
     expected: todo
     """
     # init collection with tmp_nb default data
     collection_w = self.init_collection_general(prefix,
                                                 nb=tmp_nb,
                                                 insert_data=True)[0]
     collection_w.delete(None)
     log.debug(collection_w.num_entities)
Example #16
    def test_chaos_memory_stress_indexnode(self, connection, chaos_yaml):
        """
        target: test inject memory stress into indexnode
        method: 1.Deploy milvus and limit indexnode memory resource 1Gi
                2.Create collection and insert some data
                3.Create index
                4.Inject memory stress chaos 512Mi
        expected:
        """
        # init collection and insert data
        nb = 50000  # vector data 512*4*nb is about 100Mi; creating the index needs ~600Mi memory
        dim = 512
        c_name = cf.gen_unique_str('chaos_memory')
        index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 128}}

        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema(dim=dim), shards_num=1)

        # insert nb 512-dim entities in two batches, about 100Mi of vectors
        for i in range(2):
            t0_insert = datetime.datetime.now()
            df = cf.gen_default_dataframe_data(nb=nb // 2, dim=dim)
            res = collection_w.insert(df)[0]
            assert res.insert_count == nb // 2
            # log.info(f'After {i + 1} insert, num_entities: {collection_w.num_entities}')
            tt_insert = datetime.datetime.now() - t0_insert
            log.info(f"{i} insert data cost: {tt_insert}")

        # flush
        t0_flush = datetime.datetime.now()
        assert collection_w.num_entities == nb
        tt_flush = datetime.datetime.now() - t0_flush
        log.info(f'flush {nb} entities cost: {tt_flush}')

        # create index
        t0_index = datetime.datetime.now()
        index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
                                             index_params=index_params)
        tt_index = datetime.datetime.now() - t0_index

        log.info(f"create index cost: {tt_index}")
        log.info(collection_w.indexes)

        # indexNode has started building the index; inject memory stress chaos
        chaos_config = gen_experiment_config(chaos_yaml)
        log.debug(chaos_config)
        chaos_res = CusResource(kind=chaos_config['kind'],
                                group=constants.CHAOS_GROUP,
                                version=constants.CHAOS_VERSION,
                                namespace=constants.CHAOS_NAMESPACE)
        chaos_res.create(chaos_config)
        log.debug("inject chaos")
    def test_chaos_memory_stress_etcd(self, chaos_yaml):
        """
        target: test inject memory stress into all etcd pods
        method: 1.Deploy milvus and limit etcd memory resource to 1Gi with 'all' mode
                2.Continuously and concurrently do milvus operations
                3.Inject memory stress chaos 51024Mi
                4.After duration, delete chaos stress
        expected: Verify milvus operation succ rate
        """
        mic_checkers = {
            Op.create: CreateChecker(),
            Op.insert: InsertFlushChecker(),
            Op.flush: InsertFlushChecker(flush=True),
            Op.index: IndexChecker(),
            Op.search: SearchChecker(),
            Op.query: QueryChecker()
        }
        # start thread keep running milvus op
        start_monitor_threads(mic_checkers)

        # parse chaos object
        chaos_config = cc.gen_experiment_config(chaos_yaml)
        meta_name = chaos_config.get('metadata').get('name')
        duration = chaos_config.get('spec').get('duration')

        # apply chaos object
        chaos_res = CusResource(kind=chaos_config['kind'],
                                group=constants.CHAOS_GROUP,
                                version=constants.CHAOS_VERSION,
                                namespace=constants.CHAOS_NAMESPACE)
        chaos_res.create(chaos_config)
        log.info("Chaos injected")

        # convert the duration string to a number of seconds (evaluated below)
        if isinstance(duration, str):
            duration = duration.replace('h', '*3600+').replace(
                'm', '*60+').replace('s', '*1+') + '+0'
        else:
            log.error("Duration must be string type")
            raise TypeError("duration must be a string")

        # Delete experiment after it's over
        timer = threading.Timer(interval=eval(duration),
                                function=chaos_res.delete,
                                args=(meta_name, False))
        timer.start()
        timer.join()

        # output milvus op succ rate
        for k, ch in mic_checkers.items():
            log.debug(f'Succ rate of {k.value}: {ch.succ_rate()}')
            assert ch.succ_rate() == 1.0
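
start_monitor_threads, used here and in the chaos tests above, presumably runs each checker's keep_running loop in a daemon thread; a minimal sketch:

import threading

def start_monitor_threads(checkers):
    """sketch: one daemon thread per checker"""
    for op, checker in checkers.items():
        threading.Thread(target=checker.keep_running, name=str(op), daemon=True).start()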
Example #18
 def get(self, metadata_name):
     """get a customer resources by name in k8s"""
     try:
         config.load_kube_config()
         api_instance = client.CustomObjectsApi()
         api_response = api_instance.get_namespaced_custom_object(self.group, self.version,
                                                                  self.namespace, self.plural,
                                                                  name=metadata_name)
         log.debug(f"get custom resource response: {api_response}")
     except ApiException as e:
         log.error("Exception when calling CustomObjectsApi->get_namespaced_custom_object: %s\n" % e)
         raise Exception(str(e))
     return api_response
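
The CusResource methods in these examples assume a constructor like the one called in the chaos tests above; a minimal sketch (the kind-to-plural mapping is an assumption):

class CusResource:
    def __init__(self, kind, group, version, namespace):
        self.group = group
        self.version = version
        self.namespace = namespace
        # assumption: the CRD plural is the lower-cased kind, e.g. 'PodChaos' -> 'podchaos'
        self.plural = kind.lower()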
Example #19
 def list_all(self):
     """list all the customer resources in k8s"""
     pretty = 'true'
     try:
         config.load_kube_config()
         api_instance = client.CustomObjectsApi()
         data = api_instance.list_namespaced_custom_object(self.group, self.version, self.namespace,
                                                           plural=self.plural, pretty=pretty)
         log.debug(f"list custom resource response: {data}")
     except ApiException as e:
         log.error("Exception when calling CustomObjectsApi->list_namespaced_custom_object: %s\n" % e)
         raise Exception(str(e))
     return data
Example #20
 def inner_wrapper(*args, **kwargs):
     try:
         res = func(*args, **kwargs)
         log_res = str(res)[0:log_row_length] + '......' if len(
             str(res)) > log_row_length else str(res)
         log.debug("(api_response) : %s " % log_res)
         return res, True
     except Exception as e:
         log_e = str(e)[0:log_row_length] + '......' if len(
             str(e)) > log_row_length else str(e)
         log.error(traceback.format_exc())
         log.error("(api_response) : %s" % log_e)
         return Error(e), False
Example #21
 def create(self, body):
     """create or apply a custom resource in k8s"""
     pretty = 'true'
     config.load_kube_config()
     api_instance = client.CustomObjectsApi()
     try:
         api_response = api_instance.create_namespaced_custom_object(self.group, self.version, self.namespace,
                                                                     plural=self.plural, body=body, pretty=pretty)
         log.debug(f"create custom resource response: {api_response}")
     except ApiException as e:
         log.error("Exception when calling CustomObjectsApi->create_namespaced_custom_object: %s\n" % e)
         raise Exception(str(e))
     return api_response
Example #22
 def delete(self, metadata_name, raise_ex=True):
     """delete or uninstall a custom resource in k8s"""
     log.debug(f"delete custom resource: {metadata_name}")
     try:
         config.load_kube_config()
         api_instance = client.CustomObjectsApi()
         data = api_instance.delete_namespaced_custom_object(self.group, self.version, self.namespace, self.plural,
                                                             metadata_name)
         log.debug(f"delete custom resource response: {data}")
     except ApiException as e:
         if raise_ex:
             log.error("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e)
             raise Exception(str(e))
Example #23
    def keep_running(self):
        while True:
            c_name = self.c_wrap.name
            res, _ = self.c_wrap.get_replicas()
            # prepare load balance params
            # find a group which has multi nodes
            group_nodes = []
            for g in res.groups:
                if len(g.group_nodes) >= 2:
                    group_nodes = list(g.group_nodes)
                    break
            if not group_nodes:
                # no group with at least two nodes yet; retry later
                sleep(10)
                continue
            src_node_id = group_nodes[0]
            dst_node_ids = group_nodes[1:]
            res, _ = self.utility_wrap.get_query_segment_info(c_name)
            segment_distribution = cf.get_segment_distribution(res)
            sealed_segment_ids = segment_distribution[src_node_id]["sealed"]
            # load balance
            t0 = time.time()
            _, result = self.utility_wrap.load_balance(c_name, src_node_id,
                                                       dst_node_ids,
                                                       sealed_segment_ids)
            t1 = time.time()
            # get segments distribution after load balance
            time.sleep(3)
            res, _ = self.utility_wrap.get_query_segment_info(c_name)
            segment_distribution = cf.get_segment_distribution(res)
            sealed_segment_ids_after_load_balance = segment_distribution[
                src_node_id]["sealed"]
            check_1 = len(
                set(sealed_segment_ids)
                & set(sealed_segment_ids_after_load_balance)) == 0
            dst_sealed_segment_ids = []
            for dst_node_id in dst_node_ids:
                dst_sealed_segment_ids += segment_distribution[dst_node_id][
                    "sealed"]
            # assert sealed_segment_ids is a subset of dst_sealed_segment_ids
            check_2 = set(sealed_segment_ids).issubset(
                set(dst_sealed_segment_ids))

            if result and (check_1 and check_2):
                self.rsp_times.append(t1 - t0)
                self.average_time = (
                    (t1 - t0) + self.average_time * self._succ) / (self._succ +
                                                                   1)
                self._succ += 1
                log.debug(
                    f"load balance success, time: {t1 - t0:.4f}, average_time: {self.average_time:.4f}"
                )
            else:
                self._fail += 1
            sleep(10)
Example #24
def list_de_duplication(_list):
    if not isinstance(_list, list):
        log.error("[LIST_DE_DUPLICATION] Type of list(%s) is not a list." % str(_list))
        return _list

    # de-duplication of _list
    result = list(set(_list))

    # Keep the order of the elements unchanged
    result.sort(key=_list.index)

    log.debug("[LIST_DE_DUPLICATION] %s after removing the duplicate elements, the list becomes %s" % (
        str(_list), str(result)))
    return result
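
For example:

list_de_duplication([3, 1, 3, 2, 1])  # -> [3, 1, 2]: duplicates dropped, first-seen order kept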
Example #25
    def test_compact_after_binary_index(self):
        """
        target: test compact after create index
        method: 1.insert binary data into two segments
                2.create binary index
                3.compact
                4.search
        expected: Verify segment info and index info
        """
        # create collection with 1 shard and insert 2 segments
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), shards_num=1,
                                                 schema=cf.gen_default_binary_collection_schema())
        for i in range(2):
            df, _ = cf.gen_default_binary_dataframe_data()
            collection_w.insert(data=df)
            assert collection_w.num_entities == (i + 1) * ct.default_nb

        # create index
        collection_w.create_index(ct.default_binary_vec_field_name, ct.default_binary_index)
        log.debug(collection_w.index())

        # load and search
        collection_w.load()
        search_params = {"metric_type": "JACCARD", "params": {"nprobe": 32}}
        search_res_one, _ = collection_w.search(df[ct.default_binary_vec_field_name][:ct.default_nq].to_list(),
                                                ct.default_binary_vec_field_name, search_params, ct.default_limit)

        # compact
        collection_w.compact()
        collection_w.wait_for_compaction_completed()
        c_plans = collection_w.get_compaction_plans(check_task=CheckTasks.check_merge_compact)[0]

        # wait for handoff to complete, then search again
        cost = 30
        start = time()
        while True:
            sleep(5)
            segment_info = self.utility_wrap.get_query_segment_info(collection_w.name)[0]
            if len(segment_info) != 0 and segment_info[0].segmentID == c_plans.plans[0].target:
                log.debug(segment_info)
                break
            if time() - start > cost:
                raise MilvusException(1, f"Handoff after compact and index costs more than {cost}s")

        # verify search result
        search_res_two, _ = collection_w.search(df[ct.default_binary_vec_field_name][:ct.default_nq].to_list(),
                                                ct.default_binary_vec_field_name, search_params, ct.default_limit)
        assert len(search_res_two) == ct.default_nq
        for hits in search_res_two:
            assert len(hits) == ct.default_limit
Example #26
 def test_query(self):
     """
     target: test query
     method: query with term expr
     expected: verify query result
     """
     # create collection, insert default_nb, load collection
     collection_w, vectors, _ = self.init_collection_general(
         prefix, insert_data=True)
     int_values = vectors[0][ct.default_int64_field_name].values.tolist()
     pos = 5
     term_expr = f'{ct.default_int64_field_name} in {int_values[:pos]}'
     res, _ = collection_w.query(term_expr)
     log.debug(res)
Example #27
def api_request(_list, **kwargs):
    if isinstance(_list, list):
        func = _list[0]
        if callable(func):
            arg = _list[1:]
            arg_str = str(arg)
            log_arg = arg_str[0:log_row_length] + '......' if len(
                arg_str) > log_row_length else arg_str
            if kwargs.get("enable_traceback", True):
                log.debug("(api_request)  : [%s] args: %s, kwargs: %s" %
                          (func.__qualname__, log_arg, str(kwargs)))
            return func(*arg, **kwargs)
    return False, False
Example #28
 def test_insert_none(self):
     """
     target: test insert None
     method: data is None
     expected: the insert is a no-op and the collection stays empty
     """
     c_name = cf.gen_unique_str(prefix)
     collection_w = self.init_collection_wrap(name=c_name)
     mutation_res, _ = collection_w.insert(data=None)
     log.debug(f'mutation result: {mutation_res}')
     assert mutation_res.insert_count == 0
     assert len(mutation_res.primary_keys) == 0
     assert collection_w.is_empty
     assert collection_w.num_entities == 0
Example #29
def api_request(_list, **kwargs):
    if isinstance(_list, list):
        func = _list[0]
        if callable(func):
            arg = _list[1:]
            log_arg = str(arg)[0:log_row_length] + '......' if len(
                str(arg)) > log_row_length else str(arg)
            log.debug("(api_request)  : [%s] args: %s, kwargs: %s" %
                      (func.__qualname__, log_arg, str(kwargs)))
            return func(*arg, **kwargs)
    return False, False
Example #30
 def patch(self, metadata_name, body):
     """patch a custom resource in k8s"""
     config.load_kube_config()
     api_instance = client.CustomObjectsApi()
     try:
         api_response = api_instance.patch_namespaced_custom_object(self.group, self.version, self.namespace,
                                                                    plural=self.plural,
                                                                    name=metadata_name,
                                                                    body=body)
         log.debug(f"patch custom resource response: {api_response}")
     except ApiException as e:
         log.error("Exception when calling CustomObjectsApi->patch_namespaced_custom_object: %s\n" % e)
         raise Exception(str(e))
     return api_response