Exemplo n.º 1
0
    def test_shrink_index_node(self):
        """
        target: test shrink indexNode from 2 to 1
        method: 1.deploy two indexNode
                2.create index with two indexNode
                3.shrink indexNode from 2 to 1
                4.create index with 1 indexNode
        expected: The cost of one indexNode is about twice that of two indexNodes
        """
        release_name = "scale-index"
        # deploy a cluster that starts with two indexNode replicas
        env = HelmEnv(release_name=release_name, indexNode=2)
        host = env.helm_install_cluster_milvus()

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')

        # nb is a module-level row count — defined outside this view
        data = cf.gen_default_dataframe_data(nb)

        # create
        c_name = "index_scale_one"
        collection_w = ApiCollectionWrapper()
        # collection_w.init_collection(name=c_name)
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        # insert the same dataframe `loop` times so the dataset is large
        # enough for the index-build time difference to be measurable
        loop = 10
        for i in range(loop):
            collection_w.insert(data)
        assert collection_w.num_entities == nb * loop

        # create index on collection one and two
        start = datetime.datetime.now()
        collection_w.create_index(ct.default_float_vec_field_name,
                                  default_index_params)
        assert collection_w.has_index()[0]
        # t0: wall-clock index-build duration with two indexNodes
        t0 = datetime.datetime.now() - start

        log.debug(f'two indexNodes: {t0}')

        # drop the index so it can be rebuilt from scratch after scaling
        collection_w.drop_index()
        assert not collection_w.has_index()[0]

        # shrink indexNode from 2 to 1 (original comment said "expand")
        env.helm_upgrade_cluster_milvus(indexNode=1)

        start = datetime.datetime.now()
        collection_w.create_index(ct.default_float_vec_field_name,
                                  default_index_params)
        assert collection_w.has_index()[0]
        # t1: wall-clock index-build duration with a single indexNode
        t1 = datetime.datetime.now() - start

        log.debug(f'one indexNode: {t1}')
        log.debug(t1 / t0)
        # NOTE(review): asserting an exact 2x wall-clock ratio is inherently
        # flaky on shared infrastructure — consider a tolerance band instead
        assert round(t1 / t0) == 2
Exemplo n.º 2
0
    def test_expand_query_node(self):
        """
        target: test search correctness while expanding queryNode pods
        method: 1.deploy a cluster with the default single queryNode
                2.create/insert/load/search collection one
                3.expand queryNode pods from 1 to 2
                4.create/insert/load/search collection two with the same data
        expected: both searches succeed and return identical ids for the
                  identical query vectors
        """
        release_name = "scale-query"
        env = HelmEnv(release_name=release_name)
        host = env.helm_install_cluster_milvus()

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')

        # create
        c_name = "query_scale_one"
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        # insert
        data = cf.gen_default_list_data(ct.default_nb)
        mutation_res, _ = collection_w.insert(data)
        assert mutation_res.insert_count == ct.default_nb
        # # create index
        # collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
        # assert collection_w.has_index()[0]
        # assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
        #                                         default_index_params)
        collection_w.load()
        # vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(5)]
        # search with the first 5 entries of data[-1] — presumably the vector
        # column of the generated list data; verify gen_default_list_data layout
        res1, _ = collection_w.search(data[-1][:5],
                                      ct.default_float_vec_field_name,
                                      ct.default_search_params,
                                      ct.default_limit)

        # scale queryNode pod
        env.helm_upgrade_cluster_milvus(queryNode=2)

        # a second collection is created/searched after the expansion
        c_name_2 = "query_scale_two"
        collection_w2 = ApiCollectionWrapper()
        collection_w2.init_collection(
            name=c_name_2, schema=cf.gen_default_collection_schema())
        collection_w2.insert(data)
        assert collection_w2.num_entities == ct.default_nb
        collection_w2.load()
        res2, _ = collection_w2.search(data[-1][:5],
                                       ct.default_float_vec_field_name,
                                       ct.default_search_params,
                                       ct.default_limit)

        # same data + same query vectors must produce the same result ids
        assert res1[0].ids == res2[0].ids
Exemplo n.º 3
0
    def test_expand_data_node(self):
        """
        target: test create and insert api after expand dataNode pod
        method: 1.create collection a and insert df
                2.expand dataNode pod from 1 to 2
                3.verify collection a property and verify create and insert of new collection
        expected: both the old and the new collection create and insert correctly
        """
        # deploy all nodes one pod cluster milvus with helm
        release_name = "scale-data"
        env = HelmEnv(release_name=release_name)
        host = env.helm_install_cluster_milvus()

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')
        # create
        c_name = cf.gen_unique_str(prefix)
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        # insert
        data = cf.gen_default_list_data(ct.default_nb)
        mutation_res, _ = collection_w.insert(data)
        assert mutation_res.insert_count == ct.default_nb
        # scale dataNode to 2 pods
        env.helm_upgrade_cluster_milvus(dataNode=2)
        # after scale, assert data consistent
        assert utility.has_collection(c_name)
        assert collection_w.num_entities == ct.default_nb
        # assert new operations: a fresh collection works on the expanded cluster
        new_cname = cf.gen_unique_str(prefix)
        new_collection_w = ApiCollectionWrapper()
        new_collection_w.init_collection(
            name=new_cname, schema=cf.gen_default_collection_schema())
        new_mutation_res, _ = new_collection_w.insert(data)
        assert new_mutation_res.insert_count == ct.default_nb
        assert new_collection_w.num_entities == ct.default_nb
        # assert old collection ddl
        mutation_res_2, _ = collection_w.insert(data)
        # BUG FIX: previously re-asserted the stale `mutation_res` from the
        # first insert, leaving this second insert's result unchecked
        assert mutation_res_2.insert_count == ct.default_nb
        assert collection_w.num_entities == ct.default_nb * 2

        # clean up both collections
        collection_w.drop()
        new_collection_w.drop()
Exemplo n.º 4
0
    def test_shrink_proxy(self):
        """
        target: test shrink proxy pod from 2 to 1
        method: 1.deploy a cluster with two proxy pods
                2.run an end-to-end milvus workload
                3.shrink proxy pods from 2 to 1
                4.re-run the end-to-end workload on the same collection
        expected: the e2e workload succeeds both before and after the shrink
        """
        # bring up a cluster that starts with two proxy replicas
        helm_env = HelmEnv(release_name="scale-proxy", proxy=2)
        host = helm_env.helm_install_cluster_milvus()

        # full e2e pass (create/insert/search) against a fresh collection
        collection_name = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, collection_name)

        # shrink proxy replicas down to a single pod
        helm_env.helm_upgrade_cluster_milvus(proxy=1)

        # the existing collection must stay usable through the remaining proxy
        sc.e2e_milvus(host, collection_name, collection_exist=True)
Exemplo n.º 5
0
    def test_expand_proxy(self):
        """
        target: test milvus operation after proxy expand
        method: 1.deploy a cluster with the default single proxy pod
                2.run an end-to-end milvus workload
                3.expand proxy pods from 1 to 2
                4.re-run the end-to-end workload on the same collection
        expected: data stays consistent and operations succeed after the expand
        """
        # bring up a cluster with the default (single) proxy replica
        helm_env = HelmEnv(release_name="scale-proxy")
        host = helm_env.helm_install_cluster_milvus()

        # full e2e pass (create/insert/search) against a fresh collection
        collection_name = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, collection_name)

        # expand proxy replicas from 1 to 2
        helm_env.helm_upgrade_cluster_milvus(proxy=2)

        # the existing collection must stay usable through the expanded proxies
        sc.e2e_milvus(host, collection_name, collection_exist=True)
Exemplo n.º 6
0
    def test_shrink_data_node(self):
        """
        target: test shrink dataNode from 2 to 1
        method: 1.create collection and insert df 2. shrink dataNode 3.insert df
        expected: verify the property of collection which channel on shrink pod
        """
        release_name = "scale-data"
        # deploy a cluster that starts with two dataNode replicas
        env = HelmEnv(release_name=release_name, dataNode=2)
        host = env.helm_install_cluster_milvus(
            image_pull_policy=constants.IF_NOT_PRESENT)

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')

        # collection one: insert before the shrink
        c_name = "data_scale_one"
        data = cf.gen_default_list_data(ct.default_nb)
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        mutation_res, _ = collection_w.insert(data)
        assert mutation_res.insert_count == ct.default_nb
        assert collection_w.num_entities == ct.default_nb

        # collection two: its channel may live on the pod that gets removed
        c_name_2 = "data_scale_two"
        collection_w2 = ApiCollectionWrapper()
        collection_w2.init_collection(
            name=c_name_2, schema=cf.gen_default_collection_schema())
        mutation_res2, _ = collection_w2.insert(data)
        assert mutation_res2.insert_count == ct.default_nb
        assert collection_w2.num_entities == ct.default_nb

        # shrink dataNode from 2 to 1
        env.helm_upgrade_cluster_milvus(dataNode=1)

        # data written before the shrink must still be visible
        assert collection_w.num_entities == ct.default_nb
        mutation_res2, _ = collection_w2.insert(data)
        # consistency fix: assert the post-shrink insert result like every
        # other insert in this suite (previously left unchecked)
        assert mutation_res2.insert_count == ct.default_nb
        assert collection_w2.num_entities == ct.default_nb * 2
        collection_w.drop()
        collection_w2.drop()
Exemplo n.º 7
0
    def test_shrink_query_node(self):
        """
        target: test shrink queryNode from 2 to 1
        method: 1.deploy two queryNode
                2.search two collections in two queryNode
                3.upgrade queryNode from 2 to 1
                4.search second collection
        expected: search result is correct
        """
        # deploy
        release_name = "scale-query"
        env = HelmEnv(release_name=release_name, queryNode=2)
        host = env.helm_install_cluster_milvus(
            image_pull_policy=constants.IF_NOT_PRESENT)

        # connect
        connections.add_connection(default={"host": host, "port": 19530})
        connections.connect(alias='default')

        # collection one — nb/nq are module-level counts defined outside this view
        data = cf.gen_default_list_data(nb)
        c_name = "query_scale_one"
        collection_w = ApiCollectionWrapper()
        collection_w.init_collection(name=c_name,
                                     schema=cf.gen_default_collection_schema())
        collection_w.insert(data)
        assert collection_w.num_entities == nb
        collection_w.load()
        # query with the first nq entries of data[-1] — presumably the vector
        # column; the pk check below implies data[0] is the primary-key column
        res1, _ = collection_w.search(data[-1][:nq],
                                      ct.default_float_vec_field_name,
                                      ct.default_search_params,
                                      ct.default_limit)
        assert res1[0].ids[0] == data[0][0]

        # collection two
        c_name_2 = "query_scale_two"
        collection_w2 = ApiCollectionWrapper()
        collection_w2.init_collection(
            name=c_name_2, schema=cf.gen_default_collection_schema())
        collection_w2.insert(data)
        assert collection_w2.num_entities == nb
        collection_w2.load()
        res2, _ = collection_w2.search(data[-1][:nq],
                                       ct.default_float_vec_field_name,
                                       ct.default_search_params,
                                       ct.default_limit)
        assert res2[0].ids[0] == data[0][0]

        # scale queryNode pod (shrink 2 -> 1)
        env.helm_upgrade_cluster_milvus(queryNode=1)

        # search both collections again; segments from the removed pod must
        # have been taken over by the surviving queryNode
        res1, _ = collection_w.search(data[-1][:nq],
                                      ct.default_float_vec_field_name,
                                      ct.default_search_params,
                                      ct.default_limit)
        assert res1[0].ids[0] == data[0][0]
        res2, _ = collection_w2.search(data[-1][:nq],
                                       ct.default_float_vec_field_name,
                                       ct.default_search_params,
                                       ct.default_limit)
        assert res2[0].ids[0] == data[0][0]