def del_vectors(collection_name, ids):
    try:
        milvus = Milvus(host=MILVUS_ADDR, port=MILVUS_PORT)
        milvus.delete_entity_by_id(collection_name=collection_name, id_array=ids)
    except Exception as e:
        err_msg = "There was some error when deleting vectors"
        logger.error(f"{err_msg} : {str(e)}", exc_info=True)
        raise MilvusError(err_msg, e)
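# --- Usage sketch (added for illustration, not part of the original module) ---
# Assumes MILVUS_ADDR / MILVUS_PORT point at a running Milvus server and that the
# collection already contains entities with the ids below (both are made up here).
def example_del_vectors():
    ids_to_remove = [1, 2, 3]  # hypothetical entity ids
    try:
        del_vectors("demo_collection", ids_to_remove)
    except MilvusError as err:
        # del_vectors wraps any connection or deletion failure in MilvusError
        print("deletion failed:", err)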
class MilvusClient(object):
    def __init__(self, collection_name=None, host=None, port=None, timeout=180):
        self._collection_name = collection_name
        start_time = time.time()
        if not host:
            host = SERVER_HOST_DEFAULT
        if not port:
            port = SERVER_PORT_DEFAULT
        logger.debug(host)
        logger.debug(port)
        # retry connecting to the remote server
        i = 0
        while time.time() < start_time + timeout:
            try:
                self._milvus = Milvus(host=host, port=port, try_connect=False, pre_ping=False)
                break
            except Exception as e:
                logger.error(str(e))
                logger.error("Milvus connect failed: %d times" % i)
                i = i + 1
                time.sleep(i)
        if time.time() > start_time + timeout:
            raise Exception("Server connect timeout")
        # self._metric_type = None

    def __str__(self):
        return 'Milvus collection %s' % self._collection_name

    def check_status(self, status):
        if not status.OK():
            logger.error(status.message)
            logger.error(self._milvus.server_status())
            logger.error(self.count())
            raise Exception("Status not ok")

    def check_result_ids(self, result):
        for index, item in enumerate(result):
            if item[0].distance >= epsilon:
                logger.error(index)
                logger.error(item[0].distance)
                raise Exception("Distance wrong")

    # only support the given field name
    def create_collection(self, dimension, data_type=DataType.FLOAT_VECTOR, auto_id=False,
                          collection_name=None, other_fields=None):
        self._dimension = dimension
        if not collection_name:
            collection_name = self._collection_name
        vec_field_name = utils.get_default_field_name(data_type)
        fields = [{
            "name": vec_field_name,
            "type": data_type,
            "params": {"dim": dimension}
        }]
        if other_fields:
            other_fields = other_fields.split(",")
            if "int" in other_fields:
                fields.append({"name": utils.DEFAULT_INT_FIELD_NAME, "type": DataType.INT64})
            if "float" in other_fields:
                fields.append({"name": utils.DEFAULT_FLOAT_FIELD_NAME, "type": DataType.FLOAT})
        create_param = {"fields": fields, "auto_id": auto_id}
        try:
            self._milvus.create_collection(collection_name, create_param)
            logger.info("Create collection: <%s> successfully" % collection_name)
        except Exception as e:
            logger.error(str(e))
            raise

    def create_partition(self, tag, collection_name=None):
        if not collection_name:
            collection_name = self._collection_name
        self._milvus.create_partition(collection_name, tag)

    def generate_values(self, data_type, vectors, ids):
        values = None
        if data_type in [DataType.INT32, DataType.INT64]:
            values = ids
        elif data_type in [DataType.FLOAT, DataType.DOUBLE]:
            values = [(i + 0.0) for i in ids]
        elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
            values = vectors
        return values

    def generate_entities(self, vectors, ids=None, collection_name=None):
        entities = []
        if collection_name is None:
            collection_name = self._collection_name
        info = self.get_info(collection_name)
        for field in info["fields"]:
            field_type = field["type"]
            entities.append({
                "name": field["name"],
                "type": field_type,
                "values": self.generate_values(field_type, vectors, ids)
            })
        return entities

    @time_wrapper
    def insert(self, entities, ids=None, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        try:
            insert_ids = self._milvus.insert(tmp_collection_name, entities, ids=ids)
            return insert_ids
        except Exception as e:
            logger.error(str(e))

    def get_dimension(self):
        info = self.get_info()
        for field in info["fields"]:
            if field["type"] in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
                return field["params"]["dim"]

    def get_rand_ids(self, length):
        segment_ids = []
        while True:
            stats = self.get_stats()
            segments = stats["partitions"][0]["segments"]
            # randomly choose one segment
            segment = random.choice(segments)
            try:
                segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["id"])
            except Exception as e:
                logger.error(str(e))
            if not len(segment_ids):
                continue
            elif len(segment_ids) > length:
                return random.sample(segment_ids, length)
            else:
                logger.debug("Reset length: %d" % len(segment_ids))
                return segment_ids

    # def get_rand_ids_each_segment(self, length):
    #     res = []
    #     status, stats = self._milvus.get_collection_stats(self._collection_name)
    #     self.check_status(status)
    #     segments = stats["partitions"][0]["segments"]
    #     segments_num = len(segments)
    #     # randomly choose from each segment
    #     for segment in segments:
    #         status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"])
    #         self.check_status(status)
    #         res.extend(segment_ids[:length])
    #     return segments_num, res

    # def get_rand_entities(self, length):
    #     ids = self.get_rand_ids(length)
    #     status, get_res = self._milvus.get_entity_by_id(self._collection_name, ids)
    #     self.check_status(status)
    #     return ids, get_res

    def get(self):
        get_ids = random.randint(1, 1000000)
        self._milvus.get_entity_by_id(self._collection_name, [get_ids])

    @time_wrapper
    def get_entities(self, get_ids):
        get_res = self._milvus.get_entity_by_id(self._collection_name, get_ids)
        return get_res

    @time_wrapper
    def delete(self, ids, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        self._milvus.delete_entity_by_id(tmp_collection_name, ids)

    def delete_rand(self):
        delete_id_length = random.randint(1, 100)
        count_before = self.count()
        logger.debug("%s: length to delete: %d" % (self._collection_name, delete_id_length))
        delete_ids = self.get_rand_ids(delete_id_length)
        self.delete(delete_ids)
        self.flush()
        logger.info("%s: count after delete: %d" % (self._collection_name, self.count()))
        get_res = self._milvus.get_entity_by_id(self._collection_name, delete_ids)
        for item in get_res:
            assert not item
        # if count_before - len(delete_ids) < self.count():
        #     logger.error(delete_ids)
        #     raise Exception("Error occurred")

    @time_wrapper
    def flush(self, _async=False, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        self._milvus.flush([tmp_collection_name], _async=_async)

    @time_wrapper
    def compact(self, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        status = self._milvus.compact(tmp_collection_name)
        self.check_status(status)

    @time_wrapper
    def create_index(self, field_name, index_type, metric_type, _async=False, index_param=None):
        index_type = INDEX_MAP[index_type]
        metric_type = utils.metric_type_trans(metric_type)
        logger.info("Building index start, collection_name: %s, index_type: %s, metric_type: %s"
                    % (self._collection_name, index_type, metric_type))
        if index_param:
            logger.info(index_param)
        index_params = {
            "index_type": index_type,
            "metric_type": metric_type,
            "params": index_param
        }
        self._milvus.create_index(self._collection_name, field_name, index_params, _async=_async)

    # TODO: need to check
    def describe_index(self, field_name):
        # stats = self.get_stats()
        info = self._milvus.describe_index(self._collection_name, field_name)
        index_info = {"index_type": "flat", "index_param": None}
        for field in info["fields"]:
            for index in field['indexes']:
                if not index or "index_type" not in index:
                    continue
                else:
                    for k, v in INDEX_MAP.items():
                        if index['index_type'] == v:
                            index_info['index_type'] = k
                            index_info['index_param'] = index['params']
                            return index_info
        return index_info

    def drop_index(self, field_name):
        logger.info("Drop index: %s" % self._collection_name)
        return self._milvus.drop_index(self._collection_name, field_name)

    @time_wrapper
    def query(self, vector_query, filter_query=None, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        must_params = [vector_query]
        if filter_query:
            must_params.extend(filter_query)
        query = {"bool": {"must": must_params}}
        result = self._milvus.search(tmp_collection_name, query)
        return result

    @time_wrapper
    def load_and_query(self, vector_query, filter_query=None, collection_name=None):
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        must_params = [vector_query]
        if filter_query:
            must_params.extend(filter_query)
        query = {"bool": {"must": must_params}}
        self.load_collection(tmp_collection_name)
        result = self._milvus.search(tmp_collection_name, query)
        return result

    def get_ids(self, result):
        idss = result._entities.ids
        ids = []
        len_idss = len(idss)
        len_r = len(result)
        top_k = len_idss // len_r
        for offset in range(0, len_idss, top_k):
            ids.append(idss[offset:min(offset + top_k, len_idss)])
        return ids

    def query_rand(self, nq_max=100):
        # for ivf search
        dimension = 128
        top_k = random.randint(1, 100)
        nq = random.randint(1, nq_max)
        nprobe = random.randint(1, 100)
        search_param = {"nprobe": nprobe}
        query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
        metric_type = random.choice(["l2", "ip"])
        logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d"
                    % (self._collection_name, nq, top_k, nprobe))
        vec_field_name = utils.get_default_field_name()
        vector_query = {
            "vector": {
                vec_field_name: {
                    "topk": top_k,
                    "query": query_vectors,
                    "metric_type": utils.metric_type_trans(metric_type),
                    "params": search_param
                }
            }
        }
        self.query(vector_query)

    def load_query_rand(self, nq_max=100):
        # for ivf search
        dimension = 128
        top_k = random.randint(1, 100)
        nq = random.randint(1, nq_max)
        nprobe = random.randint(1, 100)
        search_param = {"nprobe": nprobe}
        query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
        metric_type = random.choice(["l2", "ip"])
        logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d"
                    % (self._collection_name, nq, top_k, nprobe))
        vec_field_name = utils.get_default_field_name()
        vector_query = {
            "vector": {
                vec_field_name: {
                    "topk": top_k,
                    "query": query_vectors,
                    "metric_type": utils.metric_type_trans(metric_type),
                    "params": search_param
                }
            }
        }
        self.load_and_query(vector_query)

    # TODO: need to check
    def count(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        row_count = self._milvus.get_collection_stats(collection_name)["row_count"]
        logger.debug("Row count: %d in collection: <%s>" % (row_count, collection_name))
        return row_count

    def drop(self, timeout=120, collection_name=None):
        timeout = int(timeout)
        if collection_name is None:
            collection_name = self._collection_name
        logger.info("Start delete collection: %s" % collection_name)
        self._milvus.drop_collection(collection_name)
        i = 0
        while i < timeout:
            try:
                row_count = self.count(collection_name=collection_name)
                if row_count:
                    time.sleep(1)
                    i = i + 1
                    continue
                else:
                    break
            except Exception as e:
                logger.debug(str(e))
                break
        if i >= timeout:
            logger.error("Delete collection timeout")

    def get_stats(self):
        return self._milvus.get_collection_stats(self._collection_name)

    def get_info(self, collection_name=None):
        # pdb.set_trace()
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.get_collection_info(collection_name)

    def show_collections(self):
        return self._milvus.list_collections()

    def exists_collection(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        res = self._milvus.has_collection(collection_name)
        return res

    def clean_db(self):
        collection_names = self.show_collections()
        for name in collection_names:
            self.drop(collection_name=name)

    @time_wrapper
    def load_collection(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.load_collection(collection_name, timeout=3000)

    @time_wrapper
    def release_collection(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.release_collection(collection_name, timeout=3000)

    @time_wrapper
    def load_partitions(self, tag_names, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.load_partitions(collection_name, tag_names, timeout=3000)

    @time_wrapper
    def release_partitions(self, tag_names, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.release_partitions(collection_name, tag_names, timeout=3000)
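# --- Usage sketch (illustrative only, not part of the original class) ---
# Assumes the module-level names used above (utils, DataType, INDEX_MAP, logger,
# SERVER_HOST_DEFAULT / SERVER_PORT_DEFAULT) are importable and a Milvus server is
# reachable. The collection name and the "ivf_flat" / "l2" keys are assumptions about
# the contents of INDEX_MAP and metric_type_trans.
def example_milvus_client_run():
    import random

    dim = 128
    client = MilvusClient(collection_name="sift_128_example")  # hypothetical collection name
    client.create_collection(dim, data_type=DataType.FLOAT_VECTOR, other_fields="int,float")

    # generate_entities builds one entry per field defined on the collection
    nb = 1000
    vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
    ids = list(range(nb))
    entities = client.generate_entities(vectors, ids)
    client.insert(entities, ids=ids)
    client.flush()

    # index the vector field, then run a random IVF search and clean up
    vec_field = utils.get_default_field_name(DataType.FLOAT_VECTOR)
    client.create_index(vec_field, "ivf_flat", "l2", index_param={"nlist": 1024})
    client.query_rand(nq_max=10)
    client.drop()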
def main():
    milvus = Milvus(_HOST, _PORT)
    # num = random.randint(1, 100000)
    num = 100000
    # Create collection demo_collection if it doesn't exist.
    collection_name = 'example_hybrid_collections_{}'.format(num)
    if milvus.has_collection(collection_name):
        milvus.drop_collection(collection_name)
    collection_param = {
        "fields": [
            {"field": "A", "type": DataType.INT32},
            {"field": "B", "type": DataType.INT32},
            {"field": "C", "type": DataType.INT64},
            {"field": "Vec", "type": DataType.FLOAT_VECTOR,
             "params": {"dim": 128, "metric_type": "L2"}}
        ],
        "segment_size": 100
    }
    milvus.create_collection(collection_name, collection_param)
    milvus.compact(collection_name)

    # milvus.create_partition(collection_name, "p_01", timeout=1800)
    # pars = milvus.list_partitions(collection_name)
    # ok = milvus.has_partition(collection_name, "p_01", timeout=1800)
    # assert ok
    # ok = milvus.has_partition(collection_name, "p_02")
    # assert not ok
    # for p in pars:
    #     if p == "_default":
    #         continue
    #     milvus.drop_partition(collection_name, p)
    # milvus.drop_collection(collection_name)
    # sys.exit(0)

    A_list = [random.randint(0, 255) for _ in range(num)]
    vec = [[random.random() for _ in range(128)] for _ in range(num)]
    hybrid_entities = [
        {"field": "A", "values": A_list, "type": DataType.INT32},
        {"field": "B", "values": A_list, "type": DataType.INT32},
        {"field": "C", "values": A_list, "type": DataType.INT64},
        {"field": "Vec", "values": vec, "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}}
    ]

    for slice_e in utils.entities_slice(hybrid_entities):
        ids = milvus.insert(collection_name, slice_e)
    milvus.flush([collection_name])
    print("Flush ... ")
    # time.sleep(3)
    count = milvus.count_entities(collection_name)

    milvus.delete_entity_by_id(collection_name, ids[:1])
    milvus.flush([collection_name])
    print("Get entity by id start ...... ")
    entities = milvus.get_entity_by_id(collection_name, ids[:1])
    et = entities.dict()
    milvus.delete_entity_by_id(collection_name, ids[1:2])
    milvus.flush([collection_name])

    print("Create index ......")
    milvus.create_index(collection_name, "Vec",
                        {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}})
    print("Create index done.")

    info = milvus.get_collection_info(collection_name)
    print(info)
    stats = milvus.get_collection_stats(collection_name)
    print("\nstats\n")
    print(stats)

    query_hybrid = {
        "bool": {
            "must": [
                {"term": {"A": [1, 2, 5]}},
                {"range": {"B": {"GT": 1, "LT": 100}}},
                {"vector": {"Vec": {"topk": 10, "query": vec[:10000], "params": {"nprobe": 10}}}}
            ],
        },
    }

    # print("Start search ..", flush=True)
    # results = milvus.search(collection_name, query_hybrid)
    # print(results)
    #
    # for r in list(results):
    #     print("ids", r.ids)
    #     print("distances", r.distances)

    t0 = time.time()
    count = 0
    results = milvus.search(collection_name, query_hybrid, fields=["B"])
    for r in list(results):
        # print("ids", r.ids)
        # print("distances", r.distances)
        for rr in r:
            count += 1
            # print(rr.entity.get("B"))
    print("Search cost {} s".format(time.time() - t0))

    # for result in results:
    #     for r in result:
    #         print(f"{r}")

    # iterate over entity ids
    # for result in results:
    #     for r in result:
    #         # get distance
    #         dis = r.distance
    #         id_ = r.id
    #         # obtain all field names
    #         fields = r.entity.fields
    #         for f in fields:
    #             # get field value by field name
    #             # fv = r.entity.
    #             fv = r.entity.value_of_field(f)
    #             print(fv)

    milvus.drop_collection(collection_name)
for entities in results:
    for topk_film in entities:
        current_entity = topk_film.entity
        print("- id: {}".format(topk_film.id))
        print("- distance: {}".format(topk_film.distance))
        print("- release_year: {}".format(current_entity.release_year))
        print("- duration: {}".format(current_entity.duration))
        print("- embedding: {}".format(current_entity.embedding))

# ------
# Basic delete:
#     Now let's see how to delete things in Milvus.
#     You can simply delete entities by their ids.
# ------
client.delete_entity_by_id(collection_name, ids=[1, 4])
client.flush()  # flush is important

result = client.get_entity_by_id(collection_name, ids=[1, 4])
counts_delete = sum([1 for entity in result if entity is not None])
counts_in_collection = client.count_entities(collection_name)
print("\n----------delete id = 1, id = 4----------")
print("Get {} entities by id 1, 4".format(counts_delete))
print("There are {} entities after deleting films with id 1 and 4".format(counts_in_collection))

# ------
# Basic delete:
#     You can drop the partitions and the collection we created.
# ------
client.drop_partition(collection_name, partition_tag='American')
if collection_name in client.list_collections():
class MilvusHelper(BaseVectorSimilarityHelper):
    def __init__(self, _server_url, _server_port, _timeout=10):
        super().__init__()
        self.server_url = _server_url
        self.server_port = _server_port
        self.timeout = _timeout
        self.client = None
        self.metric_type_mapper = {
            VectorMetricType.L2: MetricType.L2,
            VectorMetricType.IP: MetricType.IP,
            VectorMetricType.JACCARD: MetricType.JACCARD,
            VectorMetricType.HAMMING: MetricType.HAMMING,
        }
        self.index_type_mapper = {
            VectorIndexType.FLAT: IndexType.FLAT,
            VectorIndexType.IVFLAT: IndexType.IVFLAT,
            VectorIndexType.IVF_SQ8: IndexType.IVF_SQ8,
            VectorIndexType.RNSG: IndexType.RNSG,
            VectorIndexType.IVF_SQ8H: IndexType.IVF_SQ8H,
            VectorIndexType.IVF_PQ: IndexType.IVF_PQ,
            VectorIndexType.HNSW: IndexType.HNSW,
            VectorIndexType.ANNOY: IndexType.ANNOY,
        }

    def init(self):
        if self.client is None:
            if not (self.server_url is None or self.server_port is None):
                try:
                    self.client = Milvus(host=self.server_url, port=self.server_port)
                except:
                    raise MilvusRuntimeException(f'cannot connect to {self.server_url}:{self.server_port}')
            else:
                raise MilvusRuntimeException('Milvus config is not correct')

    def insert(self, _database_name, _to_insert_vector, _partition_tag=None, _params=None):
        """
        Insert a batch of feature vectors into the database.

        Note: if you have your own ids, use insert_with_id instead.
        ATTENTION: a collection must use either insert_with_id or insert, never both,
        otherwise an error is raised.

        Args:
            _database_name: database (collection) name
            _to_insert_vector: list of feature vectors to insert
            _partition_tag: partition tag
            _params: insert parameters

        Returns:
            ids of the inserted vectors
        """
        self.init()
        status, ids = self.client.insert(_database_name, _to_insert_vector, partition_tag=_partition_tag,
                                         params=_params, timeout=self.timeout)
        self.flush(_database_name)
        if status.OK():
            return ids
        else:
            raise MilvusRuntimeException(status.message)

    def insert_with_id(self, _database_name, _to_insert_vector, _to_insert_ids, _partition_tag=None, _params=None):
        """
        Insert a batch of feature vectors with fixed ids into the database.

        ATTENTION: a collection must use either insert_with_id or insert, never both,
        otherwise an error is raised.

        Args:
            _database_name: database (collection) name
            _to_insert_vector: list of feature vectors to insert
            _to_insert_ids: list of ids for the vectors; each must be a positive integer within range
            _partition_tag: partition tag
            _params: insert parameters

        Returns:
            ids of the inserted vectors
        """
        self.init()
        status, ids = self.client.insert(_database_name, _to_insert_vector, ids=_to_insert_ids,
                                         partition_tag=_partition_tag, params=_params, timeout=self.timeout)
        self.flush(_database_name)
        if status.OK():
            return ids
        else:
            raise MilvusRuntimeException(status.message)

    def delete(self, _database_name, _to_delete_ids):
        """
        Delete vectors by id.

        Args:
            _database_name: database (collection) name
            _to_delete_ids: ids to delete

        Returns:
            whether the deletion succeeded
        """
        self.init()
        status = self.client.delete_entity_by_id(_database_name, _to_delete_ids, self.timeout)
        self.flush(_database_name)
        if status.OK():
            return True
        else:
            raise MilvusRuntimeException(status.message)

    def database_exist(self, _database_name):
        """
        Check whether the database exists.

        Args:
            _database_name: database (collection) name

        Returns:
            whether it exists
        """
        self.init()
        status, is_exist = self.client.has_collection(_database_name, self.timeout)
        if status.OK():
            return is_exist
        else:
            raise MilvusRuntimeException(status.message)

    def create_database(self, _database_name, _dimension, _index_file_size, _metric_type):
        """
        Create a database.

        Args:
            _database_name: database (collection) name
            _dimension: feature vector dimension
            _index_file_size: index file size
            _metric_type: metric type

        Returns:
            whether the creation succeeded
        """
        self.init()
        if not self.database_exist(_database_name):
            assert _metric_type in self.metric_type_mapper, f'{_metric_type} not supported in milvus'
            status = self.client.create_collection({
                'collection_name': _database_name,
                'dimension': _dimension,
                'index_file_size': _index_file_size,
                'metric_type': self.metric_type_mapper[_metric_type]
            })
            if status.OK():
                return True
            else:
                raise MilvusRuntimeException(status.message)
        else:
            return True

    def create_index(self, _database_name, _index_type):
        """
        Create an index.

        Args:
            _database_name: database (collection) name
            _index_type: index type

        Returns:
            whether the creation succeeded
        """
        self.init()
        if self.database_exist(_database_name):
            assert _index_type in self.index_type_mapper, f'{_index_type} not supported in milvus'
            status = self.client.create_index(_database_name, self.index_type_mapper[_index_type],
                                              timeout=self.timeout)
            if status.OK():
                return True
            else:
                raise MilvusRuntimeException(status.message)

    def search(self, _database_name, _query_vector_list, _top_k, _partition_tag=None, _params=None):
        """
        Search the database.

        Args:
            _database_name: database (collection) name
            _query_vector_list: list of query feature vectors
            _top_k: top k
            _partition_tag: partition tag
            _params: search parameters

        Returns:
            search results, containing ids and distances
        """
        self.init()
        if self.database_exist(_database_name):
            status, search_result = self.client.search(_database_name, _top_k, _query_vector_list,
                                                       partition_tags=_partition_tag, params=_params,
                                                       timeout=self.timeout)
            if status.OK():
                return search_result
            else:
                raise MilvusRuntimeException(status.message)
        else:
            raise DatabaseNotExist(f'{_database_name} not exist')

    def flush(self, _database_name):
        """
        Flush the database to persistent storage.

        Args:
            _database_name: database (collection) name

        Returns:
            whether the flush succeeded
        """
        self.init()
        status = self.client.flush([_database_name, ], self.timeout)
        if status.OK():
            return True
        else:
            raise MilvusRuntimeException(status.message)

    def __del__(self):
        if self.client is not None:
            self.client.close()
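# --- Usage sketch (illustrative only, not part of the original helper) ---
# Assumes VectorMetricType / VectorIndexType from the surrounding project and a
# reachable Milvus server; the collection name and vectors below are made up.
def example_milvus_helper():
    import random

    helper = MilvusHelper('127.0.0.1', '19530')
    helper.create_database('demo_vectors', _dimension=128, _index_file_size=1024,
                           _metric_type=VectorMetricType.L2)

    vectors = [[random.random() for _ in range(128)] for _ in range(100)]
    ids = helper.insert('demo_vectors', vectors)  # let Milvus assign ids
    helper.create_index('demo_vectors', VectorIndexType.IVF_SQ8)

    # top-5 nearest neighbours of the first vector
    results = helper.search('demo_vectors', [vectors[0]], _top_k=5)
    print(results)
    helper.delete('demo_vectors', ids[:10])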
class Indexer:
    '''
    Indexer.
    '''

    def __init__(self, name, host='127.0.0.1', port='19531'):
        '''
        Initialize.
        '''
        self.client = Milvus(host=host, port=port)
        self.collection = name

    def init(self, lenient=False):
        '''
        Create the collection.
        '''
        if lenient:
            status, result = self.client.has_collection(collection_name=self.collection)
            if status.code != 0:
                raise ExertMilvusException(status)
            if result:
                return
        status = self.client.create_collection({
            'collection_name': self.collection,
            'dimension': 512,
            'index_file_size': 1024,
            'metric_type': MetricType.L2
        })
        if status.code != 0 and not (lenient and status.code == 9):
            raise ExertMilvusException(status)
        # Create the index.
        status = self.client.create_index(collection_name=self.collection,
                                          index_type=IndexType.IVF_FLAT,
                                          params={'nlist': 16384})
        if status.code != 0:
            raise ExertMilvusException(status)
        return status

    def drop(self):
        '''
        Drop the collection.
        '''
        status = self.client.drop_collection(collection_name=self.collection)
        if status.code != 0:
            raise ExertMilvusException(status)

    def flush(self):
        '''
        Flush to disk.
        '''
        status = self.client.flush([self.collection])
        if status.code != 0:
            raise ExertMilvusException(status)

    def compact(self):
        '''
        Compact the collection.
        '''
        status = self.client.compact(collection_name=self.collection)
        if status.code != 0:
            raise ExertMilvusException(status)

    def close(self):
        '''
        Close the connection.
        '''
        self.client.close()

    def new_tag(self, tag):
        '''
        Create a partition tag.
        '''
        status = self.client.create_partition(collection_name=self.collection, partition_tag=tag)
        if status.code != 0:
            raise ExertMilvusException(status)

    def list_tag(self):
        '''
        List partition tags.
        '''
        status, result = self.client.list_partitions(collection_name=self.collection)
        if status.code != 0:
            raise ExertMilvusException(status)
        return result

    def drop_tag(self, tag):
        '''
        Drop a partition tag.
        '''
        status = self.client.drop_partition(collection_name=self.collection, partition_tag=tag)
        if status.code != 0:
            raise ExertMilvusException(status)

    def index(self, vectors, tag=None, ids=None):
        '''
        Add vectors to the index.
        '''
        params = {}
        if tag is not None:
            params['partition_tag'] = tag
        if ids is not None:
            params['ids'] = ids
        status, result = self.client.insert(collection_name=self.collection, records=vectors, **params)
        if status.code != 0:
            raise ExertMilvusException(status)
        return result

    def listing(self, ids):
        '''
        Fetch entities by id.
        '''
        status, result = self.client.get_entity_by_id(collection_name=self.collection, ids=ids)
        if status.code != 0:
            raise ExertMilvusException(status)
        return result

    def counting(self):
        '''
        Count indexed entities.
        '''
        status, result = self.client.count_entities(collection_name=self.collection)
        if status.code != 0:
            raise ExertMilvusException(status)
        return result

    def unindex(self, ids):
        '''
        Remove entities from the index.
        '''
        status = self.client.delete_entity_by_id(collection_name=self.collection, id_array=ids)
        if status.code != 0:
            raise ExertMilvusException(status)

    def search(self, vectors, top_count=100, tags=None):
        '''
        Search.
        '''
        params = {'params': {'nprobe': 16}}
        if tags is not None:
            params['partition_tags'] = tags
        status, results = self.client.search(collection_name=self.collection, query_records=vectors,
                                             top_k=top_count, **params)
        if status.code != 0:
            raise ExertMilvusException(status)
        return results
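# --- Usage sketch (illustrative only, not part of the original class) ---
# Assumes a Milvus server on the default host/port used above; the 512-dim vectors
# match the dimension hard-coded in init(), and the collection name is hypothetical.
def example_indexer():
    import random

    indexer = Indexer('face_features')
    indexer.init(lenient=True)  # create the collection and IVF_FLAT index if missing

    vectors = [[random.random() for _ in range(512)] for _ in range(10)]
    ids = indexer.index(vectors)  # insert and get back the assigned ids
    indexer.flush()

    hits = indexer.search(vectors[:1], top_count=5)
    print(hits)
    indexer.unindex(ids)  # remove them again
    indexer.close()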
class MilvusClient(object):
    def __init__(self, collection_name=None, host=None, port=None, timeout=60):
        """
        Milvus client wrapper for python-sdk. Default timeout is 60 s.
        """
        self._collection_name = collection_name
        try:
            start_time = time.time()
            if not host:
                host = SERVER_HOST_DEFAULT
            if not port:
                port = SERVER_PORT_DEFAULT
            logger.debug(host)
            logger.debug(port)
            # retry connecting to the remote server
            i = 0
            while time.time() < start_time + timeout:
                try:
                    self._milvus = Milvus(host=host, port=port, try_connect=False, pre_ping=False)
                    if self._milvus.server_status():
                        logger.debug("Try connect times: %d, %s" % (i, round(time.time() - start_time, 2)))
                        break
                except Exception as e:
                    logger.debug("Milvus connect failed: %d times" % i)
                    i = i + 1
            if time.time() > start_time + timeout:
                raise Exception("Server connect timeout")
        except Exception as e:
            raise e
        self._metric_type = None
        if self._collection_name and self.exists_collection():
            self._metric_type = metric_type_to_str(self.describe()[1].metric_type)
            self._dimension = self.describe()[1].dimension

    def __str__(self):
        return 'Milvus collection %s' % self._collection_name

    def set_collection(self, name):
        self._collection_name = name

    def check_status(self, status):
        if not status.OK():
            logger.error(self._collection_name)
            logger.error(status.message)
            logger.error(self._milvus.server_status())
            logger.error(self.count())
            raise Exception("Status not ok")

    def check_result_ids(self, result):
        for index, item in enumerate(result):
            if item[0].distance >= epsilon:
                logger.error(index)
                logger.error(item[0].distance)
                raise Exception("Distance wrong")

    def create_collection(self, collection_name, dimension, index_file_size, metric_type):
        if not self._collection_name:
            self._collection_name = collection_name
        if metric_type not in METRIC_MAP.keys():
            raise Exception("Not supported metric_type: %s" % metric_type)
        metric_type = METRIC_MAP[metric_type]
        create_param = {'collection_name': collection_name,
                        'dimension': dimension,
                        'index_file_size': index_file_size,
                        "metric_type": metric_type}
        status = self._milvus.create_collection(create_param)
        self.check_status(status)

    def create_partition(self, tag_name):
        status = self._milvus.create_partition(self._collection_name, tag_name)
        self.check_status(status)

    def drop_partition(self, tag_name):
        status = self._milvus.drop_partition(self._collection_name, tag_name)
        self.check_status(status)

    def list_partitions(self):
        status, tags = self._milvus.list_partitions(self._collection_name)
        self.check_status(status)
        return tags

    @time_wrapper
    def insert(self, X, ids=None, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        status, result = self._milvus.insert(collection_name, X, ids)
        self.check_status(status)
        return status, result

    def insert_rand(self):
        insert_xb = random.randint(1, 100)
        X = [[random.random() for _ in range(self._dimension)] for _ in range(insert_xb)]
        X = utils.normalize(self._metric_type, X)
        count_before = self.count()
        status, _ = self.insert(X)
        self.check_status(status)
        self.flush()
        if count_before + insert_xb != self.count():
            raise Exception("Assert failed after inserting")

    def get_rand_ids(self, length):
        while True:
            status, stats = self._milvus.get_collection_stats(self._collection_name)
            self.check_status(status)
            segments = stats["partitions"][0]["segments"]
            # randomly choose one segment
            segment = random.choice(segments)
            status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"])
            if not status.OK():
                logger.error(status.message)
                continue
            if len(segment_ids):
                break
        if length >= len(segment_ids):
            logger.debug("Reset length: %d" % len(segment_ids))
            return segment_ids
        return random.sample(segment_ids, length)

    def get_rand_ids_each_segment(self, length):
        res = []
        status, stats = self._milvus.get_collection_stats(self._collection_name)
        self.check_status(status)
        segments = stats["partitions"][0]["segments"]
        segments_num = len(segments)
        # randomly choose from each segment
        for segment in segments:
            status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"])
            self.check_status(status)
            res.extend(segment_ids[:length])
        return segments_num, res

    def get_rand_entities(self, length):
        ids = self.get_rand_ids(length)
        status, get_res = self._milvus.get_entity_by_id(self._collection_name, ids)
        self.check_status(status)
        return ids, get_res

    @time_wrapper
    def get_entities(self, get_ids):
        status, get_res = self._milvus.get_entity_by_id(self._collection_name, get_ids)
        self.check_status(status)
        return get_res

    @time_wrapper
    def delete(self, ids, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        status = self._milvus.delete_entity_by_id(collection_name, ids)
        self.check_status(status)

    def delete_rand(self):
        delete_id_length = random.randint(1, 100)
        count_before = self.count()
        logger.info("%s: length to delete: %d" % (self._collection_name, delete_id_length))
        delete_ids = self.get_rand_ids(delete_id_length)
        self.delete(delete_ids)
        self.flush()
        logger.info("%s: count after delete: %d" % (self._collection_name, self.count()))
        status, get_res = self._milvus.get_entity_by_id(self._collection_name, delete_ids)
        self.check_status(status)
        for item in get_res:
            if item:
                raise Exception("Assert failed after delete")
        if count_before - len(delete_ids) != self.count():
            raise Exception("Assert failed after delete")

    @time_wrapper
    def flush(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        status = self._milvus.flush([collection_name])
        self.check_status(status)

    @time_wrapper
    def compact(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        status = self._milvus.compact(collection_name)
        self.check_status(status)

    @time_wrapper
    def create_index(self, index_type, index_param=None):
        index_type = INDEX_MAP[index_type]
        logger.info("Building index start, collection_name: %s, index_type: %s"
                    % (self._collection_name, index_type))
        if index_param:
            logger.info(index_param)
        status = self._milvus.create_index(self._collection_name, index_type, index_param)
        self.check_status(status)

    def describe_index(self):
        status, result = self._milvus.get_index_info(self._collection_name)
        self.check_status(status)
        index_type = None
        for k, v in INDEX_MAP.items():
            if result._index_type == v:
                index_type = k
                break
        return {"index_type": index_type, "index_param": result._params}

    def drop_index(self):
        logger.info("Drop index: %s" % self._collection_name)
        return self._milvus.drop_index(self._collection_name)

    def query(self, X, top_k, search_param=None, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        status, result = self._milvus.search(collection_name, top_k, query_records=X, params=search_param)
        self.check_status(status)
        return result

    def query_rand(self):
        top_k = random.randint(1, 100)
        nq = random.randint(1, 100)
        nprobe = random.randint(1, 100)
        search_param = {"nprobe": nprobe}
        _, X = self.get_rand_entities(nq)
        logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d"
                    % (self._collection_name, nq, top_k, nprobe))
        status, _ = self._milvus.search(self._collection_name, top_k, query_records=X, params=search_param)
        self.check_status(status)
        # for i, item in enumerate(search_res):
        #     if item[0].id != ids[i]:
        #         logger.warning("The index of search result: %d" % i)
        #         raise Exception("Query failed")

    # @time_wrapper
    # def query_ids(self, top_k, ids, search_param=None):
    #     status, result = self._milvus.search_by_id(self._collection_name, ids, top_k, params=search_param)
    #     self.check_result_ids(result)
    #     return result

    def count(self, name=None):
        if name is None:
            name = self._collection_name
        logger.debug(self._milvus.count_entities(name))
        row_count = self._milvus.count_entities(name)[1]
        if not row_count:
            row_count = 0
        logger.debug("Row count: %d in collection: <%s>" % (row_count, name))
        return row_count

    def drop(self, timeout=120, name=None):
        timeout = int(timeout)
        if name is None:
            name = self._collection_name
        logger.info("Start delete collection: %s" % name)
        status = self._milvus.drop_collection(name)
        self.check_status(status)
        i = 0
        while i < timeout:
            if self.count(name=name):
                time.sleep(1)
                i = i + 1
                continue
            else:
                break
        if i >= timeout:
            logger.error("Delete collection timeout")

    def describe(self):
        # logger.info(self._milvus.get_collection_info(self._collection_name))
        return self._milvus.get_collection_info(self._collection_name)

    def show_collections(self):
        return self._milvus.list_collections()

    def exists_collection(self, collection_name=None):
        if collection_name is None:
            collection_name = self._collection_name
        _, res = self._milvus.has_collection(collection_name)
        # self.check_status(status)
        return res

    def clean_db(self):
        collection_names = self.show_collections()[1]
        for name in collection_names:
            logger.debug(name)
            self.drop(name=name)

    @time_wrapper
    def preload_collection(self):
        status = self._milvus.load_collection(self._collection_name, timeout=3000)
        self.check_status(status)
        return status

    def get_server_version(self):
        _, res = self._milvus.server_version()
        return res

    def get_server_mode(self):
        return self.cmd("mode")

    def get_server_commit(self):
        return self.cmd("build_commit_id")

    def get_server_config(self):
        return json.loads(self.cmd("get_config *"))

    def get_mem_info(self):
        result = json.loads(self.cmd("get_system_info"))
        result_human = {
            # unit: GB
            "memory_used": round(int(result["memory_used"]) / (1024 * 1024 * 1024), 2)
        }
        return result_human

    def cmd(self, command):
        status, res = self._milvus._cmd(command)
        logger.info("Server command: %s, result: %s" % (command, res))
        self.check_status(status)
        return res
class SearchEngine:
    def __init__(self, host, port):
        self.host = os.environ.get('MILVUS_HOST', host)
        self.port = os.environ.get('MILVUS_PORT', str(port))
        self.engine = Milvus(host=self.host, port=self.port)
        self.collection_name = None

    #################################################
    # HANDLE COLLECTION
    #################################################
    def create_collection(self, collection_name, dimension):
        # Create a collection.
        param = {
            'collection_name': collection_name,
            'dimension': dimension,
            'index_file_size': 1000,
            'metric_type': MetricType.IP
        }
        self.engine.create_collection(param)
        print('[INFO] Created collection {}.'.format(collection_name))

    def drop_collection(self, collection_name):
        # Drop a collection.
        self.engine.drop_collection(collection_name=collection_name)
        print('[INFO] Dropped collection {}.'.format(collection_name))

    def get_collection_stats(self, collection_name):
        # Print collection info and statistics.
        print(self.engine.get_collection_info(collection_name))
        print(self.engine.get_collection_stats(collection_name))

    def set_collection(self, collection_name):
        # Select the collection that subsequent queries operate on.
        self.collection_name = collection_name
        print('[INFO] setting collection {}'.format(self.collection_name))

    #################################################
    # UTILS
    #################################################
    def check_set_collection(self):
        # Check that a collection has been selected.
        assert self.collection_name is not None, '[ERROR] Please set a collection first!'

    def check_exist_data_by_key(self, key):
        # Check whether the given key exists in the collection.
        self.check_set_collection()
        _, vector = self.engine.get_entity_by_id(collection_name=self.collection_name, ids=key)
        vector = vector if vector else [vector]
        return True if vector[0] else False

    def convert_key_format(self, key):
        return [key] if isinstance(key, int) else key

    def convert_value_format(self, value):
        rank = len(value.shape)
        assert rank < 2, '[ERROR] value must have rank less than 2!'
        return value.reshape(1, -1) if rank == 1 else value

    #################################################
    # INSERT
    #################################################
    def insert_data(self, key, value):
        # Insert data into the collection.
        key = self.convert_key_format(key)
        value = self.convert_value_format(value)
        if self.check_exist_data_by_key(key):
            print("[ERROR] The data already exists in the collection.")
            return
        self.engine.insert(collection_name=self.collection_name, records=value, ids=key)
        self.engine.flush([self.collection_name])
        print('[INFO] insert key {}'.format(key))

    #################################################
    # DELETE
    #################################################
    def delete_data(self, key):
        # Delete data from the collection.
        key = self.convert_key_format(key)
        if not self.check_exist_data_by_key(key):
            print("[ERROR] The data does not exist in the collection.")
            return
        self.engine.delete_entity_by_id(self.collection_name, key)
        self.engine.flush([self.collection_name])
        print('[INFO] delete key {}'.format(key))

    #################################################
    # UPDATE
    #################################################
    def update_data(self, key, value):
        # Update data (delete then re-insert).
        key = self.convert_key_format(key)
        value = self.convert_value_format(value)
        if not self.check_exist_data_by_key(key):
            print("[ERROR] The data does not exist in the collection.")
            return
        self.engine.delete_entity_by_id(self.collection_name, key)
        self.engine.flush([self.collection_name])
        self.engine.insert(collection_name=self.collection_name, records=value, ids=key)
        self.engine.flush([self.collection_name])
        print('[INFO] update key {}'.format(key))

    #################################################
    # SEARCH
    #################################################
    def search_by_feature(self, feature, top_k):
        # Search by feature vector.
        self.check_set_collection()
        feature = self.convert_value_format(feature)
        _, result = self.engine.search(collection_name=self.collection_name,
                                       query_records=feature, top_k=top_k)
        li_id = [list(map(lambda x: x.id, result[0])) for i in range(len(result))]
        li_dist = [list(map(lambda x: x.distance, result[0])) for i in range(len(result))]
        return li_id, li_dist

    def search_by_key(self, key, top_k):
        # Search by key.
        self.check_set_collection()
        key = self.convert_key_format(key)
        if not self.check_exist_data_by_key(key):
            print("[ERROR] The data does not exist in the collection.")
            return
        _, vector = self.engine.get_entity_by_id(collection_name=self.collection_name, ids=key)
        _, result = self.engine.search(collection_name=self.collection_name,
                                       query_records=vector, top_k=top_k + 1)
        li_id = [list(map(lambda x: x.id, result[0][1:])) for i in range(len(result))]
        li_dist = [list(map(lambda x: x.distance, result[0][1:])) for i in range(len(result))]
        return li_id, li_dist
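# --- Usage sketch (illustrative only, not part of the original class) ---
# Assumes numpy and a running Milvus server; the collection name, dimension, and keys
# are made up. MetricType.IP collections generally expect normalized vectors, which
# this sketch skips for brevity.
def example_search_engine():
    import numpy as np

    engine = SearchEngine(host='127.0.0.1', port=19530)
    engine.create_collection('image_features', dimension=256)
    engine.set_collection('image_features')

    engine.insert_data(key=1, value=np.random.rand(256).astype(np.float32))
    engine.insert_data(key=2, value=np.random.rand(256).astype(np.float32))

    ids, distances = engine.search_by_feature(np.random.rand(256).astype(np.float32), top_k=2)
    print(ids, distances)
    engine.delete_data(key=1)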
def main():
    milvus = Milvus(uri=uri)

    param = {
        'collection_name': collection_name,
        'dimension': _DIM,
        'index_file_size': 32,
        # 'metric_type': MetricType.IP
        'metric_type': MetricType.L2
    }

    # show collections in the Milvus server
    _, collections = milvus.list_collections()

    # create the collection
    milvus.create_collection(param)
    # create a partition of the collection
    milvus.create_partition(collection_name, partition_tag)
    print(f'collections in Milvus: {collections}')

    # describe demo_collection
    _, collection = milvus.get_collection_info(collection_name)
    print(f'describe demo_collection: {collection}')

    # build fake vectors
    vectors = [[random.random() for _ in range(_DIM)] for _ in range(10)]
    vectors1 = [[random.random() for _ in range(_DIM)] for _ in range(10)]

    status, id = milvus.insert(collection_name=collection_name, records=vectors,
                               ids=list(range(10)), partition_tag=partition_tag)
    print(f'status: {status} | id: {id}')
    if not status.OK():
        print(f"insert failed: {status}")
    status1, id1 = milvus.insert(collection_name=collection_name, records=vectors1,
                                 ids=list(range(10, 20)), partition_tag=partition_tag)
    print(f'status1: {status1} | id1: {id1}')

    ids_deleted = list(range(10))
    status_delete = milvus.delete_entity_by_id(collection_name=collection_name, id_array=ids_deleted)
    if status_delete.OK():
        print('delete successful')

    # flush inserted data to disk
    milvus.flush([collection_name])

    # get demo_collection row count
    status, result = milvus.count_entities(collection_name)
    print(f"demo_collection row count: {result}")

    # obtain raw vectors by providing vector ids
    status, result_vectors = milvus.get_entity_by_id(collection_name, list(range(10, 20)))

    # create an index over the vectors to search more rapidly
    index_param = {'nlist': 2}

    # create an IVF_FLAT index in demo_collection
    status = milvus.create_index(collection_name, IndexType.IVF_FLAT, index_param)
    if status.OK():
        print("create index ivf_flat succeeded")

    # use the first 2 inserted vectors for similarity search
    query_vectors = vectors1[0:2]

    # execute vector similarity search
    search_param = {"nprobe": 16}
    param = {
        'collection_name': collection_name,
        'query_records': query_vectors,
        'top_k': 1,
        'params': search_param
    }
    status, results = milvus.search(**param)
    if status.OK():
        if results[0][0].distance == 0.0:
            print('query result is correct')
        else:
            print('not correct')
        print(results)
    else:
        print(f'search failed: {status}')

    # drop the existing collection
    milvus.drop_collection(collection_name=collection_name)
    milvus.close()
for seg in par["segments"]:
    print("\t\tsegment name: {}, vector count: {}, index: {}, storage size {:.3f} MB"
          .format(seg["name"], seg["row_count"], seg["index_name"], seg["data_size"] / 1024 / 1024))

# obtain vector ids from a segment, then
# get vectors by specifying vector ids
segment0 = info["partitions"][0]["segments"][0]
status, ids = client.list_id_in_segment(collection_name, segment0["name"])
if not status.OK():
    print("Cannot obtain vector ids from segment {}. exiting ....".format(segment0["name"]))
    sys.exit(1)

# obtain the top 5 vectors
status, vectors = client.get_entity_by_id(collection_name, ids[:5])
if not status.OK():
    print("Cannot obtain vectors. exiting ....")
    sys.exit(1)

# delete the top 10 vectors
status = client.delete_entity_by_id(collection_name, ids[:10])
if status.OK():
    print("Delete top 10 vectors successfully")
else:
    print("Error occurred when trying to delete top 10 vectors. Reason: ", status.message)

client.drop_collection(collection_name)