def test_search(self, started_app):
    collection_name = inspect.currentframe().f_code.co_name
    to_index_cnt = random.randint(10, 20)
    collection = TablesFactory(collection_id=collection_name,
                               state=Tables.NORMAL)
    # Seed the metadata with some TO_INDEX files for the collection the
    # search will hit.
    to_index_files = TableFilesFactory.create_batch(
        to_index_cnt,
        collection=collection,
        file_type=TableFiles.FILE_TYPE_TO_INDEX)
    topk = random.randint(5, 10)
    nq = random.randint(5, 10)
    # Start with an out-of-range nprobe (> 2048) so the first call can
    # verify the argument check.
    param = {
        'collection_name': collection_name,
        'query_records': self.random_data(nq, collection.dimension),
        'top_k': topk,
        'params': {
            'nprobe': 2049
        }
    }
    # Fake per-query results for the mocked downstream server: one
    # TopKQueryResult with `topk` hits for each of the `nq` queries.
    result = [
        milvus_pb2.TopKQueryResult(query_result_arrays=[
            milvus_pb2.QueryResult(id=i, distance=random.random())
            for i in range(topk)
        ]) for i in range(nq)
    ]
    mock_results = milvus_pb2.TopKQueryResultList(
        status=status_pb2.Status(error_code=status_pb2.SUCCESS,
                                 reason="Success"),
        topk_query_result=result)
    collection_schema = CollectionSchema(
        collection_name=collection_name,
        index_file_size=collection.index_file_size,
        metric_type=collection.metric_type,
        dimension=collection.dimension)

    # Case 1: an invalid nprobe is rejected with ILLEGAL_ARGUMENT.
    status, _ = self.client.search_vectors(**param)
    assert status.code == Status.ILLEGAL_ARGUMENT

    param['params']['nprobe'] = 2048
    RouterMixin.connection = mock.MagicMock(return_value=Milvus())
    RouterMixin.query_conn.conn = mock.MagicMock(return_value=Milvus())

    # Case 2: describe_collection fails, so the search reports
    # COLLECTION_NOT_EXISTS.
    Milvus.describe_collection = mock.MagicMock(
        return_value=(BAD, collection_schema))
    status, ret = self.client.search_vectors(**param)
    assert status.code == Status.COLLECTION_NOT_EXISTS

    # Case 3: the happy path; the mocked file search succeeds and the
    # merged result contains one entry per query.
    Milvus.describe_collection = mock.MagicMock(
        return_value=(OK, collection_schema))
    Milvus.search_vectors_in_files = mock.MagicMock(
        return_value=mock_results)
    status, ret = self.client.search_vectors(**param)
    assert status.OK()
    assert len(ret) == nq
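
# `random_data` above is a small helper on the test class whose body is not
# shown in this section. A minimal sketch, assuming it only needs to build
# `n` random query vectors of the given dimension:
#
#     def random_data(self, n, dimension):
#         return [[random.random() for _ in range(dimension)]
#                 for _ in range(n)]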
def test_files_to_search(self):
    table = TablesFactory()
    new_files_cnt = 5
    to_index_cnt = 10
    raw_cnt = 20
    backup_cnt = 12
    to_delete_cnt = 9
    index_cnt = 8
    new_index_cnt = 6
    new_merge_cnt = 11
    # Create one batch per file type. Only the RAW files get date=120;
    # everything else gets date=110, so the date-range assertions below
    # can tell them apart.
    new_files = TableFilesFactory.create_batch(
        new_files_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_NEW,
        date=110)
    to_index_files = TableFilesFactory.create_batch(
        to_index_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_TO_INDEX,
        date=110)
    raw_files = TableFilesFactory.create_batch(
        raw_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_RAW,
        date=120)
    backup_files = TableFilesFactory.create_batch(
        backup_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_BACKUP,
        date=110)
    index_files = TableFilesFactory.create_batch(
        index_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_INDEX,
        date=110)
    new_index_files = TableFilesFactory.create_batch(
        new_index_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_NEW_INDEX,
        date=110)
    new_merge_files = TableFilesFactory.create_batch(
        new_merge_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_NEW_MERGE,
        date=110)
    to_delete_files = TableFilesFactory.create_batch(
        to_delete_cnt,
        table=table,
        file_type=TableFiles.FILE_TYPE_TO_DELETE,
        date=110)

    # Only RAW, INDEX, and TO_INDEX files are searchable.
    assert (table.files_to_search().count() ==
            raw_cnt + index_cnt + to_index_cnt)
    # Date ranges are half-open [start, end): [100, 115) keeps the
    # date=110 files but drops the RAW files at date=120 ...
    assert (table.files_to_search([(100, 115)]).count() ==
            index_cnt + to_index_cnt)
    # ... [111, 120) keeps neither ...
    assert table.files_to_search([(111, 120)]).count() == 0
    # ... [111, 121) keeps only the RAW files ...
    assert table.files_to_search([(111, 121)]).count() == raw_cnt
    # ... and [110, 121) keeps all searchable files.
    assert (table.files_to_search([(110, 121)]).count() ==
            raw_cnt + index_cnt + to_index_cnt)
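
# The assertions above pin down the semantics of `files_to_search`: only
# RAW, TO_INDEX, and INDEX files are search candidates, and each
# (start, end) pair is a half-open date range [start, end). A minimal
# sketch of such a query, assuming a SQLAlchemy-style model (the real
# method lives on the Tables model, not in this test file):
#
#     def files_to_search(self, date_ranges=None):
#         cond = TableFiles.file_type.in_([
#             TableFiles.FILE_TYPE_RAW,
#             TableFiles.FILE_TYPE_TO_INDEX,
#             TableFiles.FILE_TYPE_INDEX,
#         ])
#         if date_ranges:
#             cond = and_(
#                 cond,
#                 or_(*[
#                     and_(TableFiles.date >= start, TableFiles.date < end)
#                     for start, end in date_ranges
#                 ]))
#         return self.files.filter(cond)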