def encode(self, docs: DocumentArray, **kwargs):
    """Embed every document in *docs* in-place.

    The stacked ``docs.blobs`` array is fed through the backbone network,
    pooled, and the result is written back to ``docs.embeddings``.

    :param docs: documents whose ``blobs`` hold the raw input arrays
        (assumes blobs are already stacked/batched — TODO confirm upstream).
    """
    # inference_mode: pure forward pass, no autograd bookkeeping needed.
    with torch.inference_mode():
        batch = torch.from_numpy(docs.blobs.astype('float32'))
        raw = self._get_features(batch).detach()
        pooled = self._get_pooling(raw.numpy())
        docs.embeddings = pooled
def encode(self, docs: DocumentArray, **kwargs):
    """Embed every document in *docs* in-place.

    Stacks each document's ``blob`` attribute into one batch, runs it
    through the backbone network, applies pooling, and stores the result
    in ``docs.embeddings``.

    :param docs: documents whose ``blob`` attributes hold same-shaped
        arrays (np.stack requires uniform shapes — TODO confirm callers).
    """
    content = np.stack(docs.get_attributes('blob'))
    # Fix: wrap the forward pass in inference_mode, consistent with the
    # sibling encode() — this is pure inference, so autograd tracking is
    # wasted memory/time.
    with torch.inference_mode():
        _input = torch.from_numpy(content.astype('float32'))
        _features = self._get_features(_input).detach()
        _features = _features.numpy()
        _features = self._get_pooling(_features)
        docs.embeddings = _features
def test_embeddings_setter_da():
    """Setting ``embeddings`` on a DocumentArray propagates to every document."""
    expected = np.random.random((100, 128))
    docs = DocumentArray([Document() for _ in range(100)])
    docs.embeddings = expected
    # Round-trip through the bulk getter must match what was assigned...
    np.testing.assert_almost_equal(docs.embeddings, expected)
    # ...and each individual document must carry its own row.
    for row, doc in zip(expected, docs):
        np.testing.assert_almost_equal(row, doc.embedding)
def test_embeddings_wrong_len():
    """Assigning an embedding matrix with the wrong row count raises ValueError."""
    docs = DocumentArray([Document() for _ in range(100)])
    mismatched = np.ones((2, 10))  # 2 rows for 100 documents
    with pytest.raises(ValueError, match='the number of rows in the'):
        docs.embeddings = mismatched
def test_set_embeddings_multi_kind(array):
    """The embeddings setter accepts every array kind supplied by the fixture."""
    docs = DocumentArray([Document() for _ in range(10)])
    # Smoke test: assignment must not raise for any parametrized array type.
    docs.embeddings = array