def test_pca_without_grad(self, device: str):
    seed_embeddings = torch.eye(2, device=device)
    pca = PCABiasDirection()
    const = 1 / math.sqrt(2)
    expected_bias_direction = torch.tensor([const, -const], device=device)
    test_bias_direction = pca(seed_embeddings)
    # The computed direction should match the expected one up to sign/scale,
    # so the component-wise ratio must be constant.
    k = expected_bias_direction / test_bias_direction
    assert k[0].item() == pytest.approx(k[1].item())
    assert seed_embeddings.grad is None
def test_pca_with_grad(self, device: str):
    # add noise to avoid "RuntimeError: triangular_solve_cpu: U(2,2) is zero, singular U."
    torch.manual_seed(0)
    seed_embeddings = (
        torch.eye(2, device=device) + (1 - torch.eye(2, device=device)) * 1e-1
    )
    seed_embeddings = seed_embeddings.requires_grad_()
    assert seed_embeddings.grad is None
    pca = PCABiasDirection(requires_grad=True)
    test_bias_direction = pca(seed_embeddings)
    test_bias_direction.sum().backward()
    assert seed_embeddings.grad is not None
def __init__(
    self,
    seed_words_file: Union[PathLike, str],
    tokenizer: Tokenizer,
    direction_vocab: Optional[Vocabulary] = None,
    namespace: str = "tokens",
    requires_grad: bool = False,
    noise: float = 1e-10,
):
    self.ids = load_words(seed_words_file, tokenizer, direction_vocab, namespace)
    self.direction = PCABiasDirection(requires_grad=requires_grad)
    self.noise = noise
def test_pca_invalid_dims(self):
    pca = PCABiasDirection()
    with pytest.raises(ConfigurationError):
        pca(torch.zeros(2))
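# Illustrative sketch, not PCABiasDirection's actual implementation: a
# PCA-based bias direction is conceptually the first principal component of
# the mean-centred seed embeddings, which the tests above check up to
# sign/scale. The helper name `_first_principal_component` is hypothetical
# and relies on the module's existing `torch` import.
def _first_principal_component(seed_embeddings: torch.Tensor) -> torch.Tensor:
    # Centre the seed embeddings so the SVD captures variance, not the mean offset.
    centered = seed_embeddings - seed_embeddings.mean(dim=0, keepdim=True)
    # The first right-singular vector is the direction of maximum variance.
    _, _, vh = torch.linalg.svd(centered, full_matrices=False)
    return vh[0]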