Example #1
    def __getitem__(self, mask: torch.BoolTensor) -> nn.Module:
        # Cast the incoming mask to the dtype/device of the stored default mask and keep it.
        self.default = mask.to(self.default)

        # If a hypernetwork is attached, call it with the wrapped network and the
        # mask rescaled from {0, 1} to {-1, 1}.
        if self.hyper is not None:
            self.hyper(self.net, self.default * 2. - 1.)

        return self

    def forward(
        self, inputs: torch.Tensor, tags: torch.Tensor,
        mask: torch.BoolTensor = None, reduction: str = None
    ) -> torch.Tensor:
        """
        Computes the negative log likelihood.
        """

        if mask is None:
            mask = torch.ones(*tags.size(), dtype=torch.bool)
        else:
            # The code below fails in weird ways if this isn't a bool tensor, so we make sure.
            mask = mask.to(torch.bool)

        log_denominator = self._input_likelihood(inputs, mask)
        log_numerator = self._joint_likelihood(inputs, tags, mask)

        nll = log_denominator - log_numerator

        if reduction is None:
            reduction = self.reduction

        if reduction == 'sum':
            nll = torch.sum(nll)
        elif reduction == 'mean':
            nll = torch.mean(nll)

        return nll
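For context, here is a minimal, self-contained sketch of the mask defaulting and reduction handling used in the `forward` above. The `tags`, `log_denominator`, and `log_numerator` values are dummy stand-ins for the real `_input_likelihood`/`_joint_likelihood` outputs, so the numbers are purely illustrative.

    import torch

    # Illustrative sketch of the mask defaulting and reduction handling in forward();
    # the per-sequence scores below are dummy stand-ins, not a real CRF computation.
    tags = torch.tensor([[1, 2, 0], [3, 0, 0]])        # (batch, seq_len)
    mask = None

    if mask is None:
        mask = torch.ones(*tags.size(), dtype=torch.bool)
    else:
        mask = mask.to(torch.bool)

    log_denominator = torch.tensor([4.2, 3.1])         # stand-in partition scores
    log_numerator = torch.tensor([2.0, 1.5])           # stand-in joint scores
    nll = log_denominator - log_numerator              # per-sequence negative log likelihood

    reduction = 'mean'
    if reduction == 'sum':
        nll = torch.sum(nll)
    elif reduction == 'mean':
        nll = torch.mean(nll)

    print(nll)  # tensor(1.9000)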
Example #3
    def __getitem__(self, mask: torch.BoolTensor) -> nn.Module:
        # Cast the query mask to the dtype/device of the registered masks.
        mask = mask.to(self.masks)
        # Select the registered masks that match the query exactly.
        select = torch.all(mask == self.masks, dim=-1)
        indices = torch.nonzero(select).squeeze(-1).tolist()

        # Return the first matching sub-module, or None if there is no match.
        if indices:
            return self.nes[indices[0]]

        return None

    def forward(
        self, inputs: torch.Tensor, tags: torch.Tensor, mask: torch.BoolTensor = None
    ) -> torch.Tensor:
        """
        Computes the log likelihood.
        """

        if mask is None:
            mask = torch.ones(*tags.size(), dtype=torch.bool)
        else:
            # The code below fails in weird ways if this isn't a bool tensor, so we make sure.
            mask = mask.to(torch.bool)

        log_denominator = self._input_likelihood(inputs, mask)
        log_numerator = self._joint_likelihood(inputs, tags, mask)

        return torch.sum(log_numerator - log_denominator)
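For reference, a minimal, self-contained sketch of the mask-matching lookup performed by `__getitem__` above; `masks` and `query` are illustrative stand-ins for `self.masks` and the indexing mask.

    import torch

    # Self-contained sketch of the mask lookup in __getitem__ above; `masks`
    # stands in for self.masks (one registered boolean mask per sub-module).
    masks = torch.tensor([[True, False, True],
                          [True, True, False]])

    query = torch.tensor([True, True, False])
    query = query.to(masks)                         # match dtype/device of the registry
    select = torch.all(query == masks, dim=-1)      # rows equal to the query everywhere
    indices = torch.nonzero(select).squeeze(-1).tolist()

    print(indices)  # [1] -> self.nes[1] would be returned; an empty list yields None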