Example 1
 def evaluate(self):
     acc = torch.diag(self.hist).sum() / (self.hist.sum() + self.eps)
     acc_cls = torch.diag(self.hist) / (self.hist.sum(axis=1) + self.eps)
     acc_cls = torch.nansum(acc_cls) / len(acc_cls)
     iu = torch.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - torch.diag(self.hist) + self.eps)
     mean_iu = torch.nansum(iu) / len(iu)
     freq = self.hist.sum(axis=1) / (self.hist.sum() + self.eps)
     fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
     return acc, acc_cls, iu, mean_iu, fwavacc
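A minimal standalone sketch of the same metric computation, assuming a small hand-built confusion matrix (the names hist and eps below are ours, standing in for self.hist and self.eps):

import torch

# Hypothetical 3x3 confusion matrix standing in for self.hist.
eps = 1e-8
hist = torch.tensor([[50., 2., 1.],
                     [3., 40., 5.],
                     [0., 4., 30.]])

acc = torch.diag(hist).sum() / (hist.sum() + eps)            # overall accuracy
acc_cls = torch.diag(hist) / (hist.sum(axis=1) + eps)        # per-class accuracy
iu = torch.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0)
                         - torch.diag(hist) + eps)           # per-class IoU
print(acc, torch.nansum(acc_cls) / len(acc_cls), torch.nansum(iu) / len(iu))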
Example 2
    def constrain_weight(self, weight_arr):
        square_weight_arr = weight_arr * weight_arr
        while torch.nansum(square_weight_arr) > self.weight_limit:
            weight_arr = weight_arr * 0.9
            square_weight_arr = weight_arr * weight_arr

        return weight_arr
Example 3
 def elbo(self, mean, var, y):
     y_masked = y.masked_fill(y.isnan(), 0.)
     v = self.log_var.exp()
     elbo = (var + mean**2 - 2. * y_masked * mean + y_masked**2) / v
     elbo += self.log_var + np.log(2. * np.pi)
     elbo = elbo.masked_fill(y.isnan(), 0.)  # masked_fill is not in-place; keep the result
     return -0.5 * torch.nansum(elbo)
Example 4
 def reduction_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     return (
         torch.argmax(a),
         torch.argmin(a),
         torch.amax(a),
         torch.amin(a),
         torch.aminmax(a),
         torch.all(a),
         torch.any(a),
         torch.max(a),
         torch.min(a),
         torch.dist(a, b),
         torch.logsumexp(a, 0),
         torch.mean(a),
         torch.nanmean(a),
         torch.median(a),
         torch.nanmedian(a),
         torch.mode(a),
         torch.norm(a),
         torch.nansum(a),
         torch.prod(a),
         torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])),
         torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])),
         torch.std(a),
         torch.std_mean(a),
         torch.sum(a),
         torch.unique(a),
         torch.unique_consecutive(a),
         torch.var(a),
         torch.var_mean(a),
         torch.count_nonzero(a),
     )
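For reference, a small hedged demo of the NaN-aware reductions listed above: nansum treats NaN as zero, while nanmean and nanquantile ignore NaN entries entirely.

import torch

a = torch.tensor([1.0, float('nan'), 3.0])
print(torch.sum(a))               # tensor(nan): plain sum propagates NaN
print(torch.nansum(a))            # tensor(4.): NaN treated as zero
print(torch.nanmean(a))           # tensor(2.): NaN excluded from the mean
print(torch.nanquantile(a, 0.5))  # tensor(2.): NaN excluded from the quantile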
Example 5
    def compute_cams(self,
                     class_idx: int,
                     scores: Optional[Tensor] = None,
                     normalized: bool = True) -> Tensor:
        """Compute the CAM for a specific output class

        Args:
            class_idx (int): output class index of the target class whose CAM will be computed
            scores (torch.Tensor[1, K], optional): forward output scores of the hooked model
            normalized (bool, optional): whether the CAM should be normalized

        Returns:
            torch.Tensor[M, N]: class activation map of hooked conv layer
        """

        # Get map weight & unsqueeze it
        weights = self._get_weights(class_idx, scores)
        weights = weights[
            (..., ) + (None, ) *
            (self.hook_a.ndim - 2)]  # type: ignore[operator, union-attr]

        # Perform the weighted combination to get the CAM
        batch_cams = torch.nansum(weights * self.hook_a.squeeze(0),
                                  dim=0)  # type: ignore[union-attr]

        if self._relu:
            batch_cams = F.relu(batch_cams, inplace=True)

        # Normalize the CAM
        if normalized:
            batch_cams = self._normalize(batch_cams)

        return batch_cams
Example 6
 def log_likelihood(self, proba=None, A=None):
     nll = torch.tensor(0.).cuda()
     if (A is not None) and (proba is not None):
         A = A.unsqueeze(1)
         llk = A * torch.log(proba) + (1. - A) * torch.log(1. - proba)
         nll += -torch.nansum(llk) / llk.size(1)
     return -nll
Example 7
def kl_divergence(log_prediction: Tensor, log_target: Tensor) -> Tensor:
    sum_dim = [i for i in range(1, log_prediction.ndim)]
    return torch.nansum(F.kl_div(log_prediction,
                                 log_target,
                                 reduction='none',
                                 log_target=True),
                        dim=sum_dim)
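A hypothetical call of the helper above, assuming it and its imports (torch, Tensor, torch.nn.functional as F) are in scope: both arguments are log-probabilities and the result has one value per batch element.

import torch
import torch.nn.functional as F

log_p = F.log_softmax(torch.randn(2, 5), dim=-1)
log_q = F.log_softmax(torch.randn(2, 5), dim=-1)
print(kl_divergence(log_p, log_q).shape)  # torch.Size([2])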
Example 8
 def _cdist_nb_full(self, x, cutoff=9.0, mask=False):
     dmat = torch.cdist(x.permute(0, 2, 1), x.permute(0, 2, 1))
     dmat6 = (self._warp_domain(dmat, 1.9)**6)
     LJpB = self.vdw_B / dmat6
     LJpA = self.vdw_A / (dmat6**2)
     Cp = (self.q1q2 / self._warp_domain(dmat, 0.4))
     return torch.nansum(LJpA - LJpB + Cp)
Example 9
    def forward(self, g):
        # input embedding
        h = self.embedding_h(g.ndata['feat'])
        e = self.embedding_e_real(g.edata['feat'])

        PosEnc = g.ndata['pos_enc']  # (Num nodes) x (Num Eigenvectors) x 2
        empty_mask = torch.isnan(
            PosEnc)  # (Num nodes) x (Num Eigenvectors) x 2

        PosEnc[empty_mask] = 0  # (Num nodes) x (Num Eigenvectors) x 2
        PosEnc = torch.transpose(
            PosEnc, 0, 1).float()  # (Num Eigenvectors) x (Num nodes) x 2
        PosEnc = self.linear_A(
            PosEnc)  # (Num Eigenvectors) x (Num nodes) x PE_dim

        # 1st Transformer: Learned PE
        PosEnc = self.PE_Transformer(src=PosEnc,
                                     src_key_padding_mask=empty_mask[:, :, 0])

        # remove masked sequences
        PosEnc[torch.transpose(empty_mask, 0, 1)[:, :, 0]] = float('nan')

        # Sum pooling
        PosEnc = torch.nansum(PosEnc, 0, keepdim=False)

        # Concatenate learned PE to input embedding
        h = torch.cat((h, PosEnc), 1)

        h = self.in_feat_dropout(h)

        # Second Transformer
        for conv in self.layers:
            h, e = conv(g, h, e)
        g.ndata['feat'] = h
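A minimal sketch (ours, not taken from the model above) of the NaN-based sum pooling used in that forward pass: padded positions are filled with NaN after the transformer, and torch.nansum then acts as a masked sum over the sequence dimension.

import torch

seq = torch.randn(4, 3, 8)                    # (seq_len, batch, dim)
pad = torch.zeros(4, 3, dtype=torch.bool)
pad[2:, 1] = True                             # second sequence has length 2
seq = seq.masked_fill(pad.unsqueeze(-1), float('nan'))
pooled = torch.nansum(seq, 0, keepdim=False)  # (batch, dim), padding ignored
print(pooled.shape)                           # torch.Size([3, 8])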
Example 10
 def forward(self, x):
     self._check_init()
     inputs = self.drop_(-(x.unsqueeze(dim=2) - self.centers) ** 2 * (self.h / self.sigmas ** 2 + self.eps))
     inputs = torch.nansum(inputs, dim=1)
     frs = F.softmax(inputs, dim=1)
     return frs
Example 11
def jensen_shannon_divergence(p, log_p: Tensor, q, log_q: Tensor) -> Tensor:
    sum_dim = [i for i in range(1, log_p.ndim)]

    with torch.no_grad():
        log_m = torch.log((p + q) / 2)

    kl_p = F.kl_div(log_m, log_p, reduction='none', log_target=True)
    kl_q = F.kl_div(log_m, log_q, reduction='none', log_target=True)

    return torch.nansum(kl_p + kl_q, dim=sum_dim) / 2
Example 12
 def log_likelihood(self,
                    mean_cts=None,
                    X_cts=None,
                    proba_bin=None,
                    X_bin=None):
     nll = torch.tensor(0.).cuda()
     if (X_cts is not None) and (mean_cts is not None):
         X_cts = X_cts.unsqueeze(1)
         var = torch.exp(self.cts_logvar)
         X_cts_masked = X_cts.masked_fill(X_cts.isnan(), 0.)
         llk = (mean_cts - X_cts_masked)**2 / (2. * var) + 0.5 * torch.log(
             2. * math.pi * var)
         llk = llk.masked_fill(X_cts.isnan(), 0.)
         nll += torch.nansum(llk) / llk.size(-1)
     if (X_bin is not None) and (proba_bin is not None):
         X_bin = X_bin.unsqueeze(1)
         X_bin_masked = X_bin.masked_fill(X_bin.isnan(), 0.)
         llk = X_bin_masked * torch.log(proba_bin) + (
             1. - X_bin_masked) * torch.log(1. - proba_bin)
         llk = llk.masked_fill(X_bin.isnan(), 0.)
         nll += -torch.nansum(llk) / llk.size(-1)
     return -nll
Example 13
 def mean_average_precision(self, y_que, X_que, y_pool, X_pool):
     if self.rank is None:
         raise ValueError("rank function is not provided.")
     n_que = len(y_que)
     n_pool = len(y_pool)
     ap = th.zeros(n_que, device=self.device)
     for i in range(n_que):
         y = y_que[i]
         ranks = self.rank(X_que[i], X_pool)
         rel = y_pool[ranks] == y
         pre_k = th.cumsum(rel, dim=0) / th.arange(
             1, n_pool + 1, device=self.device)
         ap[i] = th.divide(th.sum(pre_k * rel), th.sum(rel))
     return th.nansum(ap) / (n_que - th.sum(th.isnan(ap)))
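The NaN handling in the final line deserves a note: a query with no relevant items gives ap = 0/0 = NaN, so nansum divided by the count of non-NaN entries averages only over the valid queries. A small hedged check:

import torch as th

ap = th.tensor([0.8, float('nan'), 0.5])
mean_ap = th.nansum(ap) / (len(ap) - th.sum(th.isnan(ap)))
print(mean_ap)  # tensor(0.6500): the NaN query is excluded from the average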
Example 14
 def area_under_the_curve(self, y_que, X_que, y_pool, X_pool):
     if self.rank is None:
         raise ValueError("rank function is not provided.")
     n_que = len(y_que)
     auc = th.zeros(n_que, device=self.device)
     for i in range(n_que):
         y = y_que[i]
         ranks = self.rank(X_que[i], X_pool)
         y_count = th.sum(y_pool == y)
         swapped_pairs = th.sum(
             (y_pool[ranks] != y) *
             (y_count - th.cumsum(y_pool[ranks] == y, dim=0)))
         auc[i] = 1 - swapped_pairs / (y_count * (len(y_pool) - y_count))
     return th.nansum(auc) / (n_que - th.sum(th.isnan(auc)))
Example 15
 def _cdist_nb(self, x, cutoff=9.0, mask=False):
     dmat = torch.cdist(x.permute(0, 2, 1), x.permute(0, 2, 1))
     LJp = self.vdw_A / (self._warp_domain(dmat, 1.9)**12)
     Cp = (self.q1q2 / self._warp_domain(dmat, 0.4))
     return torch.nansum(LJp + Cp)
Example 16
 def product(self, dim, keepdim=False):
     return Gaussian(
         torch.nansum(self._precision, dim, keepdim=keepdim),
         torch.nansum(self._mean_times_precision, dim, keepdim=keepdim)
     )
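This relies on the natural (information-form) parameterisation: multiplying independent Gaussian factors adds their precisions and their precision-weighted means, which is exactly what the two nansum calls do. A hedged numerical check for two 1-D factors N(0, 1) and N(2, 1), whose product is proportional to N(1, 0.5):

import torch

precision = torch.tensor([1.0, 1.0])
mean_times_precision = torch.tensor([0.0, 2.0])
prec = torch.nansum(precision, 0)             # combined precision: 2.0
mtp = torch.nansum(mean_times_precision, 0)   # combined mean * precision: 2.0
print(mtp / prec, 1.0 / prec)                 # mean 1.0, variance 0.5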
Example 17
 def compute(self) -> torch.Tensor:
     avg_precision = torch.Tensor(super().compute())
     total = torch.nansum(avg_precision)
     count = (~torch.isnan(avg_precision)).sum()
     return total / count if count != 0 else torch.tensor(
         [0.], device=avg_precision.device)
Example 18
    print(
        torch.bernoulli(a)
    )  # draw binary random numbers (0 or 1) from a Bernoulli distribution
    weights = torch.tensor([0, 10, 3, 0],
                           dtype=torch.float)  # create a tensor of weights
    print(torch.multinomial(weights, 2))

    x = torch.randn(3)
    print(x)
    print(torch.mean(x))
    print(torch.sum(x))
    print(torch.median(x))
    # print(torch.nanmedian(x))  # ignoring NaN values
    print(torch.min(x))
    print(torch.max(x))
    print(torch.mode(x))
    print(torch.std(x))
    print(torch.var(x))
    print(torch.quantile(x, 0.1))
    print(x.nanquantile(0.1))
    print(torch.nansum(x))  # treating NaN as zero
    print(torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long)))

    # count the frequency of each value in an array of non-negative int
    print(torch.bincount(torch.randint(0, 8, (5, ), dtype=torch.int64)))
    print(torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3))

    x = torch.zeros(3, 3)
    x[torch.randn(3, 3) > 0.5] = 1
    print(torch.count_nonzero(x))
Example 19
 def elbo(self, mean, var, y):
     y_masked = y.masked_fill(y.isnan(), 0.)
     t = torch.sqrt(mean**2 + var)
     elbo = nn.LogSigmoid()(t) + (y_masked - 0.5) * mean - 0.5 * t
     elbo = elbo.masked_fill(y.isnan(), 0.)  # masked_fill is not in-place; keep the result
     return torch.nansum(elbo)
Example 20
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        relevance_score=None,
        labels=None,
        mlm_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        (
            flattened_input_ids,
            flattened_attention_mask,
            flattened_token_type_ids,
        ) = self._flatten_inputs(input_ids, attention_mask, token_type_ids)

        joint_outputs = self.realm(
            flattened_input_ids,
            attention_mask=flattened_attention_mask,
            token_type_ids=flattened_token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # [batch_size * num_candidates, joint_seq_len, d_model]
        joint_output = joint_outputs[0]
        # [batch_size * num_candidates, joint_seq_len, s_vocab]
        prediction_scores = self.cls(joint_output)
        # [batch_size, num_candidates]
        candidate_score = relevance_score

        masked_lm_loss = None
        if labels is not None:
            if candidate_score is None:
                raise ValueError(
                    "You have to specify `relevance_score` when `labels` is specified in order to compute loss."
                )

            batch_size, seq_length = labels.size()

            if mlm_mask is None:
                mlm_mask = torch.ones_like(labels, dtype=torch.float32)
            else:
                mlm_mask = mlm_mask.type(torch.float32)

            # Compute marginal log-likelihood
            loss_fct = CrossEntropyLoss(
                reduction="none")  # -100 index = padding token

            # [batch_size * num_candidates * joint_seq_len, s_vocab]
            mlm_logits = prediction_scores.view(-1, self.config.s_vocab)
            # [batch_size * num_candidates * joint_seq_len]
            mlm_targets = labels.tile(1, self.config.num_candidates).view(-1)
            # [batch_size, num_candidates, joint_seq_len]
            masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view(
                batch_size, self.config.num_candidates, seq_length)
            # [batch_size, num_candidates, 1]
            candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1)
            # [batch_size, num_candidates, joint_seq_len]
            joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob
            # [batch_size, joint_seq_len]
            marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1)
            # []
            masked_lm_loss = -torch.nansum(
                torch.sum(marginal_gold_log_probs * mlm_mask) /
                torch.sum(mlm_mask))

        if not return_dict:
            output = (prediction_scores, ) + joint_outputs[2:4]
            return ((masked_lm_loss, ) +
                    output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hiddens=joint_outputs.hiddens,
            attns=joint_outputs.attns,
        )
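A hedged, self-contained illustration of the marginalisation step above: per-candidate MLM log-probabilities are shifted by the retriever's candidate log-probabilities and then marginalised over the candidate dimension with logsumexp (shapes and names below are ours).

import torch

batch_size, num_candidates, seq_len = 2, 3, 4
masked_lm_log_prob = torch.randn(batch_size, num_candidates, seq_len)
candidate_score = torch.randn(batch_size, num_candidates)

candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1)  # [batch, cand, 1]
joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob       # [batch, cand, seq]
marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1)          # [batch, seq]
print(marginal_gold_log_probs.shape)                                # torch.Size([2, 4])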