def getClassWeight(targets, maxClasses: int = None):
    # Inverse-frequency weight per class: weight_c = 1 / count_c.
    elements, counts = torch.unique(targets, sorted=True, return_counts=True)
    if maxClasses:
        assert maxClasses > int(max(elements)), f"Found more label classes than given maxClasses={maxClasses}"
    else:
        maxClasses = int(max(elements)) + 1

    countPerClass = torch.zeros(maxClasses, dtype=torch.float)
    for idx, count in zip(elements, counts):
        countPerClass[idx] = count

    return torch.reciprocal(countPerClass)
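A minimal usage sketch (the label tensor and the pairing with nn.CrossEntropyLoss are illustrative assumptions; note that a class absent from targets would receive an infinite weight):

import torch
import torch.nn as nn

targets = torch.tensor([0, 0, 0, 1, 2, 2])          # class 1 is the rarest
weights = getClassWeight(targets, maxClasses=3)      # tensor([0.3333, 1.0000, 0.5000])
criterion = nn.CrossEntropyLoss(weight=weights)      # down-weights frequent classes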
Example #2
    def get_MD_LogLikelihood(self, MD_params, target):
        if target.ndim == 1:
            target = target.unsqueeze(dim=-1)
        u, v, p = MD_params[0], MD_params[1], MD_params[2]
        d = u - torch.cat([target] * self.components_size, dim=-1)
        logLikelihoods = -d * d * torch.reciprocal(
            2.0 * v) - 0.5 * torch.log(v) + p

        # normalizing constant
        logLikelihoods -= 0.9189385332  # 0.5 * log(2 * pi), the Gaussian normalizing constant

        return torch.logsumexp(logLikelihoods, dim=-1)  # size: [N]
Example #3
def calculate_mrr(ind, y):

    y = y.view(-1, 1)
    y = y.expand_as(ind)

    match = (y == ind)
    match = match.nonzero()

    ranks = (match[:, -1] + 1).float()
    ranks = torch.reciprocal(ranks)

    return (torch.sum(ranks).data / y.size(0)).item()
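A hedged usage sketch, assuming `ind` holds ranked candidate indices per row (e.g. from torch.topk) and `y` the ground-truth index, so each reciprocal rank is 1 / (1 + column of the hit):

import torch

scores = torch.randn(4, 10)                  # 4 queries, 10 candidate items
_, ind = torch.topk(scores, k=10, dim=1)     # full ranking, so every target is found
y = torch.randint(0, 10, (4,))               # ground-truth item per query
print(calculate_mrr(ind, y))                 # mean reciprocal rank in (0, 1]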
Example #4
def compute_sumrr(self, probs, target):
    '''

    :param probs: batch_size*tagset_size
    :param target: batch_size*1
    :return: sum of reciprocal rank
    '''
    prob_target = torch.gather(probs.data, 1, target.data.view(-1, 1))
    comp = torch.gt(probs.data, prob_target)
    rank = torch.add(comp.float().sum(dim=1), 1)
    rr = torch.sum(torch.reciprocal(rank))
    return rr
Example #5
def meancurvature(pos, faces):
  if pos.shape[-1] != 3:
    raise ValueError("Vertices positions must have shape [n,3]")

  if faces.shape[-1] != 3:
    raise ValueError("Face indices must have shape [m,3]") 

  n = pos.shape[0]
  stiff, mass = laplacebeltrami_FEM_v2(pos, faces)
  ai, av = mass
  mcf = tsparse.spmm(ai, torch.reciprocal(av), n, n, tsparse.spmm(*stiff, n, n, pos))
  return mcf.norm(dim=-1, p=2), stiff, mass
Example #6
def kl_ind_mv(mvg0, indep0):
    # Closed-form KL(mvg0 || indep0): full-covariance Gaussian vs. diagonal Gaussian.
    delta_mean = mvg0.mean - indep0.mean
    indep_recip = torch.reciprocal(indep0.variance)
    mahalanobis_term = torch.sum(torch.mul(torch.mul(delta_mean, indep_recip), delta_mean), dim=-1)

    dimension = mvg0.event_shape[0]
    logdet_gauss = torch.logdet(mvg0.covariance_matrix)
    log_det_ind = torch.sum(torch.log(indep0.variance), dim=-1)
    partial_term = log_det_ind - logdet_gauss - dimension
    first_term = torch.sum(
        torch.diagonal(torch.mul(torch.unsqueeze(indep_recip, dim=-2), mvg0.covariance_matrix),
                       dim1=-2, dim2=-1),
        dim=-1)
    kl_score = 0.5 * (partial_term + mahalanobis_term + first_term)
    return kl_score
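A small hedged check of the closed form above: the same KL (full-covariance Gaussian to diagonal Gaussian) can be obtained from torch.distributions by promoting the diagonal Gaussian to a MultivariateNormal; the shapes below are illustrative.

import torch
from torch.distributions import Independent, MultivariateNormal, Normal, kl_divergence

B, D = 3, 4
mvg0 = MultivariateNormal(torch.randn(B, D),
                          scale_tril=torch.tril(torch.rand(B, D, D)) + torch.eye(D))
indep0 = Independent(Normal(torch.randn(B, D), torch.rand(B, D) + 0.1), 1)

diag_as_mvn = MultivariateNormal(indep0.mean, covariance_matrix=torch.diag_embed(indep0.variance))
assert torch.allclose(kl_ind_mv(mvg0, indep0), kl_divergence(mvg0, diag_as_mvn), atol=1e-4)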
Example #7
    def kullback_leibler_similarity(
        mu_e: torch.FloatTensor,
        mu_r: torch.FloatTensor,
        sigma_e: torch.FloatTensor,
        sigma_r: torch.FloatTensor,
        epsilon: float = 1.0e-10,
    ) -> torch.FloatTensor:
        r"""Compute the similarity based on KL divergence.

        This is done between two Gaussian distributions given by mean mu_* and diagonal covariance matrix sigma_*.

        .. math::

            D((\mu_e, \Sigma_e), (\mu_r, \Sigma_r))
            = \frac{1}{2} \left(
                tr(\Sigma_r^{-1}\Sigma_e)
                + (\mu_r - \mu_e)^T\Sigma_r^{-1}(\mu_r - \mu_e)
                - \log \frac{det(\Sigma_e)}{det(\Sigma_r)} - k_e
            \right)

        Note: The sign of the function is flipped relative to the description in the paper, since the
              Kullback-Leibler divergence is large when the distributions are dissimilar.

        :param mu_e: torch.Tensor, shape: (s_1, ..., s_k, d)
            The mean of the first Gaussian.
        :param mu_r: torch.Tensor, shape: (s_1, ..., s_k, d)
            The mean of the second Gaussian.
        :param sigma_e: torch.Tensor, shape: (s_1, ..., s_k, d)
            The diagonal covariance matrix of the first Gaussian.
        :param sigma_r: torch.Tensor, shape: (s_1, ..., s_k, d)
            The diagonal covariance matrix of the second Gaussian.
        :param epsilon: float (default=1.0e-10)
            Small constant used to avoid numerical issues when dividing.

        :return: torch.Tensor, shape: (s_1, ..., s_k)
            The similarity.
        """
        d = mu_e.shape[-1]
        safe_sigma_r = torch.clamp_min(sigma_r, min=epsilon)
        sigma_r_inv = torch.reciprocal(safe_sigma_r)

        #: a = tr(\Sigma_r^{-1}\Sigma_e)
        a = torch.sum(sigma_e * sigma_r_inv, dim=-1)

        #: b = (\mu_r - \mu_e)^T\Sigma_r^{-1}(\mu_r - \mu_e)
        mu = mu_r - mu_e
        b = torch.sum(sigma_r_inv * mu**2, dim=-1)

        #: c = \log \frac{det(\Sigma_e)}{det(\Sigma_r)}
        # = sum log (sigma_e)_i - sum log (sigma_r)_i
        c = sigma_e.clamp_min(min=epsilon).log().sum(
            dim=-1) - safe_sigma_r.log().sum(dim=-1)
        return -0.5 * (a + b - c - d)
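A hedged consistency check, treating the method as a standalone function: the returned similarity should equal minus the torch.distributions KL divergence between the two diagonal Gaussians (sigma_* are variances, so the Normal scale is their square root).

import torch
from torch.distributions import Independent, Normal, kl_divergence

mu_e, mu_r = torch.randn(5, 3), torch.randn(5, 3)
sigma_e, sigma_r = torch.rand(5, 3) + 0.1, torch.rand(5, 3) + 0.1

sim = kullback_leibler_similarity(mu_e, mu_r, sigma_e, sigma_r)
kl = kl_divergence(Independent(Normal(mu_e, sigma_e.sqrt()), 1),
                   Independent(Normal(mu_r, sigma_r.sqrt()), 1))
assert torch.allclose(sim, -kl, atol=1e-5)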
Example #8
def getMrr(indices, targets):
    """
    Calculates the MRR score for the given predictions and targets
    """
    tmp = targets.view(-1, 1)
    targets = tmp.expand_as(indices)
    hits = (targets == indices).nonzero()
    ranks = hits[:, -1] + 1
    ranks = ranks.float()
    rranks = torch.reciprocal(ranks)
    mrr = torch.sum(rranks).data / targets.size(0)
    return mrr
Example #9
def reciprocal(x: T.FloatTensor) -> T.FloatTensor:
    """
    Elementwise inverse of a tensor.

    Args:
        x (non-zero): A tensor.

    Returns:
        tensor: Elementwise inverse.

    """
    return torch.reciprocal(x)
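For instance (the values shown are exact):

import torch

print(reciprocal(torch.tensor([2.0, 4.0, -0.5])))   # tensor([ 0.5000,  0.2500, -2.0000])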
Example #10
    def get_likelihood(self, e, ro, pi, mu, sigma, y_true):
        # Negative log-likelihood of a bivariate Gaussian mixture with correlation `ro`,
        # mixture weights `pi`, and `self.num_mixtures` components per time step.
        mu_shp = mu.shape
        mu = mu.unsqueeze(3).reshape(mu_shp[0], mu_shp[1], mu_shp[2] // self.num_mixtures, self.num_mixtures)
        mu_x = mu[:, :, 0, :]
        mu_y = mu[:, :, 1, :]

        sigma = sigma.unsqueeze(3).reshape(mu_shp[0], mu_shp[1], mu_shp[2] // self.num_mixtures, self.num_mixtures)
        sigma_x = sigma[:, :, 0, :]
        sigma_y = sigma[:, :, 1, :]

        reci_sigma_xy = torch.reciprocal(sigma_x * sigma_y)
        reci_sigma_xx = torch.reciprocal(sigma_x * sigma_x)
        reci_sigma_yy = torch.reciprocal(sigma_y * sigma_y)

        cood_x = y_true[:, :, 1:2].float()
        cood_y = y_true[:, :, 2:].float()

        # Bivariate Gaussian density: quadratic form in the exponent and normalizing factor.
        exp_denom_ro = torch.reciprocal(2 * (1 - ro * ro))
        norm_denom_ro = torch.reciprocal(2 * np.pi * torch.sqrt(1 - ro * ro)) * reci_sigma_xy

        z_xx = (cood_x - mu_x) * (cood_x - mu_x) * reci_sigma_xx
        z_yy = (cood_y - mu_y) * (cood_y - mu_y) * reci_sigma_yy
        z_xy = -2 * ro * (cood_y - mu_y) * (cood_x - mu_x) * reci_sigma_xy
        z = z_xx + z_yy + z_xy

        exp_term = torch.exp(-1 * z * exp_denom_ro)
        N = torch.sum(norm_denom_ro * exp_term * pi, dim=2)
        N = -1 * torch.log(N + 1e-20)

        return N
Example #11
    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Optional[Callable] = None,
        w_type: Union[Weight, str] = Weight.SQUARE,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
    ) -> None:
        """
        Args:
            include_background: If False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            other_act: callable function to apply another activation layer if neither `sigmoid` nor
                `softmax` is wanted, for example: `other_act = torch.tanh`. Defaults to ``None``.
            w_type: {``"square"``, ``"simple"``, ``"uniform"``}
                Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act

        w_type = Weight(w_type)
        self.w_func: Callable = torch.ones_like
        if w_type == Weight.SIMPLE:
            self.w_func = torch.reciprocal
        elif w_type == Weight.SQUARE:
            self.w_func = lambda x: torch.reciprocal(x * x)
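To illustrate the weighting step in isolation (the per-class volumes below are hypothetical; this is not the full forward pass): the selected `w_func` turns each class's ground-truth volume into a weight, so under `Weight.SQUARE` rare classes dominate the generalized Dice score.

import torch

ground_o = torch.tensor([[10000.0, 100.0]])     # per-class foreground volume, shape [batch, classes]
w = torch.reciprocal(ground_o * ground_o)       # Weight.SQUARE: 1 / volume**2
print(w)                                        # tensor([[1.0000e-08, 1.0000e-04]])
# w then multiplies the per-class intersection and union terms of the Dice score.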
Example #12
    def mean(self):
        """
        The mean of the Amoroso distribution exists for `alpha + 1/beta >= 0`.
        It can be computed analytically by

        ```
        mean = a + theta * gamma(alpha + 1/beta) / gamma(alpha)
        ```
        """
        a, theta, alpha, beta = self.a, self.theta, self.alpha, self.beta
        return a + torch.exp(
            torch.log(theta) + torch.lgamma(alpha + torch.reciprocal(beta)) -
            torch.lgamma(alpha))
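A quick hedged sanity check of the expression (hypothetical parameter values): for `beta = 1` the Amoroso distribution reduces to a shifted Gamma, and since `lgamma(alpha + 1) - lgamma(alpha) = log(alpha)` the mean collapses to `a + theta * alpha`.

import torch

a, theta, alpha, beta = 0.0, torch.tensor(2.0), torch.tensor(3.0), torch.tensor(1.0)
mean = a + torch.exp(torch.log(theta) + torch.lgamma(alpha + torch.reciprocal(beta)) - torch.lgamma(alpha))
assert torch.isclose(mean, theta * alpha)   # 2.0 * 3.0 = 6.0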
Example #13
    def _init_layers(self, x: torch.Tensor, mask_len: int,
                     k: int) -> torch.Tensor:
        noise = torch.rand(*x.shape, dtype=x.dtype, device=x.device) * self.eps
        corrs = torch.sum(torch.abs(corrcoef((x + noise).transpose(1, 0))),
                          dim=1)
        scores = torch.reciprocal(corrs)

        if self.scaling:
            scores = (4.0 * scores / torch.max(scores)
                      )  # TODO: remove hard coding or annotate
        self.layer_correlation = torch.nn.functional.softmax(scores, dim=-1)

        if self.ht_norm:
            # Horvitz-Thompson normalization (1 / marginal_prob for each element)
            probabilities = self.layer_correlation
            samples = max(1000, 4 * x.size(-1))  # TODO: why? (explain 1000)
            self.norm = torch.reciprocal(
                mc_probability(probabilities, k, samples))

        # Initially we should pass identity mask,
        # otherwise we won't get right correlations for all layers
        return torch.ones(mask_len, dtype=x.dtype, device=x.device)
Example #14
    def forward(self, frames):

        _, (x, _) = self.lstm(frames)  # lstm out,hidden,
        # x = x[:, -1]  #last layer -> embeds

        x = self.linear(x[-1])
        x = self.relu(x)
        # x = torch.mean(x,dim=1)
        # x = self.relu(x)

        x = x * torch.reciprocal(torch.norm(x, dim=1, keepdim=True))

        return x
Example #15
    def mdnloss(pi, mu, var, target):
        quad = torch.pow(target.expand_as(mu) - mu,
                         2) * torch.reciprocal(var + VAR_EPS) * -0.5
        logdet = torch.log(var + VAR_EPS) * -0.5
        # logconstant = torch.log(2*np.pi) * -0.5
        logpi = torch.log(pi)
        exponents = quad + logdet + logpi

        logprobs = torch.logsumexp(exponents, 1)
        gmm_prob = torch.exp(logprobs)
        gmm_nll = -torch.mean(logprobs)

        return gmm_nll
Example #16
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -(x - mean) * (x - mean)
    log_norm *= torch.reciprocal(2. * log_var.exp())
    log_norm += -0.5 * log_var
    log_norm += -0.5 * torch.log(2. * PI)

    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm
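A hedged check that this matches torch.distributions.Normal.log_prob when the scale is exp(0.5 * log_var); PI is assumed to be a module-level constant equal to math.pi.

import math
import torch
from torch.distributions import Normal

PI = torch.tensor(math.pi)   # assumption: the module-level constant the snippet relies on
x, mean, log_var = torch.randn(6), torch.randn(6), torch.randn(6)
lp = log_normal_normalized(x, mean, log_var, reduce=False)
assert torch.allclose(lp, Normal(mean, torch.exp(0.5 * log_var)).log_prob(x), atol=1e-5)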
Example #17
    def _get_weights(ref, trg):
        with torch.no_grad():
            mean_trg = normalize(trg.mean(dim=2, keepdim=True), dim=1, p=2)
            weights = torch.sum(ref * mean_trg, dim=1).clamp_min(0.0)

            sum_weights = torch.sum(weights, dim=1, keepdim=True)
            scales = torch.where(sum_weights > 0.0,
                                 torch.reciprocal(sum_weights),
                                 torch.ones_like(sum_weights))
            num_nodes = weights.size(1)
            norm_weights = (float(num_nodes) * scales) * weights

        return norm_weights
Example #18
def th_mdn_loss_ind(gt, mu, sigma, pi, mask, V, C=17, **kwargs):
    BS = gt.shape[0]
    M = pi.shape[-1]
    I = gt.shape[1]
    lmask = (torch.sum(mask, 2) > 0).float()

    mask = torch.reshape(mask[:, :, np.repeat(np.arange(C), 2)],
                         (BS, I, 1, C * 2))
    mask = mask.repeat(1, 1, M, 1)

    gt = gt.reshape(BS, I, 1, 2 * C)
    gt = gt.repeat(1, 1, M, 1)

    mu = mu.reshape(BS, I, M, 2 * C)
    V = torch.reshape(V[np.repeat(np.arange(C), 2)], (1, 1, 1, C * 2))
    sigma = torch.reshape(
        sigma,
        (BS, I, M, 2))[:, :, :,
                       np.concatenate([np.arange(2) for _ in np.arange(C)])]

    e = .5 * ((gt - mu) * torch.reciprocal(sigma) * torch.reciprocal(V))**2
    e = torch.where(mask > .0, e, torch.zeros_like(e))
    e = torch.sum(e, -1)

    nviskps = torch.sum(mask[:, :, :, 0::2] > .0, -1).float().detach()
    sigma_y = sigma[:, :, :, 0]
    sigma_x = sigma[:, :, :, 1]
    PI = torch.tensor(np.pi).cuda()
    coef = -nviskps * torch.log(sigma_y) - nviskps * torch.log(
        sigma_x) - nviskps * torch.log(2 * PI)
    exponent = torch.log(pi) + coef - e
    loss = -torch.squeeze(
        log_sum_exp(exponent, 2, mdn_max=kwargs.get("mdn_max", False)), 2)
    _loss = torch.sum(lmask * loss, dim=1).detach()
    loss = torch.sum(lmask * loss)
    loss = loss / (1. + torch.sum(lmask))
    _loss = _loss / (1. + torch.sum(lmask))

    return loss, _loss
Example #19
 def forward(self, x, sldj, reverse=False):
     if not reverse:
         y = torch.sigmoid(x)
         ldj = -F.softplus(x) - F.softplus(-x)
         ldj = ldj.flatten(1).sum(-1)
         sldj = sldj + ldj
         return y, sldj
     else:
         y = -(torch.reciprocal(x) - 1.).log()
         ldj = -x.log() - (1. - x).log()
         ldj = ldj.flatten(1).sum(-1)
         sldj = sldj + ldj
         return y, sldj
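A brief hedged check of the reverse branch: `-(1/x - 1).log()` is the logit, so it undoes the sigmoid, and the reverse log-determinant is the negative of the forward one.

import torch
import torch.nn.functional as F

x = torch.randn(8)
y = torch.sigmoid(x)
assert torch.allclose(-(torch.reciprocal(y) - 1.).log(), x, atol=1e-4)               # inverts the sigmoid
assert torch.allclose(-F.softplus(x) - F.softplus(-x), y.log() + (1 - y).log(), atol=1e-5)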
Example #20
def evaluate(args, model, f_dataset, src_idxs):
    # prepare dataloader
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(f_dataset)
    eval_dataloader = DataLoader(f_dataset,
                                 sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    # Evaluate!
    logger.info("***** Running evaluation on %s *****", args.tgt_lang)
    logger.info("  Num of examples = %d", len(f_dataset))
    logger.info("  Instantaneous batch size per GPU = %d",
                args.per_gpu_eval_batch_size)
    logger.info("  GPU IDs for training: %s",
                " ".join([str(id) for id in args.gpu_ids]))
    logger.info("  Batch size = %d", args.eval_batch_size)

    logits = None
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(batch[0], device=args.device)  # loss, logits
            logits_batch = outputs[0]  # batch_size x n_langs

        logits = logits_batch.detach() if logits is None else torch.cat(
            (logits, logits_batch.detach()), dim=0)

    logits = logits[:, src_idxs]

    if args.tau_metric == "var":
        tau = torch.var(logits)
    elif args.tau_metric == "std":
        tau = torch.std(logits)
    else:
        assert False
    tau = torch.reciprocal(tau)
    logger.info("==> tau: {}".format(tau))

    logits *= tau

    sims = torch.nn.functional.softmax(logits,
                                       dim=-1)  # dataset_len x n_src_langs

    dm_sims = torch.mean(sims, dim=0)  # n_src_langs

    logger.info("  Domain similarities:")
    logger.info("  " + "\t".join(args.src_langs))
    logger.info("  " + "\t".join([str(round(v.item(), 4)) for v in dm_sims]))

    return sims, dm_sims
Example #21
def mdn_loss_fn(pi, sigma, mu, y, avg=True):
    minsigma = sigma.min().item()
    assert minsigma >= 0, f'{minsigma} < 0'
    c = mu.size(2)
    result = (y.unsqueeze(1).expand_as(mu) - mu) * torch.reciprocal(sigma)
    result = 0.5 * torch.sum(result * result, 2)
    result -= torch.log(pi)
    result += 0.5 * c * math.log(2 * math.pi)
    result += torch.sum(torch.log(sigma), 2)
    result = -result
    result = -log_sum_exp(result, dim=1)
    if avg:
        result = torch.mean(result)
    return result
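A hedged cross-check of the same algebra against torch.distributions, assuming shapes pi: [N, K], mu/sigma: [N, K, C], y: [N, C] and that log_sum_exp reduces over the component dimension; the reference below rebuilds the per-component terms with torch.logsumexp.

import math
import torch
from torch.distributions import Categorical, Independent, MixtureSameFamily, Normal

N, K, C = 5, 3, 2
pi = torch.softmax(torch.randn(N, K), dim=1)
mu, sigma = torch.randn(N, K, C), torch.rand(N, K, C) + 0.1
y = torch.randn(N, C)

z = (y.unsqueeze(1) - mu) * torch.reciprocal(sigma)
log_comp = pi.log() - 0.5 * (z * z).sum(-1) - sigma.log().sum(-1) - 0.5 * C * math.log(2 * math.pi)
nll = -torch.logsumexp(log_comp, dim=1).mean()

mix = MixtureSameFamily(Categorical(probs=pi), Independent(Normal(mu, sigma), 1))
assert torch.allclose(nll, -mix.log_prob(y).mean(), atol=1e-5)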
Example #22
 def forward(self, input, target):
     lab_mask = self.lab_mask.type('torch.ByteTensor')
     output = to_variable(torch.masked_select(target, lab_mask))
     _assert_no_grad(output)
     prediction_t = torch.transpose(input, 1, 2)
     assign_variable = to_variable(self.Assign, require_grad=False)
     phone_out = torch.bmm(assign_variable, prediction_t)
     n, p = self.assignment.size()
     hot_sum = torch.reciprocal(torch.max(self.hot_sum, torch.ones(self.hot_sum.size())))
     hot_sum = hot_sum.view(n, self.assignment.max(), 1)
     hot_sum_3d = to_variable(hot_sum.expand(phone_out.size()), require_grad=False)
     phone_out = phone_out * hot_sum_3d
     phone_view_nonzero = phone_out.view(-1, 46)[lab_mask.view(-1).nonzero(), :].view(-1, 46)
     return F.cross_entropy(phone_view_nonzero, output, size_average=self.size_average)
Example #23
def _js_normal_normal(p, q, alpha=0.5):
    if len(p.batch_shape) > 2:
        cum_size = torch.cumprod(torch.tensor(p.batch_shape[:-1]), 0)[-1]
        p = p.view(cum_size, p.batch_shape[-1])
    if len(q.batch_shape) > 2:
        cum_size = torch.cumprod(torch.tensor(q.batch_shape[:-1]), 0)[-1]
        q = q.view(cum_size, q.batch_shape[-1])
    original_shape = p.batch_shape
    harmonic_std = torch.reciprocal((1 - alpha) *
                                    torch.reciprocal(p.variance) +
                                    alpha * torch.reciprocal(q.variance))
    harmonic_mean = torch.bmm(
        harmonic_std.diag_embed(),
        torch.bmm(
            (1 - alpha) *
            (p.variance.reciprocal()).diag_embed(), p.mean.unsqueeze(-1)) +
        alpha * torch.bmm(
            (q.variance.reciprocal()).diag_embed(), q.mean.unsqueeze(-1))
    ).squeeze(-1)
    harmonic_dist = Normal(harmonic_mean, torch.sqrt(harmonic_std))
    div = (1 - alpha) * kl.kl_divergence(
        p, harmonic_dist) + alpha * kl.kl_divergence(q, harmonic_dist)
    return div.view(*original_shape)
Example #24
def interpolate(p, q, x, k):
    batch_size = x.shape[0]
    in_channels = x.shape[1]
    sqdist, index = knn(p, q, k)
    sqdist = torch.clamp(sqdist, min=1e-10)
    weight = torch.reciprocal(sqdist)
    weight = weight / torch.sum(weight, dim=1, keepdim=True)
    weight = weight.unsqueeze(1)
    index = index.view(batch_size, -1)
    index = index.unsqueeze(1).expand(-1, in_channels, -1)
    x = torch.gather(x, 2, index)
    x = x.view(batch_size, in_channels, k, -1)
    x = torch.sum(x * weight, dim=2)
    return x
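The same inverse-squared-distance weighting can be sketched in a self-contained form; the knn helper used above is replaced here (as an assumption) by torch.cdist plus torch.topk, and the shapes are simplified to unbatched point sets.

import torch

def idw_interpolate(src_pos, src_feat, dst_pos, k=3):
    """src_pos: [M, 3], src_feat: [M, C], dst_pos: [N, 3] -> interpolated features [N, C]."""
    sqdist = torch.cdist(dst_pos, src_pos).pow(2).clamp(min=1e-10)   # [N, M] squared distances
    d, idx = torch.topk(sqdist, k, dim=1, largest=False)             # k nearest sources per target
    w = torch.reciprocal(d)
    w = w / w.sum(dim=1, keepdim=True)                               # normalized inverse-distance weights
    return (src_feat[idx] * w.unsqueeze(-1)).sum(dim=1)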
Example #25
    def forward(self, x, is_training=False):
        f = self.extractor(x)

        h = f[3]  # bs 2048 w/32 h/32

        g = self.unpool1(h)  # bs 2048 w/16 h/16
        c = self.conv1(torch.cat((g, f[2]), 1))
        c = self.bn1(c)
        c = self.relu1(c)

        h = self.conv2(c)
        h = self.bn2(h)  # bs 128 w/16 h/16
        h = self.relu2(h)

        g = self.unpool2(h)  # bs 128 w/8 h/8
        c = self.conv3(torch.cat((g, f[1]), 1))
        c = self.bn3(c)
        c = self.relu3(c)

        h = self.conv4(c)
        h = self.bn4(h)  # bs 64 w/8 h/8
        h = self.relu4(h)

        g = self.unpool3(h)  # bs 64 w/4 h/4
        c = self.conv5(torch.cat((g, f[0]), 1))
        c = self.bn5(c)
        c = self.relu5(c)

        h = self.conv6(c)  # bs 32 w/4 h/4
        h = self.bn6(h)
        h = self.relu6(h)

        g = self.conv7(h)
        g = self.bn7(g)  # bs 32 w/4 h/4
        g = self.relu7(g)

        inside_score = self.sigmod(self.conv8(g))

        side_v_code = self.sigmod(self.conv9(g))

        side_v_coord = self.conv10(g)
        if is_training:
            thresh_binary = torch.reciprocal(
                1 + torch.exp(-50 * (inside_score - 0.5)))  # DB (Differentiable Binarization) formula
            east_detect = torch.cat(
                (inside_score, side_v_code, side_v_coord, thresh_binary), 1)
        else:
            east_detect = torch.cat((inside_score, side_v_code, side_v_coord),
                                    1)
        return east_detect
Example #26
def test_reciprocal(method, get_clients) -> None:
    clients = get_clients(2)
    session_one = Session(parties=clients)
    SessionManager.setup_mpc(session_one)

    x_secret = torch.Tensor([-2.0, 6.0, 2.0, 3.0, -5.0, -0.5])

    x = MPCTensor(secret=x_secret, session=session_one)
    x_secret_reciprocal = torch.reciprocal(x_secret)

    x_reciprocal = reciprocal(x, method=method)
    assert torch.allclose(x_secret_reciprocal,
                          x_reciprocal.reconstruct(),
                          atol=1e-1)
Example #27
    def forward(self, depth, K, real_cam_height):
        inv_K = torch.inverse(K)

        cam_points = self.backproject_depth(depth, inv_K)
        surface_normal = self.get_surface_normal(cam_points)
        ground_mask = self.get_ground_mask(cam_points, surface_normal)

        cam_heights = (cam_points[:, :-1, :, :] *
                       surface_normal).sum(1).abs().unsqueeze(1)
        cam_heights_masked = torch.masked_select(cam_heights, ground_mask)
        cam_height = torch.median(cam_heights_masked).unsqueeze(0)
        scale = torch.reciprocal(cam_height).mul_(real_cam_height)

        return scale
Example #28
 def invert(self,
            add: Union[float, list, tuple] = 0.,
            multiply: Union[float, list, tuple] = 1.):
     assert self.state, "State dict is empty. Did you call 'update' prior to this?"
     if self.inv_state:
         Warning("State has already been inverted. Is this expected?")
     for index, (layer, value) in enumerate(self.state.items()):
         if not isinstance(add, float) and not isinstance(multiply, float):
             assert len(add) == len(multiply) == len(self.state)
             n, s = add[index], multiply[index]
         else:
             n, s = add, multiply
         reg_inv_lambda = torch.reciprocal(s * value + n).sqrt()
         self.inv_state[layer] = reg_inv_lambda
Example #29
def aten_reciprocal(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.RECIP)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.reciprocal(inp)]
Example #30
    def forward(self, x, res1, res2):
        if self.up_factor > 1:
            x = func.interpolate(x, scale_factor=self.up_factor)
            res2 = func.interpolate(res2, scale_factor=self.up_factor)
        x = self.deconv_1(x)
        x = torch.cat((x, res2), dim=1)
        x = self.deconv_2(x)
        x = torch.cat((x, res1), dim=1)
        map = self.deconv_3(x)

        if self.inv_output:
            map = torch.reciprocal(map.clamp(min=1e-8)) - 1

        return map