Example 1
def iCDF_CTG(x, lmda, std=1):
    """Inverse CDF of a complementary truncated Gaussian, i.e. a Gaussian
    restricted to the tails outside [-lmda, lmda].

    Relies on iCDF_G, the inverse CDF of a zero-mean Gaussian, which is
    defined elsewhere in the source module.
    """
    coeff = 1 / torch.erfc(lmda / (std * torch.sqrt(torch.tensor([2.]))))
    val = 0.5 * torch.erfc(lmda / (std * torch.sqrt(torch.tensor([2.]))))
    y = torch.zeros_like(x)
    y[x < val] = coeff * iCDF_G(x[x < val], std=std)
    y[x > 1 - val] = coeff * iCDF_G(x[x > 1 - val], std=std)

    return y
def aux2aux(sig, xx):
    # Evaluates 0.5 * (exp(t1) + exp(t2)) with the erfc factors folded into
    # log space, which keeps the exp prefactors from overflowing.
    t1 = 1 / (4 * sig**2) - xx / sig + torch.log(
        torch.erfc(1 / (2 * sig) - xx))
    t2 = 1 / (4 * sig**2) + xx / sig + torch.log(
        torch.erfc(1 / (2 * sig) + xx))
    res = torch.exp(t1) + torch.exp(t2)
    res *= 0.5
    return res
def aux2aux(sigma, x, y):
    # Same log-space trick as above, split over three terms; x and y are
    # ordered first so that erf(y) - erf(x) is non-negative.
    x, y = torch.min(x, y), torch.max(x, y)
    t1 = -(x + y) / sigma + 1 / sigma**2 + torch.log(
        torch.erfc(1 / sigma - x))
    t2 = (x - y) / sigma + torch.log(torch.erf(y) - torch.erf(x))
    t3 = (x + y) / sigma + 1 / sigma**2 + torch.log(
        torch.erfc(1 / sigma + y))
    res = torch.exp(t1) + torch.exp(t2) + torch.exp(t3)
    res *= 0.5
    return res
    def func_asym_pos_inf(self, x: Tensor) -> Tensor:
        # NOTE: `scipy` is assumed to alias scipy.special in the source
        # module; erfi lives in scipy.special.
        y = math.pow(math.sqrt(math.pi) / 2, 3) * torch.exp(torch.pow(x, 2))
        if x.is_cuda:
            if x.numel() > mnn_config.get_value('cpu_or_gpu'):
                y.mul_(torch.pow(torch.erfc(-x), 2) * self.dawson1.erfi(x))
            else:
                device = y.device
                y.mul_(
                    torch.pow(torch.erfc(-x), 2) * torch.from_numpy(
                        scipy.erfi(x.cpu().numpy())).to(device=device))
        else:
            y.mul_(
                torch.pow(torch.erfc(-x), 2) *
                torch.from_numpy(scipy.erfi(x.numpy())))
        return y
Example 5
    def normal_cdf(self, x, mu=0.0, sigma=1.0):
        """
        The cumulative distribution function for the Normal distribution

        Example:

            >>> import pyhf
            >>> pyhf.set_backend("pytorch")
            >>> pyhf.tensorlib.normal_cdf(0.8)
            tensor(0.7881)
            >>> values = pyhf.tensorlib.astensor([0.8, 2.0])
            >>> pyhf.tensorlib.normal_cdf(values)
            tensor([0.7881, 0.9772])

        Args:
            x (:obj:`tensor` or :obj:`float`): The observed value of the random variable to evaluate the CDF for
            mu (:obj:`tensor` or :obj:`float`): The mean of the Normal distribution
            sigma (:obj:`tensor` or :obj:`float`): The standard deviation of the Normal distribution

        Returns:
            PyTorch FloatTensor: The CDF
        """
        # the implementation of torch.Normal.cdf uses torch.erf:
        # 0.5 * (1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2)))
        # (see https://github.com/pytorch/pytorch/blob/3bbedb34b9b316729a27e793d94488b574e1577a/torch/distributions/normal.py#L78-L81)
        # we get a more numerically stable variant for low p-values/high significances using erfc(x) := 1 - erf(x)
        # since erf(-x) = -erf(x) we can replace
        # 1 + erf(x) = 1 - erf(-x) = 1 - (1 - erfc(-x)) = erfc(-x)
        mu, sigma = broadcast_all(mu, sigma)
        return 0.5 * torch.erfc(-((x - mu) * sigma.reciprocal() / math.sqrt(2)))
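The erfc form pays off for moderately large negative arguments, where 1 + erf(x) cancels catastrophically. A minimal illustration (not part of pyhf):

import math
import torch

x = torch.tensor([-6.0], dtype=torch.float32)
naive = 0.5 * (1 + torch.erf(x / math.sqrt(2)))  # erf rounds to -1.0 in float32, so this is 0.0
stable = 0.5 * torch.erfc(-x / math.sqrt(2))     # ~9.87e-10, the correct tail probability
print(naive.item(), stable.item())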
Example 6
def generate_CTG(shape, lmda, std=1):
    "Generates complementary truncated Gaussian samples"
    val = 0.5 * torch.erfc(lmda / (std * torch.sqrt(torch.tensor([2.]))))
    x = torch.rand(shape) * val
    x[torch.rand(shape) < 0.5] += 1 - val
    y = iCDF_CTG(x, lmda, std)
    return y
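iCDF_G is not shown in these snippets. A minimal stand-in for trying generate_CTG, assuming iCDF_G is the inverse CDF of a zero-mean Gaussian and that iCDF_CTG from Example 1 is in scope:

import math
import torch

def iCDF_G(x, std=1):
    # Hypothetical stand-in: inverse CDF of a zero-mean Gaussian with the given std.
    return std * math.sqrt(2.0) * torch.erfinv(2 * x - 1)

samples = generate_CTG((1000,), lmda=1.0, std=1.0)
print(samples[:5])  # every sample falls in one of the two tails, away from the centre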
def get_expected_brownian_bridge_max_starting_from_0_and_ending_at_or_above_0(
        end_time, end_point):
    # Derived from eq. (2.7) of https://www.researchgate.net/publication/236984395_On_the_maximum_of_the_generalized_Brownian_bridge
    # to get the PDF, then integrating to get the expected value.
    return end_point + \
           1/2 * torch.sqrt(np.pi / 2 * end_time) * torch.exp(end_point ** 2 / (2 * end_time)) \
           * torch.erfc(end_point / torch.sqrt(2 * end_time))
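A quick Monte Carlo check of the closed form above (not from the source), simulating a Brownian bridge pinned at end_point over [0, end_time]:

import numpy as np  # the closed form above uses np.pi
import torch

torch.manual_seed(0)
T, b = torch.tensor(2.0), torch.tensor(0.5)
n_paths, n_steps = 5000, 1000

# Brownian motion on a grid, then pin the endpoint at b to obtain a bridge.
dW = torch.randn(n_paths, n_steps) * torch.sqrt(T / n_steps)
W = torch.cat([torch.zeros(n_paths, 1), dW.cumsum(1)], dim=1)
t = torch.linspace(0, T.item(), n_steps + 1)
bridge = W + (b - W[:, -1:]) * (t / T)

mc = bridge.max(dim=1).values.mean()
cf = get_expected_brownian_bridge_max_starting_from_0_and_ending_at_or_above_0(T, b)
print(mc.item(), cf.item())  # mc sits slightly below cf: the discrete grid misses the true maximum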
Example 8
def burgers_delta(x: torch.tensor, t: torch.tensor, v: float, A: float):
    """Function to load the analytical solutions of Burgers equation with delta peak initial condition: u(x, 0) = A delta(x)

    Source: https://www.iist.ac.in/sites/default/files/people/IN08026/Burgers_equation_viscous.pdf
    Note that this source has an error in the erfc prefactor, should be sqrt(pi)/2, not sqrt(pi/2).

    Args:
        x ([Tensor]): Input vector of spatial coordinates.
        t ([Tensor]): Input vector of temporal coordinates.
        v (Float): Velocity.
        A (Float): Amplitude of the initial condition.

    Returns:
        [Tensor]: solution.
    """
    x, t = torch.meshgrid(x, t)
    R = torch.tensor(A / (2 * v))  # otherwise throws error
    z = x / torch.sqrt(4 * v * t)

    u = (
        torch.sqrt(v / (pi * t))
        * ((torch.exp(R) - 1) * torch.exp(-(z ** 2)))
        / (1 + (torch.exp(R) - 1) / 2 * torch.erfc(z))
    )
    coords = torch.cat((t.reshape(-1, 1), x.reshape(-1, 1)), dim=1)
    return coords, u.view(-1, 1)
Example 9
def get_expected_rank(batch_mus,
                      batch_vars,
                      batch_cocos=None,
                      return_pairsub_paras=False,
                      return_cdf=False):
    if batch_cocos is None:
        batch_pairsub_mus, batch_pairsub_vars = get_diff_normal(
            batch_mus=batch_mus, batch_vars=batch_vars)
    else:
        batch_pairsub_mus, batch_pairsub_vars = get_diff_normal(
            batch_mus=batch_mus,
            batch_vars=batch_vars,
            batch_cocos=batch_cocos)
    ''' expected ranks '''
    # \Phi(0)
    batch_Phi0 = 0.5 * torch.erfc(
        batch_pairsub_mus / torch.sqrt(2 * batch_pairsub_vars))
    # remove diagonal entries
    batch_Phi0_subdiag = torch.triu(batch_Phi0, diagonal=1) + torch.tril(
        batch_Phi0, diagonal=-1)
    batch_expt_ranks = torch.sum(batch_Phi0_subdiag, dim=2) + 1.0

    if return_pairsub_paras:
        return batch_expt_ranks, batch_pairsub_mus, batch_pairsub_vars
    elif return_cdf:
        return batch_expt_ranks, batch_Phi0_subdiag
    else:
        return batch_expt_ranks
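The identity behind these expected ranks is E[rank_i] = 1 + sum_{j != i} P(s_j > s_i); since s_i - s_j is normal with the pairsub mean and variance, the losing probability is Phi(0) = 0.5 * erfc(pairsub_mu / sqrt(2 * pairsub_var)), which is exactly batch_Phi0 above.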
Example 10
def neg_log_likelihood(batch_pairsub_mus,
                       batch_pairsub_vars,
                       top_k=None,
                       device=None):
    '''
    Compute the negative log-likelihood w.r.t. rankings, where the likelihood is formulated as the joint probability of
    consistent pairwise comparisons.
    @param batch_pairsub_mus: mean w.r.t. a pair comparison
    @param batch_pairsub_vars: variance w.r.t. a pair comparison
    @return:
    '''
    batch_full_erfc = torch.erfc(batch_pairsub_mus /
                                 torch.sqrt(2 * batch_pairsub_vars))

    if top_k is None:
        # use the triu-part of pairwise probabilities w.r.t. d_i > d_j, and using the trick: log(1.0) is zero
        batch_p_ij_triu = 1.0 - 0.5 * torch.triu(batch_full_erfc, diagonal=1)
        # batch_neg_log_probs = - torch.log(triu_probs) # facing the issue of nan due to overflow
        batch_neg_log_probs = F.binary_cross_entropy(input=batch_p_ij_triu,
                                                     reduction='none',
                                                     target=torch.ones_like(
                                                         batch_p_ij_triu,
                                                         device=device))
    else:  # the part to keep will be 1, otherwise 0
        keep_mask = torch.triu(torch.ones_like(batch_pairsub_vars), diagonal=1)
        keep_mask[:, top_k:, :] = 0.0  # without considering pairs beneath position-k
        batch_p_ij_triu_top_k = 1 - batch_full_erfc * keep_mask * 0.5
        # batch_neg_log_probs = - torch.log(1 - batch_full_erfc * keep_mask * 0.5)  # using the trick: log(1.0) is zero
        batch_neg_log_probs = F.binary_cross_entropy(
            input=batch_p_ij_triu_top_k,
            reduction='none',
            target=torch.ones_like(batch_p_ij_triu_top_k, device=device))

    return batch_neg_log_probs  # with a shape of [batch_size, ranking_size, ranking_size]
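binary_cross_entropy against all-one targets is simply a numerically guarded -log(p_ij). A quick illustration (not from the source):

import torch
import torch.nn.functional as F

p = torch.tensor([0.9, 1.0])
print(F.binary_cross_entropy(p, torch.ones_like(p), reduction='none'))  # tensor([0.1054, 0.0000])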
Example 11
def get_expected_rank_const(batch_mus,
                            const_var,
                            return_pairsub_paras=False,
                            return_cdf=False):

    # f_ij, i.e., mean difference
    batch_pairsub_mus = torch.unsqueeze(batch_mus, dim=2) - torch.unsqueeze(
        batch_mus, dim=1)
    # variance w.r.t. s_i - s_j, which is equal to sigma^2_i + sigma^2_j
    pairsub_vars = 2 * const_var**2
    ''' expected ranks '''
    # \Phi(0)
    batch_Phi0 = 0.5 * torch.erfc(
        batch_pairsub_mus / torch.sqrt(2 * pairsub_vars))
    # remove diagonal entries
    batch_Phi0_subdiag = torch.triu(batch_Phi0, diagonal=1) + torch.tril(
        batch_Phi0, diagonal=-1)
    batch_expt_ranks = torch.sum(batch_Phi0_subdiag, dim=2) + 1.0

    if return_pairsub_paras:
        return batch_expt_ranks, batch_pairsub_mus, pairsub_vars
    elif return_cdf:
        return batch_expt_ranks, batch_Phi0_subdiag
    else:
        return batch_expt_ranks
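A minimal usage sketch (not from the source). Note that const_var has to be a tensor here, because torch.sqrt is applied to an expression containing it:

import torch

batch_mus = torch.tensor([[2.0, 1.0, 0.0]])  # [batch=1, ranking_size=3]
ranks = get_expected_rank_const(batch_mus, const_var=torch.tensor(1.0))
print(ranks)  # approximately [[1.32, 2.00, 2.68]]: higher scores get expected ranks closer to 1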
Example 12
    def _phi(x):
        r2 = math.sqrt(2)
        rpi2 = math.sqrt(math.pi / 2)
        phix = torch.exp(x**2 / 2) * torch.erfc(x / r2) * rpi2

        idx = (x > 5)
        phix[idx] = (1 / x[idx]) - (1 / x[idx]**3) + 3 / x[idx]**5

        idx = (x < -5)
        phix[idx] = torch.exp(x[idx]**2 / 2) * rpi2
        return phix
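The first line of _phi is the Mills ratio of the standard normal, (1 - Phi(x)) / pdf(x); the x > 5 branch substitutes its standard asymptotic series, which avoids multiplying a huge exp by a vanishing erfc. A short sanity check of the base formula (not from the source) against torch.distributions:

import math
import torch

x = torch.tensor([0.0, 1.0, 2.0, 4.0], dtype=torch.float64)
normal = torch.distributions.Normal(0.0, 1.0)
mills = (1 - normal.cdf(x)) / torch.exp(normal.log_prob(x))
direct = torch.exp(x**2 / 2) * torch.erfc(x / math.sqrt(2)) * math.sqrt(math.pi / 2)
print(torch.allclose(mills, direct))  # True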
    def func_int_asym_pos_inf(self, x: Tensor) -> Tensor:
        if x.is_cuda:
            if x.numel() > mnn_config.get_value('cpu_or_gpu'):
                e1 = self.dawson1.erfi(x)
            else:
                device = x.device
                e1 = torch.from_numpy(scipy.erfi(
                    x.cpu().numpy())).to(device=device)
        else:
            e1 = torch.from_numpy(scipy.erfi(x.numpy()))

        return math.pi**2 / 32 * (e1 - 1) * e1 * torch.pow(torch.erfc(-x), 2)
Example 14
    def custom_loss_function(self, batch_preds, batch_std_labels, **kwargs):
        '''
        @param batch_preds: [batch, ranking_size] each row represents the mean predictions for documents associated with the same query
        @param batch_std_labels: [batch, ranking_size] each row represents the standard relevance grades for documents associated with the same query
        @param kwargs:
        @return:
        '''
        assert 'presort' in kwargs and kwargs[
            'presort'] is True  # aiming for direct usage of ideal ranking
        assert 'nDCG' == self.metric  # TODO support more metrics
        assert LABEL_TYPE.MultiLabel == kwargs[
            'label_type']  # other types are not considered yet
        label_type = kwargs['label_type']

        batch_mus = batch_preds
        ''' expected ranks '''
        # f_ij, i.e., mean difference
        batch_pairsub_mus = torch.unsqueeze(
            batch_mus, dim=2) - torch.unsqueeze(batch_mus, dim=1)
        # variance w.r.t. s_i - s_j, which is equal to sigma^2_i + sigma^2_j
        pairsub_vars = 2 * self.delta**2
        # \Phi(0)
        batch_Phi0 = 0.5 * torch.erfc(
            batch_pairsub_mus / torch.sqrt(2 * pairsub_vars))
        # remove diagonal entries
        batch_Phi0_subdiag = torch.triu(batch_Phi0, diagonal=1) + torch.tril(
            batch_Phi0, diagonal=-1)
        batch_expt_ranks = torch.sum(batch_Phi0_subdiag, dim=2) + 1.0

        batch_gains = torch.pow(2.0, batch_std_labels) - 1.0
        batch_dists = 1.0 / torch.log2(
            batch_expt_ranks + 1.0)  # discount coefficients
        batch_idcgs = torch_dcg_at_k(batch_rankings=batch_std_labels,
                                     label_type=label_type,
                                     device=self.device)

        #TODO check the effect of removing batch_idcgs
        if self.top_k is None:
            batch_dcgs = batch_dists * batch_gains
            batch_expt_nDCG = torch.sum(batch_dcgs / batch_idcgs, dim=1)
            batch_loss = -torch.sum(batch_expt_nDCG)
        else:
            k = min(self.top_k, batch_std_labels.size(1))
            batch_dcgs = batch_dists[:, 0:k] * batch_gains[:, 0:k]
            batch_expt_nDCG_k = torch.sum(batch_dcgs / batch_idcgs, dim=1)
            batch_loss = -torch.sum(batch_expt_nDCG_k)

        self.optimizer.zero_grad()
        batch_loss.backward()
        self.optimizer.step()

        return batch_loss
Example 15
    def translator_function(self, fed_out, interval):
        """
        Translator function for the clay fraction model.
        :param fed_out: the tensor fed out from the linear layer
        :param interval: depth interval that the AEM data covers in the 3-D grid
            (list of tuples corresponding to sub-surfaces).
        :return: clay fraction at each AEM data point computed by the translator function
        """
        k = special.erfcinv(0.05)
        AEM_CF = 0.5 * torch.erfc(k * fed_out)
        acc_cf = torch.sum(AEM_CF * interval, 2) / sum(interval)
        return acc_cf
Example 16
def make_base(tt, w, tau, model_coh: bool = False) -> torch.Tensor:
    """
    Calculates the basis for the linear least squares problem

    Parameters
    ----------
    tt : ndarray
        2D-time points
    w : float
        System response
    tau : ndarray
        1D decay times
    model_coh : bool
        If true, appends a gaussian and its first two
        derivatives to model coherent behavior at time-zero.

    Returns
    -------
    torch.Tensor
        The basis matrix A.
    """

    k = 1 / (tau[None, None, ...])

    t = tt[..., None]
    nw = w[:, None]
    A = 0.5 * torch.erfc(-t / nw + nw * k / (2.0))
    A *= torch.exp(k * (nw * nw * k / (4.0) - t))

    if model_coh:
        exp_half = torch.exp(torch.tensor(0.5))
        scaled_tt = tt / w
        coh = torch.exp(-0.5 * scaled_tt * scaled_tt)
        coh = coh[:, :, None].repeat((1, 1, 3))
        coh[..., 1] *= (-scaled_tt * exp_half)
        coh[..., 2] *= (scaled_tt - 1)
        A = torch.cat((A, coh), dim=-1)

    #if torch.isnan(A).any():
    #    print(A)
    torch.nan_to_num(A, out=A)
    return A
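The erfc * exp product in make_base is the closed form for an exponential decay convolved with a Gaussian instrument response (an exponentially modified Gaussian): each basis column is 0.5 * exp(k * (w*w*k/4 - t)) * erfc(-t/w + w*k/2), with w parameterizing the IRF width.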
    def sample_pair(self, weights, L):
        inputs_a = MultivariateNormal(torch.zeros(self.num_inputs),
                                      scale_tril=torch.mm(
                                          torch.diag(torch.sqrt(self.theta)),
                                          L)).sample()
        inputs_b = MultivariateNormal(torch.zeros(self.num_inputs),
                                      scale_tril=torch.mm(
                                          torch.diag(torch.sqrt(self.theta)),
                                          L)).sample()
        if self.dichotomized:
            inputs_a = (inputs_a > 0).float()
            inputs_b = (inputs_b > 0).float()
        inputs = inputs_a - inputs_b
        targets = torch.bernoulli(
            0.5 * torch.erfc(-(weights * inputs).sum(-1, keepdim=True) /
                             (2 * self.sigma)))

        return inputs, targets, inputs_a, inputs_b
Example 18
    def eval(self, tt, w, tau, model_coh=False):
        """
        Evaluates a model for given arrays

        Parameters
        ----------
        tt : ndarray
            Contains the delay-times, should have the same shape as the data.
        w : float
            The IRF width.
        tau : ndarray
            Contains the decay times.
        """
        tt = torch.from_numpy(tt)
        tau = torch.from_numpy(tau)
        if self.use_cuda:
            tt = tt.cuda()
            tau = tau.cuda()

        k = 1 / (tau[None, None, ...])
        t = (tt)[..., None]
        if w == 0:
            A = torch.exp(-k * tt)
        else:
            A = torch.exp(k * (w * w * k / (4.0) - t)) \
                * 0.5 * torch.erfc(-t / w + w * k / (2.0))
        if model_coh:
            # exp_half is a module-level constant in the source; presumably
            # exp(0.5), matching make_base above.
            coh = torch.exp(-0.5 * (tt / w) * (tt / w))
            coh = coh[:, :, None].repeat((1, 1, 3))
            coh[..., 1] *= (-tt * exp_half / w)
            coh[..., 2] *= (tt * tt / w / w - 1)
            A = torch.cat((A, coh), dim=-1)

        X, fit, res = lstsq(A, self.data)
        self.done_eval = True
        self.c = X
        self.model = fit
        self.residuals = res
        return X, fit, res
Example 20
def get_prob_pairwise_comp_probs(batch_pairsub_mus, batch_pairsub_vars,
                                 q_doc_rele_mat):
    '''
    The difference of two normal random variables is another normal random variable.
    pairsub_mu & pairsub_var denote the corresponding mean & variance of the difference of two normal random variables.
    p_ij denotes the probability that d_i beats d_j.
    @param batch_pairsub_mus:
    @param batch_pairsub_vars:
    @param q_doc_rele_mat:
    @return:
    '''
    subtopic_std_diffs = torch.unsqueeze(
        q_doc_rele_mat, dim=2) - torch.unsqueeze(q_doc_rele_mat, dim=1)
    subtopic_std_Sij = torch.clamp(subtopic_std_diffs, min=-1.0,
                                   max=1.0)  # ensuring S_{ij} \in {-1, 0, 1}
    subtopic_std_p_ij = 0.5 * (1.0 + subtopic_std_Sij)
    batch_std_p_ij = torch.mean(subtopic_std_p_ij, dim=0, keepdim=True)

    batch_p_ij = 1.0 - 0.5 * torch.erfc(
        batch_pairsub_mus / torch.sqrt(2 * batch_pairsub_vars))

    return batch_p_ij, batch_std_p_ij
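Note that 1.0 - 0.5 * erfc(mu / sqrt(2 * var)) = Phi(mu / sqrt(var)): the probability that the normal score difference s_i - s_j is positive, i.e. that d_i beats d_j.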
Example 21
def fn_test_erf(x):
    return F.relu(torch.erf(x) - torch.erfc(x))
Example 22
    def _standardized_cumulative(self, inputs):
        # type: (Tensor) -> Tensor
        half = float(0.5)
        const = float(-(2**-0.5))
        # Using the complementary error function maximizes numerical precision.
        return half * torch.erfc(const * inputs)
Example 23
def test_erfc(x, y):
    c = torch.erfc(torch.add(x, y))
    return c
Example 24
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0,
                                                              -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4,
                                                             -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
Example 25
"""
torch.erfc : Computes the complementary error function of each element of input
torch.where : Choose output based on condition
torch.eig : Computes the eigenvalues and eigenvectors of a real square matrix
torch.lstsq : Computes the solution to the least squares and least norm problems
torch.svd : Singular value decomposition of an input
"""
import torch
import numpy as np


"""complementary error function."""
# torch.erf(a) + torch.erfc(a) = 1
a = torch.randn(2, 2)
print("a: \n", a)
erfc_a = torch.erfc(a)
print("\nerfc(a): \n", erfc_a)

# example 2 - working
b = torch.randn(1, 1)
ans = torch.zeros(1, 1)
print("b: \n", b)
torch.erfc(input=b, out=ans)
print("\nerfc(b):\n", ans)

"""where condition."""
# filter tensor based on specified condition
x = torch.randn(4, 4)
y = torch.zeros(4, 4)
res = torch.where(x > 0, x, y)
print("x:\n", x)
Example 26
    def forward(self, x):
        return torch.erfc(x)
Example 27
def Phi1D(x, m, c):
    # 0.70710678... = 1 / sqrt(2), so this is the standard normal CDF of z
    z = (x - m) / c.squeeze(-1).sqrt()
    return (erfc(-z * 0.70710678118654746171500846685) / 2).squeeze(-1)
def PhiDiagonal(z):
    return (erfc(-z * 0.70710678118654746171500846685) / 2).prod(-1)
def standardized_CDF_gaussian(value):
    # Gaussian
    # return 0.5 * (1. + torch.erf(value / np.sqrt(2)))
    return 0.5 * torch.erfc(value * (-1. / np.sqrt(2)))
Example 30
def Phi(z):
    # sqrt2M1 is a module-level constant in the source, presumably 1 / sqrt(2).
    return erfc(-z * sqrt2M1) / 2
Example 31
def _ndtr(x: torch.Tensor) -> torch.Tensor:
    """
    Standard normal CDF. Called <phid> in Genz's original code.
    """
    return 0.5 * torch.erfc(_neg_inv_sqrt2 * x)
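_neg_inv_sqrt2 is a module-level constant in the source, presumably -1 / sqrt(2), which makes this the standard normal CDF. A self-contained check under that assumption:

import math
import torch

_neg_inv_sqrt2 = -1.0 / math.sqrt(2.0)

def _ndtr(x: torch.Tensor) -> torch.Tensor:
    return 0.5 * torch.erfc(_neg_inv_sqrt2 * x)

x = torch.linspace(-3, 3, 7, dtype=torch.float64)
print(torch.allclose(_ndtr(x), torch.distributions.Normal(0.0, 1.0).cdf(x)))  # True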