Code Example #1
File: common.py  Project: rfeinman/pytorch-minimize
import torch

def make_strictly_feasible(x, lb, ub, rstep=1e-10):
    """Shift a point to the interior of a feasible region.

    Each element of the returned vector is at least at a relative distance
    `rstep` from the closest bound. If ``rstep=0`` then ``torch.nextafter``
    is used.
    """
    x_new = x.clone()

    # `find_active_constraints` is defined elsewhere in this module; it marks
    # each element as -1 (at the lower bound), 1 (at the upper bound), or 0.
    active = find_active_constraints(x, lb, ub, rstep)
    lower_mask = torch.eq(active, -1)
    upper_mask = torch.eq(active, 1)

    if rstep == 0:
        # Assign rather than write through `out=x_new[mask]`: boolean indexing
        # produces a temporary, so an `out=` write would be silently discarded.
        x_new[lower_mask] = torch.nextafter(lb[lower_mask], ub[lower_mask])
        x_new[upper_mask] = torch.nextafter(ub[upper_mask], lb[upper_mask])
    else:
        # Step a relative distance `rstep` away from the active bound.
        x_new[lower_mask] = lb[lower_mask].add(
            lb[lower_mask].abs().clamp(1, None), alpha=rstep)
        x_new[upper_mask] = ub[upper_mask].sub(
            ub[upper_mask].abs().clamp(1, None), alpha=rstep)

    # Where the bounds are too tight to step inside, fall back to the midpoint.
    tight_bounds = (x_new < lb) | (x_new > ub)
    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])

    return x_new
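
A minimal usage sketch (the bounds and values below are illustrative; `find_active_constraints` must be available from the same module):

import torch

lb = torch.zeros(3)
ub = torch.ones(3)
x = torch.tensor([0.0, 0.5, 1.0])        # first and last elements sit exactly on the bounds
x_in = make_strictly_feasible(x, lb, ub)
assert ((x_in > lb) & (x_in < ub)).all()  # every element is now strictly interior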
Code Example #2
def _nextafter(x1, x2):
    # `np_nextafter` (a NumPy-backed fallback) and `_StitchGradient` are
    # defined elsewhere in the same file.
    with torch.no_grad():
        if hasattr(torch, "nextafter"):
            out = torch.nextafter(x1, x2)
        else:
            out = np_nextafter(x1, x2)
    return _StitchGradient.apply(x1, out)
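
The body of `_StitchGradient` is not shown here. A plausible sketch, assuming it implements the usual straight-through pattern (the forward pass returns the value computed under `no_grad`, while the backward pass treats the op as the identity in `x1`):

import torch

class _StitchGradient(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x1, out):
        # Return the result that was computed outside the autograd graph.
        return out

    @staticmethod
    def backward(ctx, grad_output):
        # Route the gradient straight to x1, as if the op were the identity;
        # `out` receives no gradient.
        return grad_output, None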
Code Example #3
def _round(a):
    # Nudging `a` up by one ulp before rounding breaks exact .5 ties upward,
    # instead of following rint's round-half-to-even rule.
    return int(torch.rint(torch.nextafter(a, a + 1)))
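
The effect on a tie, as a quick check (illustrative values):

import torch

x = torch.tensor(0.5)
torch.rint(x)                          # tensor(0.) -- ties round to even
torch.rint(torch.nextafter(x, x + 1))  # tensor(1.) -- nudged just above the tie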
Code Example #4
File: misc.py  Project: luisect/torchdiffeq
def _nextafter(x1, x2):
    # Fall back to a NumPy-backed helper on PyTorch builds that predate
    # torch.nextafter; `np_nextafter` is defined elsewhere in this file.
    if hasattr(torch, "nextafter"):
        return torch.nextafter(x1, x2)
    else:
        return np_nextafter(x1, x2)
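
A minimal sketch of what such a fallback could look like, round-tripping through NumPy (the body below is an assumption for illustration, not the project's exact code):

import numpy as np
import torch

def np_nextafter(x1, x2):
    # Compute nextafter in NumPy, then rebuild a tensor with the original
    # dtype and device.
    out = np.nextafter(x1.detach().cpu().numpy(), x2.detach().cpu().numpy())
    return torch.as_tensor(out, dtype=x1.dtype, device=x1.device)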
Code Example #5
File: sorting_utils.py  Project: longhuang318/dpdp
import math

import torch

# `compute_needed_bits` is a helper defined elsewhere in this module.
def kthvalue(vals, k, dim=-1, outmask=None, return_count_le=False, return_index=False, min_steps='auto', early_stop_max_k=None, max_chunk_size=int(1e7)):
    """
    Custom implementation of kthvalue for large tensors, which uses a binary search.
    """
    assert early_stop_max_k is None or not return_index, "Returning index not compatible with early stop"
    if early_stop_max_k is None:
        early_stop_max_k = k
    needle_size = (early_stop_max_k - k + 1) # How big is the interval we should hit (at least 1)

    # Use a bool mask; counts come from summing it (the sum promotes to long anyway)
    mask = outmask if outmask is not None else torch.empty_like(vals, dtype=torch.bool)
    if not torch.is_tensor(k):
        # Otherwise we would be casting to tensor over and over
        k = torch.tensor(k, device=vals.device)

    assert (0 < k <= vals.size(dim)).all()
    k, _ = torch.broadcast_tensors(k, vals.narrow(dim, start=0, length=1).squeeze(dim))

    # Compute the expected minimal number of steps, with some margin since we can be unlucky:
    # divide the number of items we're searching by the size of the interval we should hit;
    # the ratio is how much the search space must shrink, so we expect log2(ratio) steps.
    steps = compute_needed_bits((vals.size(dim) + needle_size - 1) // needle_size)[0] + 2 if min_steps == 'auto' else min_steps
    try:
        MINVAL = vals.new_tensor(torch.iinfo(vals.dtype).min)
        is_integer = True
    except TypeError:
        MINVAL = vals.new_tensor(-math.inf)
        is_integer = False

    if len(vals) > max_chunk_size:
        def compute_sum(val, dim):
            return torch.stack([chunk.sum(dim) for chunk in val.split(max_chunk_size, dim=dim)], dim).sum(dim)
    else:
        compute_sum = torch.sum

    lb_val, _ = vals.min(dim)
    ub_val, ub_ind = vals.max(dim)

    success = False
    mid_val, mid_ind = ub_val, ub_ind  # Initialize with ub_ind so that if lb_val == ub_val this is the corresponding idx
    count_le = vals.size(0)  # Everything is less than or equal to the upper bound
    # Note: even though we have floating point precision, the actual values will always be one of the elements
    # so we can use exact comparison
    while lb_val != ub_val and not (k <= count_le <= early_stop_max_k):

        for i in range(steps):
            # Round down, so when lb_val and ub_val are adjacent we never pick ub_val as the midpoint
            mid_val = ((lb_val + ub_val) // 2) if is_integer else ((lb_val + ub_val) / 2).clamp(max=torch.nextafter(ub_val, lb_val))
            torch.le(vals, mid_val, out=mask)
            count_le = compute_sum(mask, dim)
            # Find the largest value <= mid_val; this is an actual element of `vals`
            mid_val_exact, mid_ind = torch.where(mask, vals, MINVAL).max(dim)  # entries > mid_val are masked to MINVAL

            # Note: this is a scalar tensor
            success = count_le >= k

            # Done this way the update works in batch, and PyTorch does not synchronize!
            ub_val = torch.where(success, mid_val, ub_val)

            # As the new lower bound, mid_val itself cannot work, so use the next value
            # above it: mid_val + 1 for integers, or the floating-point successor.
            lb_val = torch.where(success, lb_val, mid_val + 1 if is_integer else torch.nextafter(mid_val, ub_val))

        # Typically we are done by now; otherwise take additional steps one at a time
        steps = 1

    kthval = ub_val
    if not success:
        # The upper bound is always guaranteed to be successful, so find its index
        _, kthval_idx = torch.eq(vals, kthval, out=mask).max(dim)
        if outmask is not None or return_count_le:
            # Make sure we fill the mask with entries smaller
            torch.le(vals, kthval, out=mask)
            if return_count_le:
                count_le = mask.sum()
    else:
        kthval_idx = mid_ind
    assert (kthval_idx >= 0).all()

    assert count_le >= k or not return_count_le  # May be incorrect if we don't return it
    if return_index:
        return (kthval, kthval_idx, count_le) if return_count_le else (kthval, kthval_idx)
    else:
        return (kthval, count_le) if return_count_le else kthval
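
A quick usage sketch, checked against torch.kthvalue (random data, illustrative only; it assumes the module's `compute_needed_bits` helper is importable alongside the function):

import torch

vals = torch.randn(1_000_000)
k = 10
assert kthvalue(vals, k) == torch.kthvalue(vals, k).values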
Code Example #6
File: math_ops.py  Project: TheVinhLuong102/pytorch
# mvlgamma
torch.mvlgamma(torch.empty(2, 3).uniform_(1, 2), 2)

# nan_to_num
w = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
torch.nan_to_num(w)
torch.nan_to_num(w, nan=2.0)
torch.nan_to_num(w, nan=2.0, posinf=1.0)

# neg/negative
torch.neg(torch.randn(5))

# nextafter
eps = torch.finfo(torch.float32).eps
torch.nextafter(torch.tensor([1.0, 2.0]),
                torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
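# The spacing between consecutive float32 values is eps throughout [1, 2), so
# the neighbor above 1.0 is 1 + eps and the neighbor below 2.0 is 2 - eps.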

# polygamma
torch.polygamma(1, torch.tensor([1, 0.5]))
torch.polygamma(2, torch.tensor([1, 0.5]))
torch.polygamma(3, torch.tensor([1, 0.5]))
torch.polygamma(4, torch.tensor([1, 0.5]))

# pow
a = torch.randn(4)  # `a` was defined earlier in the original file; a placeholder here
torch.pow(a, 2)
torch.pow(torch.arange(1., 5.), torch.arange(1., 5.))

# rad2deg
torch.rad2deg(torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]))