Example #1
def oneMix(mask, data=None, target=None):
    # Mix a pair of stacked samples: mask==1 pixels come from the first, mask==0 from the second.
    if data is not None:
        stackedMask0, _ = torch.broadcast_tensors(mask[0], data[0])
        data = (stackedMask0 * data[0] +
                (1 - stackedMask0) * data[1]).unsqueeze(0)
    if target is not None:
        stackedMask0, _ = torch.broadcast_tensors(mask[0], target[0])
        target = (stackedMask0 * target[0] +
                  (1 - stackedMask0) * target[1]).unsqueeze(0)
    return data, target
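A minimal standalone sketch of the same mask-mixing pattern, assuming two samples stacked along dim 0 and a binary mask; all shapes below are illustrative, not taken from any particular pipeline.

import torch

# Two hypothetical images stacked along dim 0, and a binary mask for the first one.
data = torch.randn(2, 3, 4, 4)
mask = torch.randint(0, 2, (1, 4, 4)).float()

# Broadcast the mask against one image so it can gate both element-wise.
stacked_mask, _ = torch.broadcast_tensors(mask, data[0])
mixed = (stacked_mask * data[0] + (1 - stacked_mask) * data[1]).unsqueeze(0)
print(mixed.shape)  # torch.Size([1, 3, 4, 4])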
Example #2
def face_add_arbitrary(faces, face_ix_to, per_face_data, output, counts=None):
    ind_1d = faces[:, face_ix_to]
    one = torch.ones((1, ), dtype=torch.float32, device=faces.device)
    if len(ind_1d.shape) < len(per_face_data.shape):
        ind_2d, _ = torch.broadcast_tensors(ind_1d.unsqueeze(1), per_face_data)
    else:
        ind_2d = ind_1d
    one_1d, _ = torch.broadcast_tensors(one, ind_1d)
    output.scatter_add_(0, ind_2d, per_face_data)
    if counts is not None:
        counts.scatter_add_(0, ind_1d, one_1d)
Example #3
 def __and__(self, y):
     """Bitwise AND operator (element-wise)"""
     result = self.clone()
     # TODO: Remove explicit broadcasts to allow smaller beaver triples
     if isinstance(y, BinarySharedTensor):
         broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
         result.share = broadcast_tensors[0].clone()
     elif torch.is_tensor(y):
         broadcast_tensors = torch.broadcast_tensors(result.share, y)
         result.share = broadcast_tensors[0].clone()
     return result.__iand__(y)
Example #4
def normalize(MEAN, STD, data=None, target=None):
    # Normalize an NCHW batch with per-channel mean and std (3-channel data only).
    if data is not None:
        if data.shape[1] == 3:
            STD = torch.Tensor(STD).unsqueeze(0).unsqueeze(2).unsqueeze(
                3).cuda()
            MEAN = torch.Tensor(MEAN).unsqueeze(0).unsqueeze(2).unsqueeze(
                3).cuda()
            STD, data = torch.broadcast_tensors(STD, data)
            MEAN, data = torch.broadcast_tensors(MEAN, data)
            data = ((data - MEAN) / STD).float()
    return data, target
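A CPU-only sketch of the same per-channel normalization, assuming an NCHW batch; the explicit torch.broadcast_tensors calls mirror the example above, although the subtraction and division would also broadcast implicitly.

import torch

data = torch.rand(8, 3, 32, 32)                              # hypothetical NCHW batch
MEAN = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)  # illustrative statistics
STD = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

STD, _ = torch.broadcast_tensors(STD, data)                  # (8, 3, 32, 32)
MEAN, _ = torch.broadcast_tensors(MEAN, data)
data = ((data - MEAN) / STD).float()
print(data.shape)  # torch.Size([8, 3, 32, 32])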
Example #5
def test_method_interface():
    shape = 11, 13
    lin = LinearMasked(*shape)

    mask = torch.tensor(1.0)
    lin.mask_(mask)
    assert torch.allclose(*torch.broadcast_tensors(lin.mask, mask))
    assert torch.allclose(lin.weight_masked, mask * lin.weight)

    lin.mask_(None)
    assert lin.mask is None

    with pytest.raises(RuntimeError, match=r"has no sparsity mask"):
        lin.weight_masked

    # enable/disable
    mask = torch.randint(2, size=(1, 1)).float()
    lin.mask_(mask)
    assert torch.allclose(*torch.broadcast_tensors(lin.mask, mask))
    assert torch.allclose(lin.weight_masked, mask * lin.weight)

    # output masking
    mask = torch.randint(2, size=(shape[1], 1)).float()
    lin.mask_(mask)
    assert torch.allclose(*torch.broadcast_tensors(lin.mask, mask))
    assert torch.allclose(lin.weight_masked, mask * lin.weight)

    # input masking
    mask = torch.randint(2, size=(
        1,
        shape[0],
    )).float()
    lin.mask_(mask)
    assert torch.allclose(*torch.broadcast_tensors(lin.mask, mask))
    assert torch.allclose(lin.weight_masked, mask * lin.weight)

    # unstructured masking
    mask = torch.randint(2, size=(
        shape[1],
        shape[0],
    )).float()
    lin.mask_(mask)
    assert torch.allclose(*torch.broadcast_tensors(lin.mask, mask))
    assert torch.allclose(lin.weight_masked, mask * lin.weight)

    # mask is overwritten (recreated), so `torch.expand` raises
    with pytest.raises(RuntimeError, match=r"must match the existing size"):
        lin.mask_(torch.ones(shape[1], 2))

    # mask is set anew, so `torch.expand` raises
    lin.mask_(None)
    with pytest.raises(RuntimeError, match=r"must match the existing size"):
        lin.mask_(torch.ones(0, shape[0]))
Example #6
 def forward(self, x):
     feature = self.layer(x).view(-1, self.numkernel, self.dimsize)
     feature = feature.unsqueeze(3)
     feature_ = feature.permute(3, 1, 2, 0)
     feature, feature_ = torch.broadcast_tensors(feature, feature_)
     norm = torch.sum(torch.abs(feature - feature_), dim=2)
     eraser = torch.eye(feature.shape[0], device=torch.device('cuda')).view(
         feature.shape[0], 1, feature.shape[0])
     eraser, norm = torch.broadcast_tensors(eraser, norm)
     c_b = torch.exp(-(norm + eraser * 1e6))
     o_b = torch.sum(c_b, dim=2)
     x = torch.cat((x, o_b), dim=1)
     return x
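A self-contained sketch of the pairwise L1 distance computed in the forward pass above, with an arbitrary feature tensor; the sizes are illustrative.

import torch

feature = torch.rand(5, 8, 10).unsqueeze(3)                      # (batch, kernels, dims, 1)
feature_ = feature.permute(3, 1, 2, 0)                           # (1, kernels, dims, batch)
feature, feature_ = torch.broadcast_tensors(feature, feature_)   # (batch, kernels, dims, batch)
norm = torch.sum(torch.abs(feature - feature_), dim=2)           # (batch, kernels, batch)
print(norm.shape)  # torch.Size([5, 8, 5])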
Example #7
def tridiagonal_solve(b, A_upper, A_diagonal, A_lower):
    """Solves a tridiagonal system Ax = b.
    The arguments A_upper, A_diagonal, A_lower correspond to the three diagonals of A. Letting U = A_upper, D = A_diagonal
    and L = A_lower, and assuming for simplicity that there are no batch dimensions, then the matrix A is assumed to be
    of size (k, k), with entries:
    D[0] U[0]
    L[0] D[1] U[1]
         L[1] D[2] U[2]                     0
              L[2] D[3] U[3]
                  .    .    .
                       .      .      .
                           .        .        .
                        L[k - 3] D[k - 2] U[k - 2]
           0                     L[k - 2] D[k - 1] U[k - 1]
                                          L[k - 1]   D[k]
    Arguments:
        b: A tensor of shape (..., k), where '...' is zero or more batch dimensions
        A_upper: A tensor of shape (..., k - 1).
        A_diagonal: A tensor of shape (..., k).
        A_lower: A tensor of shape (..., k - 1).
    Returns:
        A tensor of shape (..., k), corresponding to the x solving Ax = b
    Warning:
        This implementation isn't super fast. You probably want to cache the result, if possible.
    """

    # This implementation is very much written for clarity rather than speed.

    A_upper, _ = torch.broadcast_tensors(A_upper, b[..., :-1])
    A_lower, _ = torch.broadcast_tensors(A_lower, b[..., :-1])
    A_diagonal, b = torch.broadcast_tensors(A_diagonal, b)

    channels = b.size(-1)

    new_b = np.empty(channels, dtype=object)
    new_A_diagonal = np.empty(channels, dtype=object)
    outs = np.empty(channels, dtype=object)

    new_b[0] = b[..., 0]
    new_A_diagonal[0] = A_diagonal[..., 0]
    for i in range(1, channels):
        w = A_lower[..., i - 1] / new_A_diagonal[i - 1]
        new_A_diagonal[i] = A_diagonal[..., i] - w * A_upper[..., i - 1]
        new_b[i] = b[..., i] - w * new_b[i - 1]

    outs[channels - 1] = new_b[channels - 1] / new_A_diagonal[channels - 1]
    for i in range(channels - 2, -1, -1):
        outs[i] = (new_b[i] -
                   A_upper[..., i] * outs[i + 1]) / new_A_diagonal[i]

    return torch.stack(outs.tolist(), dim=-1)
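A quick sanity check of the solver against a dense solve, assuming tridiagonal_solve is defined as above (it also needs numpy imported as np); the system size is arbitrary.

import torch

k = 5
A_diagonal = torch.rand(k) + 2.0      # diagonally dominant, so the sweep stays stable
A_upper = torch.rand(k - 1)
A_lower = torch.rand(k - 1)
b = torch.rand(k)

# Assemble the dense tridiagonal matrix and compare against torch.linalg.solve.
A = torch.diag(A_diagonal) + torch.diag(A_upper, 1) + torch.diag(A_lower, -1)
x_dense = torch.linalg.solve(A, b)
x_tri = tridiagonal_solve(b, A_upper, A_diagonal, A_lower)
print(torch.allclose(x_dense, x_tri, atol=1e-5))  # expected: True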
Example #8
    def __init__(self,
                 loc,
                 covariance_matrix=None,
                 precision_matrix=None,
                 scale_tril=None,
                 validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")
        if (covariance_matrix is not None) + (scale_tril is not None) + (
                precision_matrix is not None) != 1:
            raise ValueError(
                "Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified."
            )

        loc_ = loc.unsqueeze(-1)  # temporarily add dim on right
        if scale_tril is not None:
            if scale_tril.dim() < 2:
                raise ValueError(
                    "scale_tril matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.scale_tril, loc_ = torch.broadcast_tensors(scale_tril, loc_)
        elif covariance_matrix is not None:
            if covariance_matrix.dim() < 2:
                raise ValueError(
                    "covariance_matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.covariance_matrix, loc_ = torch.broadcast_tensors(
                covariance_matrix, loc_)
        else:
            if precision_matrix.dim() < 2:
                raise ValueError(
                    "precision_matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.precision_matrix, loc_ = torch.broadcast_tensors(
                precision_matrix, loc_)
        self.loc = loc_[..., 0]  # drop rightmost dim
        self.normalizing_constant = torch.nn.Parameter(torch.tensor([1.]))

        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
        super(UnnormMVGaussian, self).__init__(batch_shape,
                                               event_shape,
                                               validate_args=validate_args)

        if scale_tril is not None:
            self._unbroadcasted_scale_tril = scale_tril
        else:
            if precision_matrix is not None:
                self.covariance_matrix = torch.inverse(
                    precision_matrix).expand_as(loc_)
            self._unbroadcasted_scale_tril = torch.cholesky(
                self.covariance_matrix)
Example #9
    def __init__(self,
                 loc,
                 covariance_matrix=None,
                 precision_matrix=None,
                 scale_tril=None,
                 validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")
        if (covariance_matrix is not None) + (scale_tril is not None) + (
                precision_matrix is not None) != 1:
            raise ValueError(
                "Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified."
            )

        loc_ = loc.unsqueeze(-1)  # temporarily add dim on right
        if scale_tril is not None:
            if scale_tril.dim() < 2:
                raise ValueError(
                    "scale_tril matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.scale_tril, loc_ = torch.broadcast_tensors(scale_tril, loc_)
        elif covariance_matrix is not None:
            if covariance_matrix.dim() < 2:
                raise ValueError(
                    "covariance_matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.covariance_matrix, loc_ = torch.broadcast_tensors(
                covariance_matrix, loc_)
        else:
            if precision_matrix.dim() < 2:
                raise ValueError(
                    "precision_matrix must be at least two-dimensional, "
                    "with optional leading batch dimensions")
            self.precision_matrix, loc_ = torch.broadcast_tensors(
                precision_matrix, loc_)
        self.loc = loc_[..., 0]  # drop rightmost dim

        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
        super(MultivariateNormal, self).__init__(batch_shape,
                                                 event_shape,
                                                 validate_args=validate_args)

        if scale_tril is not None:
            self._unbroadcasted_scale_tril = scale_tril
        elif covariance_matrix is not None:
            #self._unbroadcasted_scale_tril = torch.cholesky(covariance_matrix)
            self._unbroadcasted_scale_tril = torch.sqrt(covariance_matrix)
        else:  # precision_matrix is not None
            raise NotImplementedError(
                'Only covariance_matrix or scale_tril may be specified')
Example #10
    def generate_binary_triple(size0, size1, device=None):
        """Generate binary triples of given size"""
        generator = TTPClient.get().get_generator(device=device)

        a = generate_kbit_random_tensor(size0,
                                        generator=generator,
                                        device=device)
        b = generate_kbit_random_tensor(size1,
                                        generator=generator,
                                        device=device)

        if comm.get().get_rank() == 0:
            # Request c from TTP
            c = TTPClient.get().ttp_request("binary", device, size0, size1)
        else:
            size2 = torch.broadcast_tensors(a, b)[0].size()
            c = generate_kbit_random_tensor(size2,
                                            generator=generator,
                                            device=device)

        # Stack to vectorize scatter function
        a = BinarySharedTensor.from_shares(a)
        b = BinarySharedTensor.from_shares(b)
        c = BinarySharedTensor.from_shares(c)
        return a, b, c
Example #11
 def log_prob(self, value):
     if self._validate_args:
         self._validate_sample(value)
     value = value.long().unsqueeze(-1)
     value, log_pmf = torch.broadcast_tensors(value, self.logits)
     value = value[..., :1]
     return log_pmf.gather(-1, value).squeeze(-1)
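The gather-after-broadcast pattern above, rewritten as a standalone sketch with illustrative shapes and checked against torch.distributions.Categorical.

import torch

logits = torch.log_softmax(torch.randn(4, 6), dim=-1)       # (batch, num_classes)
value = torch.randint(0, 6, (4,))                            # one class index per row

value_ = value.long().unsqueeze(-1)                          # (4, 1)
value_, log_pmf = torch.broadcast_tensors(value_, logits)    # both (4, 6)
value_ = value_[..., :1]                                     # keep a single index column
log_prob = log_pmf.gather(-1, value_).squeeze(-1)            # (4,)

reference = torch.distributions.Categorical(logits=logits).log_prob(value)
print(torch.allclose(log_prob, reference))  # expected: True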
Example #12
    def forward(self, inputs, caps, mask=None):
        B, R, input_dim = inputs.shape  # (B, R, in)
        C, output_dim = caps.shape[1], self.output_dim

        inputs = inputs[None, :, :, None, :]  # (1, B, R, 1, in)
        caps = caps[None, :, :, None, :].transpose(0, 2)  # (C, B, 1, 1, in)
        inputs, caps = torch.broadcast_tensors(inputs,
                                               caps)  # (C, B, R, 1, in)
        u = torch.cat([inputs, caps], axis=-1)  # (C, B, R, 1, 2*in)
        u = self.dense_out(u)  # (C, B, R, 1, out)

        if mask is not None:
            float_mask = mask.float()[None, :, :, None,
                                      None]  # (B, R) => (1, B, R, 1, 1)
        else:
            float_mask = 1.

        b = torch.zeros(C, B, R, 1, 1).to(inputs.device)  # (C, B, R, 1, 1)
        for i in range(self.n_iter):
            v = b.softmax(dim=0) * float_mask  # (C, B, R, 1, 1)
            c_hat = (u * v).sum(2, keepdims=True)  # (C, B, 1, 1, out)
            c = squash(c_hat, dim=-1)  # (C, B, 1, 1, out)
            b = b + (c * u).sum(-1, keepdims=True)  # (C, B, R, 1, 1)

        out_c = c[:, :, 0, 0, :].permute(1, 0, 2)  # (B, C, out)
        out_v = v[:, :, :, 0, 0].permute(1, 2, 0)  # (B, R, C)
        out_b = b[:, :, :, 0, 0].permute(1, 2, 0)  # (B, R, C)

        #         print(out_v[0, 0])

        return out_c, out_v, out_b
Example #13
def proj_tangent(x, u):
    assert x.shape[-2:] == u.shape[-2:], "Wrong shapes"
    x, u = torch.broadcast_tensors(x, u)
    x_shape = x.shape
    x = x.reshape(-1, x_shape[-2], x_shape[-1])
    u = u.reshape(-1, x_shape[-2], x_shape[-1])
    xt = x.transpose(-1, -2)
    batch_size, n = x.shape[0:2]

    I = torch.eye(n, dtype=x.dtype, device=x.device)
    I = I.expand_as(x)

    mu = x * u

    A = linalg.block_matrix([[I, x], [xt, I]])

    B = A[:, :, 1:]

    z1 = mu.sum(dim=-1).unsqueeze(-1)
    zt1 = mu.sum(dim=-2).unsqueeze(-1)

    b = torch.cat(
        [z1, zt1],
        dim=1,
    )
    rhs = B.transpose(1, 2) @ (b - A[:, :, 0:1])
    lhs = B.transpose(1, 2) @ B
    zeta, _ = torch.solve(rhs, lhs)
    alpha = torch.cat(
        [torch.ones(batch_size, 1, 1, dtype=x.dtype), zeta[:, 0:n - 1]], dim=1)
    beta = zeta[:, n - 1:2 * n - 1]
    rgrad = mu - (alpha + beta.transpose(-1, -2)) * x

    rgrad = rgrad.reshape(x_shape)
    return rgrad
Example #14
    def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")
        event_shape = loc.shape[-1:]
        if cov_factor.dim() < 2:
            raise ValueError("cov_factor must be at least two-dimensional, "
                             "with optional leading batch dimensions")
        if cov_factor.shape[-2:-1] != event_shape:
            raise ValueError("cov_factor must be a batch of matrices with shape {} x m"
                             .format(event_shape[0]))
        if cov_diag.shape[-1:] != event_shape:
            raise ValueError("cov_diag must be a batch of vectors with shape {}".format(event_shape))

        loc_ = loc.unsqueeze(-1)
        cov_diag_ = cov_diag.unsqueeze(-1)
        try:
            loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors(loc_, cov_factor, cov_diag_)
        except RuntimeError:
            raise ValueError("Incompatible batch shapes: loc {}, cov_factor {}, cov_diag {}"
                             .format(loc.shape, cov_factor.shape, cov_diag.shape))
        self.loc = loc_[..., 0]
        self.cov_diag = cov_diag_[..., 0]
        batch_shape = self.loc.shape[:-1]

        self._unbroadcasted_cov_factor = cov_factor
        self._unbroadcasted_cov_diag = cov_diag
        self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
        super(LowRankMultivariateNormal, self).__init__(batch_shape, event_shape,
                                                        validate_args=validate_args)
Example #15
 def forward(self, x):
     set_feat = self.small_xf(x)
     comb_feat = torch.cat(torch.broadcast_tensors(x, set_feat.unsqueeze(-2)), dim=-1)
     heads = self.w(comb_feat).view(comb_feat.size()[:3]+(self.heads, -1))
     q = self.q.view((1, 1, 1)+self.q.size())
     e_heads = (heads * q).sum(dim=-1)
     return F.softmax(e_heads, dim=-2)
Example #16
def _broadcast_prediction_target(prediction, target):
    """ broadcast prediction and target in identical shapes

    Args:
        prediction (Tensor): prediction
        target (Tensor): target

    Returns:
        prediction and target in the same shape

    """
    if not torch.is_tensor(prediction):
        prediction = torch.tensor(prediction)
    if not torch.is_tensor(target):
        target = torch.tensor(target, dtype=prediction.dtype, device=prediction.device)
    target = target.to(dtype=prediction.dtype, device=prediction.device)

    while len(prediction.shape) < 4:
        prediction = prediction[:, None]
    while len(target.shape) < 4:
        target = target[:, None]
    try:
        prediction, target = torch.broadcast_tensors(prediction.clone(), target.clone())
    except RuntimeError:
        raise RuntimeError("failed to broadcast target in the same shape as prediction")
    return prediction, target
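A small usage sketch, assuming the helper above is in scope; the shapes are hypothetical (one prediction per sample against a 4-D target).

import torch

prediction = torch.rand(2)             # (N,), padded to 4-D inside the helper
target = torch.rand(2, 3, 8, 8)        # (N, C, H, W)
prediction, target = _broadcast_prediction_target(prediction, target)
print(prediction.shape, target.shape)  # both torch.Size([2, 3, 8, 8])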
Example #17
def broadcast_all(*values):
    r"""
    Given a list of values (possibly containing numbers), returns a list where each
    value is broadcasted based on the following rules:
      - `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
      - numbers.Number instances (scalars) are upcast to tensors having
        the same size and type as the first tensor passed to `values`.  If all the
        values are scalars, then they are upcasted to scalar Tensors.

    Args:
        values (list of `numbers.Number` or `torch.*Tensor`)

    Raises:
        ValueError: if any of the values is not a `numbers.Number` or
            `torch.*Tensor` instance
    """
    if not all(
            isinstance(v, torch.Tensor) or isinstance(v, Number)
            for v in values):
        raise ValueError(
            'Input arguments must all be instances of numbers.Number or torch.tensor.'
        )
    if not all([isinstance(v, torch.Tensor) for v in values]):
        options = dict(dtype=torch.get_default_dtype())
        for value in values:
            if isinstance(value, torch.Tensor):
                options = dict(dtype=value.dtype, device=value.device)
                break
        values = [
            v if isinstance(v, torch.Tensor) else torch.tensor(v, **options)
            for v in values
        ]
    return torch.broadcast_tensors(*values)
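A usage sketch for broadcast_all, assuming the function above (and its numbers.Number import) is in scope; it mixes a plain Python number with tensors of different shapes.

import torch

loc = torch.zeros(3, 1)
scale = 2.0                  # plain number, upcast to a tensor inside broadcast_all
weight = torch.ones(1, 4)

loc_b, scale_b, weight_b = broadcast_all(loc, scale, weight)
print(loc_b.shape, scale_b.shape, weight_b.shape)  # all torch.Size([3, 4])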
Example #18
def color_loss(input, target, reduction='mean'):
    pixDictionary = buildPixDictionary(target)
    print("Len pixDict", len(pixDictionary))
    print(pixDictionary)
    print(input.size())
    batch, channel, height, width = input.size()
    ret = torch.empty(batch, channel, height, width)
    nbPixelDif = len(pixDictionary)
    nbPixel = height * width * batch
    if target.requires_grad:
        for img in range(batch):
            for i in range(height):
                for j in range(width):
                    mse = (input[img, :, i, j] - target[img, :, i, j])**2
                    nbApparition = pixDictionary[getIndexPix(
                        input[img, :, i, j].detach())]
                    color_coef = nbPixel / (nbPixelDif * nbApparition)
                    ret[img, :, i, j] = mse * color_coef
    else:
        expanded_input, expanded_target = torch.broadcast_tensors(
            input, target)
        for i in range(height):
            for j in range(width):
                mse = (expanded_input[i][j] - expanded_target[i][j])**2
                nbApparition = pixDictionary[input[i][j]]
                color_coef = nbPixel / (nbPixelDif * nbApparition)
                ret[i][j] = mse * color_coef
    if reduction != 'none':
        ret = torch.mean(ret) if reduction == 'mean' else torch.sum(ret)
    return ret
Example #19
 def __init__(self, name: str, shape: tc.Size, device=None, **params):
     # for distributions whose parameter and random variable (or, one sample) have the same shape
     if device is None: device = Distr.default_device
     fnnames, fnvals, tennames, tenvals = [], [], [], []
     for pmname, pmval in params.items():
         if callable(pmval):
             fnnames.append(pmname)
             fnvals.append(pmval)
         else:
             tennames.append(pmname)
             tenvals.append(pmval)
     tenvals = tensorify(device, *tenvals)
     if shape is None:
         if Distr.has_name(name): shape = Distr.shape_var(name)
         elif tenvals: shape = tc.broadcast_tensors(*tenvals)[0].shape
         else: shape = tc.Size()
     parents = set()
     for fname, fval in zip(fnnames, fnvals):
         parents_inc = fargnames(fval)
         if parents_inc:
             parents |= parents_inc
             setattr(self, fname, fedic(fval))
         else:
             setattr(self,
                     fname,
                     lambda conds, fval=fval: tensorify(device, fval())[0].
                     expand(Distr.shape_bat(conds) + shape))
     for tname, tval in zip(tennames, tenvals):
         setattr(self,
                 tname,
                 lambda conds, tval=tval: tval.expand(
                     Distr.shape_bat(conds) + shape))
     super(DistrElem, self).__init__(names_shapes={name: shape},
                                     parents=parents)
     self._name, self._shape, self._device = name, shape, device
Example #20
def broadcast_all(*values):
    r"""
    Given a list of values (possibly containing numbers), returns a list where each
    value is broadcasted based on the following rules:
      - `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
      - numbers.Number instances (scalars) are upcast to tensors having
        the same size and type as the first tensor passed to `values`.  If all the
        values are scalars, then they are upcasted to scalar Tensors.

    Args:
        values (list of `numbers.Number` or `torch.*Tensor`)

    Raises:
        ValueError: if any of the values is not a `numbers.Number` or
            `torch.*Tensor` instance
    """
    if not all(torch.is_tensor(v) or isinstance(v, Number) for v in values):
        raise ValueError(
            'Input arguments must all be instances of numbers.Number or torch.tensor.'
        )
    if not all(map(torch.is_tensor, values)):
        new_tensor = _default_promotion
        for value in values:
            if torch.is_tensor(value):
                new_tensor = value.new_tensor
                break
        values = [v if torch.is_tensor(v) else new_tensor(v) for v in values]
    return torch.broadcast_tensors(*values)
Example #21
    def __init__(self,
                 loc,
                 scale=None,
                 link="probit",
                 sampling=5000,
                 validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")

        loc_ = loc.unsqueeze(-1)  # temporarily add dim on right

        if scale.dim() < 2:
            raise ValueError("scale must be at least two-dimensional, "
                             "with optional leading batch dimensions")
        self.scale, loc_ = torch.broadcast_tensors(scale, loc_)
        self.scale = scale
        self._covariance_matrix = scale
        self.loc = loc_[..., 0]  # drop rightmost dim
        self.df = scale.shape[-1]
        self.sampling = sampling
        self.alpha = 1.0

        if link == "logit":
            self.link = lambda value: torch.sigmoid(value)
            self.alpha = 1.7012
        elif link == "probit":
            self.link = lambda value: torch.distributions.Normal(0.0, 1.0).cdf(
                value)
        elif link == "linear":
            self.link = lambda value: torch.clamp(value, 0.0, 1.0)

        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
        super(MultivariateProbit, self).__init__(batch_shape,
                                                 event_shape,
                                                 validate_args=validate_args)
Example #22
def pairwise_soft_dtw(data1, data2, sdtw=None, device=torch.device('cpu')):
    if sdtw is None:
        raise ValueError('sdtw is None - initialize it with SoftDTW')

    # transfer to device
    data1, data2 = data1.to(device), data2.to(device)

    # (batch_size, seq_len, feature_dim=1)
    A = data1.unsqueeze(dim=2)

    # (cluster_size, seq_len, feature_dim=1)
    B = data2.unsqueeze(dim=2)

    distances = []
    for b in B:
        # (1, seq_len, 1)
        b = b.unsqueeze(dim=0)
        A, b = torch.broadcast_tensors(A, b)
        # (batch_size, 1)
        sdtw_distance = sdtw(b, A).view(-1, 1)
        distances.append(sdtw_distance)

    # (batch_size, cluster_size)
    dis = torch.cat(distances, dim=1)
    return dis
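The same pairwise loop with a plain L2 distance standing in for SoftDTW, so the broadcasting pattern can be run without the sdtw dependency; all sizes are illustrative.

import torch

A = torch.rand(5, 20, 1)      # (batch_size, seq_len, feature_dim=1)
B = torch.rand(3, 20, 1)      # (cluster_size, seq_len, feature_dim=1)

distances = []
for b in B:
    b = b.unsqueeze(dim=0)                         # (1, seq_len, 1)
    A_b, b_b = torch.broadcast_tensors(A, b)       # both (batch_size, seq_len, 1)
    d = (A_b - b_b).pow(2).sum(dim=(1, 2)).sqrt()  # plain L2 instead of soft-DTW
    distances.append(d.view(-1, 1))                # (batch_size, 1)

dis = torch.cat(distances, dim=1)                  # (batch_size, cluster_size)
print(dis.shape)  # torch.Size([5, 3])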
Example #23
    def forward(self, predicted_mesh):
        predicted_mesh_verts = predicted_mesh.vertices
        predicted_mesh_indices = predicted_mesh.indices
        lap = laplacian(predicted_mesh_verts, predicted_mesh_indices)
        # just measure squared distance from smooth version
        avg_edge_l = average_edge_length_by_vertex(predicted_mesh_verts,
                                                   predicted_mesh_indices)
        avg_edge_l, _ = torch.broadcast_tensors(avg_edge_l.unsqueeze(1), lap)
        if self.edge_length_scale is not None:
            loss_per_vertex = (predicted_mesh_verts -
                               lap) * self.edge_length_scale / avg_edge_l
        else:
            loss_per_vertex = predicted_mesh_verts - lap
        if self.scale_by_orig_smoothness:
            if self.orig is None:
                # Add a little bit so that nothing's / 0
                self.orig = loss_per_vertex.clone().detach() + 0.01
            # Scale by orig. If the orig loss for a given vertex was HIGH,
            # it's not so bad (divide)
            # if the orig loss for a given vertex was LOW, then it is bad!
            loss_per_vertex = loss_per_vertex - self.orig

        sqrt_error = torch.sqrt(
            torch.sum(loss_per_vertex * loss_per_vertex, dim=1))
        mean_error = torch.mean(sqrt_error)
        return mean_error
Example #24
def test(args, model, device, test_loader, epoch, writer):
    model.eval()
    test_loss = 0
    correct = 0
    isEval = False
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)

            data, _ = torch.broadcast_tensors(
                data, torch.zeros((steps, ) + data.shape))
            data = data.permute(1, 2, 3, 4, 0)

            output = model(data)
            test_loss += F.cross_entropy(
                output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    writer.add_scalar('Test Loss /epoch', test_loss, epoch)
    writer.add_scalar('Test Acc /epoch',
                      100. * correct / len(test_loader.dataset), epoch)
    for i, (name, param) in enumerate(model.named_parameters()):
        if '_s' in name:
            writer.add_histogram(name, param, epoch)

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
Example #25
def broadcast_except(*tensors: torch.Tensor, dim=-1):
    shape = torch.broadcast_tensors(*[t.select(dim, 0)
                                      for t in tensors])[0].shape
    return [
        t.expand(*shape[:t.ndim + dim + 1], t.shape[dim],
                 *shape[t.ndim + dim + 1:]) for t in tensors
    ]
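A call sketch for broadcast_except above: the batch dimensions are broadcast while the selected dim keeps its own size per tensor; the input shapes are illustrative.

import torch

a = torch.rand(2, 5)
b = torch.rand(1, 7)
a_b, b_b = broadcast_except(a, b, dim=-1)
print(a_b.shape, b_b.shape)  # torch.Size([2, 5]) torch.Size([2, 7])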
Example #26
def broadcast_and_squeeze(*args):
    assert all([is_tensor(ar) for ar in args]), 'at least 1 object is not torch tensor'
    if all([np.prod(val.shape[2:]) == 1 for val in args]):
        args = [val.contiguous().view(size=val.shape[:2] + tuple([1, 1])) for val in args]
    uniformed_values = uniform_shapes(*args)
    broadcasted_values = torch.broadcast_tensors(*uniformed_values)
    return broadcasted_values
Example #27
 def _feature_dropout(self, p=0.5, training=True, inplace=False):
     """Randomly zeros out entire channels in the input tensor with probability
     :attr:`p`. (a channel is a nD feature map, e.g., the :math:`j`-th channel
     of the :math:`i`-th sample in the batched input is a nD tensor
     :math:`\text{input}[i, j]`)."""
     assert self.dim(
     ) >= 2, "feature dropout requires dimension to be at least 2"
     assert p >= 0.0 and p <= 1.0, "dropout probability has to be between 0 and 1"
     if training is False:
         if inplace:
             return self
         else:
             return self.clone()
     # take first 2 dimensions
     feature_dropout_size = self.size()[0:2]
     # create dropout tensor over the first two dimensions
     rand_tensor = crypten.mpc.rand(feature_dropout_size)
     feature_dropout_tensor = rand_tensor > p
     # Broadcast to remaining dimensions
     for i in range(2, self.dim()):
         feature_dropout_tensor = feature_dropout_tensor.unsqueeze(i)
     feature_dropout_tensor.share, self.share = torch.broadcast_tensors(
         feature_dropout_tensor.share, self.share)
     if inplace:
         result_tensor = self.mul_(feature_dropout_tensor).div_(1 - p)
     else:
         result_tensor = self.mul(feature_dropout_tensor).div_(1 - p)
     return result_tensor
Example #28
 def loss(self, item, pred):
     actual = item['pos'].unsqueeze(-2)
     mask = item['mask'].unsqueeze(-1)
     pred, actual = torch.broadcast_tensors(pred, actual)
     loss = F.mse_loss(pred, actual, reduction='none').sum(-1)
     loss *= mask
     return loss.mean()
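A simplified, self-contained variant of the masked loss above, assuming several candidate predictions per target position; the shapes and the mask are illustrative.

import torch
import torch.nn.functional as F

pred = torch.rand(8, 4, 3)                  # (batch, candidates, xyz)
actual = torch.rand(8, 3).unsqueeze(-2)     # (batch, 1, xyz)
mask = torch.ones(8, 4)                     # 1 where a candidate is valid

pred_b, actual_b = torch.broadcast_tensors(pred, actual)       # both (8, 4, 3)
loss = F.mse_loss(pred_b, actual_b, reduction='none').sum(-1)  # (8, 4)
loss = loss * mask
print(loss.mean())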
Example #29
 def _grid_offsets(H, W, dtype, device):
     """
     Returns the grid offsets HxWx2 within [-1,1]
     """
     if (H, W) in VideoTools._offset_cache:
         return VideoTools._offset_cache[(H, W)]
     else:
         print("Create grid offsets for warping: W=%d, H=%d" % (W, H))
         grid_offsetsH = torch.linspace(-1,
                                        +1,
                                        H,
                                        dtype=dtype,
                                        device=device)
         grid_offsetsW = torch.linspace(-1,
                                        +1,
                                        W,
                                        dtype=dtype,
                                        device=device)
         grid_offsetsH = torch.unsqueeze(grid_offsetsH, 1)
         grid_offsetsW = torch.unsqueeze(grid_offsetsW, 0)
         grid_offsets = torch.stack(torch.broadcast_tensors(
             grid_offsetsW, grid_offsetsH),
                                    dim=2)
         grid_offsets = torch.unsqueeze(grid_offsets, 0)  # batch dimension
         grid_offsets = grid_offsets.detach()
         VideoTools._offset_cache[(H, W)] = grid_offsets
         return grid_offsets
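A CPU-only sketch of the same HxWx2 offset grid, built with torch.broadcast_tensors exactly as in the helper above; torch.meshgrid would give an equivalent result.

import torch

H, W = 4, 6
ys = torch.linspace(-1, 1, H).unsqueeze(1)                   # (H, 1)
xs = torch.linspace(-1, 1, W).unsqueeze(0)                   # (1, W)
grid = torch.stack(torch.broadcast_tensors(xs, ys), dim=2)   # (H, W, 2): x, then y
grid = grid.unsqueeze(0)                                     # add batch dimension
print(grid.shape)  # torch.Size([1, 4, 6, 2])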
Example #30
    def __init__(self, loc, precision_diag, belief, df, validate_args=True):
        precision_diag, belief, df = self.convert_float_params_to_tensor(
            loc, precision_diag, belief, df)
        if loc.dim() < 1 or precision_diag.dim() < 1 or df.dim(
        ) < 1 or belief.dim() < 1:
            raise ValueError(
                "loc, precision_diag, df, belief must be at least one-dimensional."
            )
        if belief.size(-1) == 1 and precision_diag.size(-1) != 1:
            raise ValueError(
                "belief shouldn't end with dimensionality 1 if precision_diag doesn't"
            )
        if df.size(-1) == 1 and precision_diag.size(-1) != 1:
            raise ValueError(
                "df shouldn't end with dimensionality 1 if precision_diag doesn't"
            )
        df_, belief_ = df.unsqueeze(-1), belief.unsqueeze(
            -1)  # add dim on right
        self.loc, self.precision_diag, df_, belief_ = torch.broadcast_tensors(
            loc, precision_diag, df_, belief_)
        self.df, self.belief = df_[..., 0], belief_[...,
                                                    0]  # drop rightmost dim

        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
        self.dimensionality = event_shape.numel()
        if (self.df <= (self.dimensionality + 1)).any():
            raise ValueError(
                "df must be greater than dimensionality + 1 to have expectation"
            )
        super(NormalDiagonalWishart,
              self).__init__(batch_shape,
                             event_shape,
                             validate_args=validate_args)