Example #1
    def ista(self, x: torch.Tensor, r: torch.Tensor):
        """ISTA steps for sparsification

        Args:
            x ([torch.tensor]): Input for reconstruction
            r ([torch.tensor]): Initialization of the code

        Returns:
            [torch.tensor]: the sparse code fitted to x
        """
        r.requires_grad_(True)
        converged = False
        # update R
        optim = torch.optim.SGD([r], lr=self.lr_r)
        # alternate a gradient step on the reconstruction loss with the
        # soft-thresholding proximal step until the code stops changing
        while not converged:
            old_r = r.clone().detach()
            # prediction
            x_hat = self.U(r)
            # reconstruction loss
            loss = ((x - x_hat) ** 2).sum()
            loss.backward()
            # gradient step on r
            optim.step()
            # zero the gradients of r and of the model parameters
            optim.zero_grad()
            self.zero_grad()
            # proximal step: soft-thresholding enforces sparsity
            r.data = self.soft_thresholding_(r, self.lmda)
            # stop once the relative change of r drops below 1%
            with torch.no_grad():
                converged = torch.norm(r - old_r) / torch.norm(old_r) < 0.01
        return r
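
The method assumes a surrounding module that supplies the decoder U, the code step size lr_r, the sparsity weight lmda, and the soft_thresholding_ prox. Below is a minimal sketch of such a host class; everything beyond those four attribute names (the class name, the dimensions, the linear decoder) is an assumption for illustration, not the original author's implementation:

import torch
import torch.nn as nn

class SparseCoder(nn.Module):
    """Hypothetical host class for ista(); only the attribute names
    (U, lr_r, lmda, soft_thresholding_) come from the snippet above."""

    def __init__(self, code_dim=64, input_dim=256, lr_r=0.01, lmda=0.1):
        super().__init__()
        # linear decoder ("dictionary") mapping codes back to input space
        self.U = nn.Linear(code_dim, input_dim, bias=False)
        self.lr_r = lr_r  # step size for the code updates
        self.lmda = lmda  # soft-thresholding level (sparsity strength)

    @staticmethod
    def soft_thresholding_(r, lmda):
        # prox of lmda * ||r||_1: shrink magnitudes by lmda, clip at zero
        with torch.no_grad():
            return r.sign() * torch.clamp(r.abs() - lmda, min=0.0)

    # def ista(self, x, r): ...  (the method shown above)

coder = SparseCoder()
x = torch.randn(8, 256)         # batch of inputs to reconstruct
r = 0.1 * torch.randn(8, 64)    # initial codes
# r = coder.ista(x, r)          # would return the sparse codes fitted to x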
Example #2
import numpy as np
import torch


def perturb(ten: torch.Tensor, repr_width, p):
    # TODO: smart kernel for GPU/CPU vs. multiplication to concatenate the mask

    # Cpp_Pert is an external C++ extension; presumably it draws an
    # independent Bernoulli(p) bit for each of the repr_width bits of
    # every element, giving a {0, 1} tensor of shape [*ten.shape, repr_width]
    ten_repr = Cpp_Pert.generateTensorMask(ten, repr_width, p)

    # pack the bits into bytes to build an integer XOR mask
    ten_np = ten_repr.numpy()
    packed = np.packbits(ten_np.astype(np.uint8), axis=-1, bitorder="little")
    ten_packed = torch.from_numpy(packed)
    # flatten away the extra dimension introduced by the bit packing
    mask = torch.flatten(ten_packed, start_dim=-2)

    # flip the selected bits of ten in place
    ten.data = torch.bitwise_xor(ten, mask).data
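
Cpp_Pert's source isn't shown, so the mask semantics above are inferred from how the result is consumed. Here is a minimal pure-PyTorch stand-in under that assumption; the name generate_tensor_mask and the Bernoulli(p) reading of p are hypothetical, not the extension's confirmed API:

import torch

def generate_tensor_mask(ten: torch.Tensor, repr_width: int, p: float) -> torch.Tensor:
    # hypothetical stand-in for Cpp_Pert.generateTensorMask: one
    # independent Bernoulli(p) draw per bit position of each element,
    # returning a {0, 1} float tensor of shape [*ten.shape, repr_width]
    probs = torch.full((*ten.shape, repr_width), p)
    return torch.bernoulli(probs)

# usage sketch: flip roughly 1% of the bits of an 8-bit tensor
weights = torch.randint(0, 256, (4, 4), dtype=torch.uint8)
bits = generate_tensor_mask(weights, repr_width=8, p=0.01)

With repr_width=8, np.packbits collapses the trailing bit dimension into one byte per element, so after flattening, the XOR mask matches the shape and dtype of the uint8 input.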