def ne(self, y, _scale=True):
    """Returns self != y"""
    if comm.get().get_world_size() == 2:
        return 1 - self.eq(y, _scale=_scale)

    # Multi-party case: (x != y) = (x - y < 0) + (y - x < 0), so stack the
    # difference with its negation, test both against zero, and sum.
    difference = self - y
    difference.share = torch_stack([difference.share, -(difference.share)])
    return difference._ltz(_scale=_scale).sum(0)
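# A minimal plaintext sanity check of the multi-party branch above (a sketch
# on plain torch tensors, not CrypTen shares): for integers x and y,
# (x != y) == (x - y < 0) + (y - x < 0), since exactly one comparison fires
# when x != y and neither fires when x == y.
import torch

x = torch.tensor([1, 2, 3])
y = torch.tensor([1, 5, 0])
diff = x - y
ne = (diff < 0).long() + ((-diff) < 0).long()
assert torch.equal(ne, (x != y).long())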
def square(size, device=None):
    """Generate a double (r, r * r) for a random ring element r of the given size"""
    r = generate_random_ring_element(size, device=device)
    r2 = r.mul(r)

    # Stack to vectorize scatter function
    stacked = torch_stack([r, r2])
    stacked = ArithmeticSharedTensor(stacked, precision=0, src=0)
    return stacked[0], stacked[1]
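# How a (r, r * r) double is consumed downstream (an illustrative plaintext
# sketch, not the provider's actual call path): to square a masked value x,
# open eps = x - r, then x * x = r2 + 2 * eps * r + eps * eps, which touches
# the secret only through the pre-generated pair.
import torch

x = torch.tensor([3, 4])
r = torch.tensor([7, 11])   # random mask, as drawn above
r2 = r * r                  # its square, from the same double
eps = x - r                 # the value that gets revealed
assert torch.equal(r2 + 2 * eps * r + eps * eps, x * x)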
def stack(seq, *args, **kwargs):
    """Stacks a list of tensors along a given dimension"""
    assert isinstance(seq, list), "Stack input must be a list"
    assert isinstance(
        seq[0], BinarySharedTensor
    ), "Sequence must contain BinarySharedTensors"
    result = seq[0].shallow_copy()
    result.share = torch_stack([tensor.share for tensor in seq], *args, **kwargs)
    return result
def stack(tensors, *args, **kwargs):
    """Perform tensor stacking"""
    for i, tensor in enumerate(tensors):
        if is_tensor(tensor):
            tensors[i] = ArithmeticSharedTensor(tensor)
        assert isinstance(
            tensors[i], ArithmeticSharedTensor
        ), "Can't stack %s with ArithmeticSharedTensor" % type(tensor)

    result = tensors[0].shallow_copy()
    result.share = torch_stack([tensor.share for tensor in tensors], *args, **kwargs)
    return result
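# Share-level intuition for both stack implementations above (a plaintext
# sketch with hypothetical two-party shares): additive shares compose
# linearly, so stacking each party's shares is the same as secret-sharing
# the stacked plaintext, and torch.stack on .share is all that is needed.
import torch

a, b = torch.tensor([1, 2]), torch.tensor([3, 4])
a0 = torch.tensor([5, -9]); a1 = a - a0   # shares of a
b0 = torch.tensor([2, 7]);  b1 = b - b0   # shares of b
stacked = torch.stack([a0, b0]) + torch.stack([a1, b1])
assert torch.equal(stacked, torch.stack([a, b]))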
def _get_additive_PRSS(self, size, remove_rank=False):
    """
    Generates a plaintext value from a set of random additive secret shares
    generated by each party
    """
    gens = self._get_generators(device=self.device)
    if remove_rank:
        gens = gens[1:]
    result = torch_stack(
        [generate_random_ring_element(size, generator=g, device=g.device) for g in gens]
    )
    return result.sum(0)
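# Why summing over the generators yields a value every holder can reconstruct
# (a minimal sketch with hypothetical seeds, not the provider's generator
# setup): seeded torch Generators are deterministic, so two parties holding
# the same seeds draw identical "random" shares and therefore agree on the
# plaintext sum.
import torch

def draw(seed, size=(3,)):
    g = torch.Generator().manual_seed(seed)
    return torch.randint(-2**31, 2**31 - 1, size, generator=g, dtype=torch.long)

party_a = torch.stack([draw(0), draw(1)]).sum(0)
party_b = torch.stack([draw(0), draw(1)]).sum(0)
assert torch.equal(party_a, party_b)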