def fix_prec(self, *args, storage="auto", field_type="int100", **kwargs):
    """Convert this tensor (or syft tensor) to a fixed-precision representation.

    Args:
        *args: positional args forwarded to the precision tensor constructor.
        storage (str): which fixed-precision backend to use: "auto" (choose
            based on the precision the values require), "crt"
            (CRTPrecisionTensor), or "large" (LargePrecisionTensor).
        field_type (str): field-size code, used only when storage == "crt".
        **kwargs: keyword args (base, precision_fractional, field, owner,
            internal_type, ...) forwarded to the precision tensor constructor.

    Returns:
        A wrapped fixed-precision tensor built on top of `self`.
    """
    if not kwargs.get("owner"):
        kwargs["owner"] = self.owner

    if self.is_wrapper:
        # BUG FIX: forward storage and field_type explicitly. They are
        # keyword-only parameters, so the previous bare `fix_prec(*args,
        # **kwargs)` recursion silently reset them to their defaults on
        # wrapped tensors.
        self.child = self.child.fix_prec(
            *args, storage=storage, field_type=field_type, **kwargs
        )
        return self

    base = kwargs.get("base", 10)
    prec_fractional = kwargs.get("precision_fractional", 3)

    max_precision = _get_maximum_precision()
    need_large_prec = self._requires_large_precision(max_precision, base, prec_fractional)

    if storage == "crt":
        assert (
            "field" not in kwargs
        ), 'When storage is set to "crt", choose the field size with the field_type argument'

        possible_field_types = list(_moduli_for_fields.keys())
        assert (
            field_type in possible_field_types
        ), f"Choose field_type in {possible_field_types} to build CRT tensors"

        # Build one fixed-precision residue tensor per modulus of the field.
        residues = {}
        for mod in _moduli_for_fields[field_type]:
            residues[mod] = (
                syft.FixedPrecisionTensor(*args, field=mod, **kwargs)
                .on(self)
                .child.fix_precision(check_range=False)
                .wrap()
            )

        return syft.CRTPrecisionTensor(residues, *args, **kwargs).wrap()

    if need_large_prec or storage == "large":
        return (
            syft.LargePrecisionTensor(*args, **kwargs)
            .on(self)
            .child.fix_large_precision()
            .wrap()
        )
    else:
        # NOTE: `need_large_prec` is necessarily False on this branch, so the
        # previous `assert not need_large_prec` was dead code and was removed.
        if "internal_type" in kwargs:
            warnings.warn(
                "do not provide internal_type if data does not need LargePrecisionTensor to be stored"
            )
            del kwargs["internal_type"]
        return syft.FixedPrecisionTensor(*args, **kwargs).on(self).enc_fix_prec()
def detail(worker, tensor_tuple: tuple) -> "CRTPrecisionTensor":
    """Rebuild a CRTPrecisionTensor from its serialized attribute tuple.

    Args:
        worker: the worker performing the deserialization
        tensor_tuple: tuple of (id, base, precision_fractional, chain)
            attributes of the CRTPrecisionTensor

    Returns:
        CRTPrecisionTensor: the reconstructed tensor
    """
    obj_id, obj_base, obj_prec_frac, chain = tensor_tuple

    result = syft.CRTPrecisionTensor(
        base=obj_base,
        precision_fractional=obj_prec_frac,
        owner=worker,
        id=obj_id,
    )

    # Re-attach the detailed child chain, when one was serialized.
    if chain is not None:
        result.child = syft.serde._detail(worker, chain)

    return result
def fix_prec(self, *args, storage="auto", field_type="int100", no_wrap: bool = False, **kwargs):
    """Convert a tensor or syft tensor to fixed precision.

    Args:
        *args (tuple): args to transmit to the fixed precision tensor
        storage (str): code to define the type of fixed precision tensor
            (values in (auto, crt, large))
        field_type (str): code to define a storage type
            (only for CRTPrecisionTensor)
        no_wrap (bool): if True, we don't add a wrapper on top of the fixed
            precision tensor
        **kwargs (dict): kwargs to transmit to the fixed precision tensor

    Returns:
        The fixed-precision tensor, wrapped unless no_wrap is True.
    """
    if not kwargs.get("owner"):
        kwargs["owner"] = self.owner

    if self.is_wrapper:
        # BUG FIX: forward storage and field_type explicitly. They are
        # keyword-only parameters, so the previous bare `fix_prec(*args,
        # **kwargs)` recursion silently reset them to their defaults on
        # wrapped tensors. (no_wrap is intentionally NOT forwarded: the
        # child result is stored wrapped and the choice is applied here.)
        self.child = self.child.fix_prec(
            *args, storage=storage, field_type=field_type, **kwargs
        )
        return self.child if no_wrap else self

    base = kwargs.get("base", 10)
    prec_fractional = kwargs.get("precision_fractional", 3)

    max_precision = _get_maximum_precision()
    need_large_prec = self._requires_large_precision(max_precision, base, prec_fractional)

    if storage == "crt":
        assert (
            "field" not in kwargs
        ), 'When storage is set to "crt", choose the field size with the field_type argument'

        possible_field_types = list(_moduli_for_fields.keys())
        assert (
            field_type in possible_field_types
        ), f"Choose field_type in {possible_field_types} to build CRT tensors"

        # Build one fixed-precision residue tensor per modulus of the field.
        residues = {}
        for mod in _moduli_for_fields[field_type]:
            residues[mod] = (
                syft.FixedPrecisionTensor(*args, field=mod, **kwargs)
                .on(self, wrap=False)
                .fix_precision(check_range=False)
                .wrap()
            )

        fpt_tensor = syft.CRTPrecisionTensor(residues, *args, **kwargs)
    elif need_large_prec or storage == "large":
        fpt_tensor = (
            syft.LargePrecisionTensor(*args, **kwargs)
            .on(self, wrap=False)
            .fix_large_precision()
        )
    else:
        # NOTE: `need_large_prec` is necessarily False on this branch, so the
        # previous `assert not need_large_prec` was dead code and was removed.
        if "internal_type" in kwargs:
            warnings.warn(
                "do not provide internal_type if data does not need LargePrecisionTensor to be stored"
            )
            del kwargs["internal_type"]
        fpt_tensor = (
            syft.FixedPrecisionTensor(*args, **kwargs).on(self, wrap=False).fix_precision()
        )

    if not no_wrap:
        fpt_tensor = fpt_tensor.wrap()

    return fpt_tensor