def __init__(self, arr_t, axes=None):
    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

    if axes is None:
        axes = tuple(range(len(arr_t.shape)))
    else:
        axes = tuple(axes)
    self._axes = axes
def __init__(self, shape, lwe_size):
    a = Type(Torus32, shape + (lwe_size,))
    b = Type(Torus32, shape)
    key = Type(Int32, (lwe_size,))

    Computation.__init__(self, [
        Parameter('result', Annotation(b, 'o')),
        Parameter('lwe_a', Annotation(a, 'i')),
        Parameter('lwe_b', Annotation(b, 'i')),
        Parameter('key', Annotation(key, 'i'))])
def get_tlwe_transformed_add_mul_to_trf(
        params: 'TGswParams', shape, bk_len: int,
        perf_params: PerformanceParametersForDevice):

    tlwe_params = params.tlwe_params
    decomp_length = params.decomp_length
    mask_size = tlwe_params.mask_size
    polynomial_degree = tlwe_params.polynomial_degree

    transform = get_transform(params.tlwe_params.transform_type)
    tdtype = transform.transformed_dtype()
    tlength = transform.transformed_length(polynomial_degree)
    tr_ctype = transform.transformed_internal_ctype()

    result = Type(tdtype, shape + (mask_size + 1, tlength))
    sample = Type(tdtype, shape + (mask_size + 1, decomp_length, tlength))
    bootstrap_key = Type(
        tdtype, (bk_len, mask_size + 1, decomp_length, mask_size + 1, tlength))

    # The template accumulates, elementwise in the transformed domain, the dot
    # product of the decomposed sample with one row of the bootstrap key
    # (the summation runs over the mask and decomposition indices).
    return Transformation(
        [
            Parameter('result', Annotation(result, 'o')),
            Parameter('sample', Annotation(sample, 'i')),
            Parameter('bootstrap_key', Annotation(bootstrap_key, 'i')),
            Parameter('bk_row_idx', Annotation(numpy.int32))
        ],
        """
        ${tr_ctype} result = ${tr_ctype}pack(${dtypes.c_constant(0, result.dtype)});
        %for mask_idx in range(mask_size + 1):
        %for decomp_idx in range(decomp_length):
        {
            ${tr_ctype} a = ${tr_ctype}pack(
                ${sample.load_idx}(
                    ${", ".join(idxs[:-2])}, ${mask_idx}, ${decomp_idx}, ${idxs[-1]})
                );
            ${tr_ctype} b = ${tr_ctype}pack(
                ${bootstrap_key.load_idx}(
                    ${bk_row_idx}, ${mask_idx}, ${decomp_idx}, ${idxs[-2]}, ${idxs[-1]})
                );
            result = ${add}(result, ${mul}(a, b));
        }
        %endfor
        %endfor
        ${result.store_same}(${tr_ctype}unpack(result));
        """,
        connectors=['result'],
        render_kwds=dict(
            mask_size=mask_size,
            decomp_length=decomp_length,
            add=transform.transformed_add(perf_params),
            mul=transform.transformed_mul(perf_params),
            tr_ctype=tr_ctype))
def __init__(self, x, NFFT=256, noverlap=128, pad_to=None, window=hanning_window):
    assert dtypes.is_real(x.dtype)
    assert x.ndim == 1

    rolling_frame_trf = rolling_frame(x, NFFT, noverlap, pad_to)

    complex_dtype = dtypes.complex_for(x.dtype)
    fft_arr = Type(complex_dtype, rolling_frame_trf.output.shape)
    real_fft_arr = Type(x.dtype, rolling_frame_trf.output.shape)

    window_trf = window(real_fft_arr, NFFT)
    broadcast_zero_trf = transformations.broadcast_const(real_fft_arr, 0)
    to_complex_trf = transformations.combine_complex(fft_arr)
    amplitude_trf = transformations.norm_const(fft_arr, 1)
    crop_trf = crop_frequencies(amplitude_trf.output)

    fft = FFT(fft_arr, axes=(1,))
    fft.parameter.input.connect(
        to_complex_trf, to_complex_trf.output,
        input_real=to_complex_trf.real, input_imag=to_complex_trf.imag)
    fft.parameter.input_imag.connect(
        broadcast_zero_trf, broadcast_zero_trf.output)
    fft.parameter.input_real.connect(
        window_trf, window_trf.output, unwindowed_input=window_trf.input)
    fft.parameter.unwindowed_input.connect(
        rolling_frame_trf, rolling_frame_trf.output,
        flat_input=rolling_frame_trf.input)
    fft.parameter.output.connect(
        amplitude_trf, amplitude_trf.input, amplitude=amplitude_trf.output)
    fft.parameter.amplitude.connect(
        crop_trf, crop_trf.input, cropped_amplitude=crop_trf.output)

    self._fft = fft
    self._transpose = Transpose(fft.parameter.cropped_amplitude)

    Computation.__init__(self, [
        Parameter('output', Annotation(self._transpose.parameter.output, 'o')),
        Parameter('input', Annotation(fft.parameter.flat_input, 'i'))])
def get_multiply(output):
    return Transformation(
        [
            Parameter('output', Annotation(output, 'o')),
            Parameter('a', Annotation(output, 'i')),
            Parameter('b', Annotation(Type(output.dtype, (output.shape[-1],)), 'i'))
        ],
        """
        ${output.store_same}(${mul}(${a.load_same}, ${b.load_idx}(${idxs[-1]})));
        """,
        connectors=['output', 'a'],
        render_kwds=dict(mul=functions.mul(output.dtype, output.dtype)))
def tr_2_to_1(arr, scalar):
    return Transformation(
        [
            Parameter('o1', Annotation(arr, 'o')),
            Parameter('i1', Annotation(arr, 'i')),
            Parameter('i2', Annotation(arr, 'i')),
            Parameter('s1', Annotation(scalar))
        ],
        """
        ${o1.ctype} t = ${mul}(${cast}(${s1}), ${i1.load_same});
        ${o1.store_same}(t + ${i2.load_same});
        """,
        render_kwds=dict(
            mul=functions.mul(arr.dtype, arr.dtype),
            cast=functions.cast(arr.dtype, scalar.dtype)))
def __init__(self, params: 'TLweParams', shape):
    a_type = Type(Torus32, shape + (params.mask_size + 1, params.polynomial_degree))
    cv_type = Type(ErrorFloat, shape)
    mu_type = Type(Torus32, shape + (params.polynomial_degree,))

    self._mask_size = params.mask_size

    Computation.__init__(self, [
        Parameter('a', Annotation(a_type, 'o')),
        Parameter('current_variances', Annotation(cv_type, 'o')),
        Parameter('mu', Annotation(mu_type, 'i'))])
def tr_1_to_2(arr):
    return Transformation(
        [
            Parameter('o1', Annotation(arr, 'o')),
            Parameter('o2', Annotation(arr, 'o')),
            Parameter('i1', Annotation(arr, 'i'))
        ],
        """
        ${o1.ctype} t = ${mul}(${i1.load_same}, 0.5);
        ${o1.store_same}(t);
        ${o2.store_same}(t);
        """,
        render_kwds=dict(mul=functions.mul(arr.dtype, numpy.float32)))
def div_const(arr_t, param):
    """
    Returns a scaling transformation with a fixed parameter
    (1 output, 1 input): ``output = input / param``.
    """
    param_dtype = dtypes.detect_type(param)
    return Transformation(
        [
            Parameter('output', Annotation(arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))
        ],
        "${output.store_same}(${div}(${input.load_same}, ${param}));",
        render_kwds=dict(
            div=functions.div(arr_t.dtype, param_dtype, out_dtype=arr_t.dtype),
            param=dtypes.c_constant(param, dtype=param_dtype)))
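# Usage sketch (not from the library source): a Transformation cannot run by
# itself, so one way to exercise `div_const` defined above is to wrap it in a
# PureParallel kernel. Assumes reikna with a working CUDA or OpenCL backend.
import numpy
from reikna.cluda import any_api
from reikna.algorithms import PureParallel

api = any_api()
thr = api.Thread.create()

x = numpy.arange(16, dtype=numpy.float32)
trf = div_const(x, numpy.float32(4))  # the divisor is baked in at compile time
comp = PureParallel.from_trf(trf, guiding_array='output').compile(thr)

x_dev = thr.to_device(x)
res_dev = thr.empty_like(x_dev)
comp(res_dev, x_dev)
assert numpy.allclose(res_dev.get(), x / 4)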
def cast(arr_t, dtype):
    """
    Returns a typecast transformation of ``arr_t`` to ``dtype``
    (1 output, 1 input): ``output = cast[dtype](input)``.
    """
    dest = Type.from_value(arr_t).with_dtype(dtype)
    return Transformation(
        [
            Parameter('output', Annotation(dest, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))
        ],
        "${output.store_same}(${cast}(${input.load_same}));",
        render_kwds=dict(cast=functions.cast(dtype, arr_t.dtype)))
def __init__(self, params: 'TLweParams', shape):
    self._mask_size = params.mask_size
    self._polynomial_degree = params.polynomial_degree

    result_a = Type(Torus32, shape + (params.extracted_lweparams.size,))
    result_b = Type(Torus32, shape)
    tlwe_a = Type(Torus32, shape + (params.mask_size + 1, params.polynomial_degree))

    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_a, 'o')),
        Parameter('result_b', Annotation(result_b, 'o')),
        Parameter('tlwe_a', Annotation(tlwe_a, 'i'))])
def _build_plan(self, plan_factory, device_params, output, input_):
    plan = plan_factory()

    N = input_.shape[-1] * 4
    batch_shape = input_.shape[:-1]
    batch_size = helpers.product(batch_shape)

    # The first element is unused
    coeffs = numpy.concatenate(
        [[0], 1 / (4 * numpy.sin(2 * numpy.pi * numpy.arange(1, N // 2) / N))])
    coeffs_arr = plan.persistent_array(coeffs)

    prepare_iprfft_input = get_prepare_iprfft_input(input_)
    prepare_iprfft_output = get_prepare_iprfft_output(output)

    irfft = IRFFT(prepare_iprfft_input.Y)
    irfft.parameter.input.connect(
        prepare_iprfft_input, prepare_iprfft_input.Y,
        X=prepare_iprfft_input.X)
    irfft.parameter.output.connect(
        prepare_iprfft_output, prepare_iprfft_output.y,
        x=prepare_iprfft_output.x,
        x0=prepare_iprfft_output.x0,
        coeffs=prepare_iprfft_output.coeffs)

    real = Transformation(
        [
            Parameter(
                'output',
                Annotation(Type(dtypes.real_for(input_.dtype), input_.shape), 'o')),
            Parameter('input', Annotation(input_, 'i')),
        ],
        """
        ${output.store_same}((${input.load_same}).x);
        """,
        connectors=['output'])

    rd_t = Type(output.dtype, input_.shape)
    rd = Reduce(rd_t, predicate_sum(rd_t.dtype), axes=(len(input_.shape) - 1,))
    rd.parameter.input.connect(real, real.output, X=real.input)

    x0 = plan.temp_array_like(rd.parameter.output)

    plan.computation_call(rd, x0, input_)
    plan.computation_call(irfft, output, x0, coeffs_arr, input_)

    return plan
def __init__(
        self, result_shape_info, input_size: int, output_size: int,
        decomp_length: int, log2_base: int):

    base = 2**log2_base

    a = result_shape_info.a
    b = result_shape_info.b
    cv = result_shape_info.current_variances

    ks_a = Type(Torus32, (input_size, decomp_length, base, output_size))
    ks_b = Type(Torus32, (input_size, decomp_length, base))
    ks_cv = Type(Float, (input_size, decomp_length, base))

    source_a = Type(Torus32, result_shape_info.shape + (input_size,))
    source_b = Type(Torus32, result_shape_info.shape)

    self._decomp_length = decomp_length
    self._input_size = input_size
    self._output_size = output_size
    self._log2_base = log2_base

    Computation.__init__(self, [
        Parameter('result_a', Annotation(a, 'io')),
        Parameter('result_b', Annotation(b, 'io')),
        Parameter('result_cv', Annotation(cv, 'io')),
        Parameter('ks_a', Annotation(ks_a, 'i')),
        Parameter('ks_b', Annotation(ks_b, 'i')),
        Parameter('ks_cv', Annotation(ks_cv, 'i')),
        Parameter('source_a', Annotation(source_a, 'i')),
        Parameter('source_b', Annotation(source_b, 'i'))])
def add_param(arr_t, param_dtype):
    """
    Returns an addition transformation with a dynamic parameter
    (1 output, 1 input, 1 scalar): ``output = input + param``.
    """
    return Transformation(
        [
            Parameter('output', Annotation(arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i')),
            Parameter('param', Annotation(param_dtype))
        ],
        "${output.store_same}(${add}(${input.load_same}, ${param}));",
        render_kwds=dict(
            add=functions.add(arr_t.dtype, param_dtype, out_dtype=arr_t.dtype)))
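# Usage sketch (assumption): in contrast to div_const above, the scalar here is
# a kernel argument supplied at call time, so one compiled kernel can be reused
# with different addends.
import numpy
from reikna.cluda import any_api
from reikna.algorithms import PureParallel

api = any_api()
thr = api.Thread.create()

x = numpy.linspace(0, 1, 8).astype(numpy.float32)
comp = PureParallel.from_trf(
    add_param(x, numpy.float32), guiding_array='output').compile(thr)

x_dev = thr.to_device(x)
res_dev = thr.empty_like(x_dev)
comp(res_dev, x_dev, numpy.float32(3))  # output = input + 3
assert numpy.allclose(res_dev.get(), x + 3)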
def __init__(self, arr_t, predicate, axes=None, output_arr_t=None):
    dims = len(arr_t.shape)

    if axes is None:
        axes = tuple(range(dims))
    else:
        axes = tuple(sorted(helpers.wrap_in_tuple(axes)))

    if len(set(axes)) != len(axes):
        raise ValueError("Cannot reduce twice over the same axis")
    if min(axes) < 0 or max(axes) >= dims:
        raise ValueError("Axes numbers are out of bounds")

    if hasattr(predicate.empty, 'dtype'):
        if arr_t.dtype != predicate.empty.dtype:
            raise ValueError("The predicate and the array must use the same data type")
        empty = predicate.empty
    else:
        empty = dtypes.cast(arr_t.dtype)(predicate.empty)

    remaining_axes = tuple(a for a in range(dims) if a not in axes)
    output_shape = tuple(arr_t.shape[a] for a in remaining_axes)

    # If the reduced axes are already the innermost ones, no transposition is needed.
    if axes == tuple(range(dims - len(axes), dims)):
        self._transpose_axes = None
    else:
        self._transpose_axes = remaining_axes + axes

    self._operation = predicate.operation
    self._empty = empty

    if output_arr_t is None:
        output_arr_t = Type(arr_t.dtype, shape=output_shape)
    else:
        if output_arr_t.dtype != arr_t.dtype:
            raise ValueError(
                "The dtype of the output array must be the same as that of the input array")
        if output_arr_t.shape != output_shape:
            raise ValueError(
                "Expected the output array shape " + str(output_shape)
                + ", got " + str(output_arr_t.shape))

    Computation.__init__(self, [
        Parameter('output', Annotation(output_arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])
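# Usage sketch (assumption: this is the constructor of reikna.algorithms.Reduce
# or an equivalent): summing a 2D array over its last axis with predicate_sum.
import numpy
from reikna.cluda import any_api
from reikna.algorithms import Reduce, predicate_sum

api = any_api()
thr = api.Thread.create()

a = numpy.random.rand(64, 128).astype(numpy.float32)
rd = Reduce(a, predicate_sum(a.dtype), axes=(1,)).compile(thr)

a_dev = thr.to_device(a)
out_dev = thr.array((64,), numpy.float32)
rd(out_dev, a_dev)
assert numpy.allclose(out_dev.get(), a.sum(axis=1), rtol=1e-4)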
def div_param(arr_t, param_dtype):
    """
    Returns a scaling transformation with a dynamic parameter
    (1 output, 1 input, 1 scalar): ``output = input / param``.
    """
    return Transformation(
        [
            Parameter('output', Annotation(arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i')),
            Parameter('param', Annotation(param_dtype))
        ],
        "${output.store_same}(${div}(${input.load_same}, ${param}));",
        render_kwds=dict(
            div=functions.div(arr_t.dtype, param_dtype, out_dtype=arr_t.dtype)))
def __init__(self, click_probability_meter, system, representation, samples):
    assert representation == Representation.POSITIVE_P

    self._system = system

    state = Type(numpy.complex128, (samples, system.modes))
    output = Type(numpy.float64, (system.modes,))

    Computation.__init__(
        self,
        [
            Parameter('output', Annotation(output, 'o')),
            Parameter('alpha', Annotation(state, 'i')),
            Parameter('beta', Annotation(state, 'i')),
        ])
def split_complex(input_arr_t):
    """
    Returns a transformation that splits complex input into two real outputs
    (2 outputs, 1 input): ``real = Re(input), imag = Im(input)``.
    """
    output_t = Type(dtypes.real_for(input_arr_t.dtype), shape=input_arr_t.shape)
    return Transformation(
        [
            Parameter('real', Annotation(output_t, 'o')),
            Parameter('imag', Annotation(output_t, 'o')),
            Parameter('input', Annotation(input_arr_t, 'i'))
        ],
        """
        ${real.store_same}(${input.load_same}.x);
        ${imag.store_same}(${input.load_same}.y);
        """)
def __init__(self, meter, system, representation, samples):
    self._system = system
    self._representation = representation

    state = Type(numpy.complex128, (samples, system.modes))
    output = Type(numpy.float64, (system.modes,))

    Computation.__init__(
        self,
        [
            Parameter('output', Annotation(output, 'o')),
            Parameter('alpha', Annotation(state, 'i')),
            Parameter('beta', Annotation(state, 'i')),
        ])
def fftshift(arr_t, axes=None):
    """
    Returns a frequency shift transformation (1 output, 1 input) that works as
    ``output = numpy.fft.fftshift(input, axes=axes)``.

    .. warning::

        Involves repositioning of the elements, so cannot be used on inplace kernels.
    """
    if axes is None:
        axes = tuple(range(len(arr_t.shape)))
    else:
        axes = tuple(sorted(axes))

    # The code is taken from the FFTShift template for odd problem sizes
    # (at the moment of writing).
    # Note the use of the ``idxs`` template parameter to get access to element indices.
    return Transformation(
        [
            Parameter('output', Annotation(arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))
        ],
        """
        <%
            dimensions = len(output.shape)
            new_idx_names = ['new_idx' + str(i) for i in range(dimensions)]
        %>
        %for dim in range(dimensions):
        VSIZE_T ${new_idx_names[dim]} =
            ${idxs[dim]}
            %if dim in axes:
            %if output.shape[dim] % 2 == 0:
            + (${idxs[dim]} < ${output.shape[dim] // 2}
                ? ${output.shape[dim] // 2}
                : ${-output.shape[dim] // 2})
            %else:
            + (${idxs[dim]} <= ${output.shape[dim] // 2}
                ? ${output.shape[dim] // 2}
                : ${-(output.shape[dim] // 2 + 1)})
            %endif
            %endif
            ;
        %endfor

        ${output.ctype} val = ${input.load_same};
        ${output.store_idx}(${', '.join(new_idx_names)}, val);
        """,
        connectors=['input'],
        render_kwds=dict(axes=axes))
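# Usage sketch (assumption): since the shifted elements are written with
# store_idx, the transformation is guided by its input when wrapped in a
# PureParallel kernel; the result should match numpy.fft.fftshift.
import numpy
from reikna.cluda import any_api
from reikna.algorithms import PureParallel

api = any_api()
thr = api.Thread.create()

x = numpy.arange(9, dtype=numpy.float32)  # odd size exercises the second branch
comp = PureParallel.from_trf(fftshift(x), guiding_array='input').compile(thr)

x_dev = thr.to_device(x)
res_dev = thr.empty_like(x_dev)
comp(res_dev, x_dev)
assert (res_dev.get() == numpy.fft.fftshift(x)).all()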
def __init__(self, arr_t, axes=None):
    if not dtypes.is_complex(arr_t.dtype):
        raise ValueError("FFT computation requires array of a complex dtype")

    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i')),
        Parameter('inverse', Annotation(numpy.int32), default=0)])

    if axes is None:
        axes = tuple(range(len(arr_t.shape)))
    else:
        axes = tuple(axes)
    self._axes = axes
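# Usage sketch: this constructor matches reikna.fft.FFT, where the `inverse`
# scalar selects the transform direction at call time, so the forward and
# inverse transforms share a single compiled kernel.
import numpy
from reikna.cluda import any_api
from reikna.fft import FFT

api = any_api()
thr = api.Thread.create()

data = (numpy.random.rand(256) + 1j * numpy.random.rand(256)).astype(numpy.complex64)
fft = FFT(data).compile(thr)

data_dev = thr.to_device(data)
fwd_dev = thr.empty_like(data_dev)
inv_dev = thr.empty_like(data_dev)
fft(fwd_dev, data_dev)             # forward transform
fft(inv_dev, fwd_dev, inverse=1)   # inverse transform; the roundtrip recovers the input
assert numpy.allclose(inv_dev.get(), data, atol=1e-4)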
def __init__(self, randoms_arr, generators_dim, sampler, seed=None):
    self._sampler = sampler
    self._keygen = KeyGenerator.create(
        sampler.bijection, seed=seed, reserve_id_space=True)

    assert sampler.dtype == randoms_arr.dtype

    counters_size = randoms_arr.shape[-generators_dim:]

    self._generators_dim = generators_dim
    self._counters_t = Type(sampler.bijection.counter_dtype, shape=counters_size)

    Computation.__init__(self, [
        Parameter('counters', Annotation(self._counters_t, 'io')),
        Parameter('randoms', Annotation(randoms_arr, 'o'))])
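# Usage sketch (assumption: this constructor matches reikna.cbrng.CBRNG, which
# has the same signature). The convenience constructor `normal_bm` builds a
# Box-Muller sampler; `create_counters` produces the initial counter array.
import numpy
from reikna.cluda import any_api
from reikna.cbrng import CBRNG
from reikna.core import Type

api = any_api()
thr = api.Thread.create()

randoms = Type(numpy.float32, shape=(100, 200))
rng = CBRNG.normal_bm(randoms, 1, seed=123)  # one generator dimension

counters_dev = thr.to_device(rng.create_counters())
randoms_dev = thr.array(randoms.shape, randoms.dtype)
rng.compile(thr)(counters_dev, randoms_dev)  # counters are updated in place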
def __init__(self, params: 'TGswParams', shape):
    self._params = params

    decomp_length = params.decomp_length
    mask_size = params.tlwe_params.mask_size
    polynomial_degree = params.tlwe_params.polynomial_degree

    result_a = Type(
        Torus32,
        shape + (mask_size + 1, decomp_length, mask_size + 1, polynomial_degree))
    messages = Type(Torus32, shape)

    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_a, 'o')),
        Parameter('messages', Annotation(messages, 'i'))])
def copy(arr_t, out_arr_t=None):
    """
    Returns an identity transformation (1 output, 1 input): ``output = input``.
    Output array type ``out_arr_t`` may have different strides,
    but must have the same shape and data type.
    """
    if out_arr_t is None:
        out_arr_t = arr_t
    else:
        if out_arr_t.shape != arr_t.shape or out_arr_t.dtype != arr_t.dtype:
            raise ValueError("Input and output arrays must have the same shape and data type")

    return Transformation(
        [
            Parameter('output', Annotation(out_arr_t, 'o')),
            Parameter('input', Annotation(arr_t, 'i'))
        ],
        "${output.store_same}(${input.load_same});")
def ignore(arr_t):
    """
    Returns a transformation that ignores the output it is attached to.
    """
    return Transformation(
        [Parameter('input', Annotation(arr_t, 'i'))],
        """
        // Ignoring intentionally
        """)
def __init__(self, size, dtype):
    Computation.__init__(self, [
        Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
        Parameter('input', Annotation(Type(dtype, shape=size), 'i'))])

    self._p = PureParallel(
        [
            Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
            Parameter('i1', Annotation(Type(dtype, shape=size), 'i')),
            Parameter('i2', Annotation(Type(dtype, shape=size), 'i'))
        ],
        """
        ${i1.ctype} t1 = ${i1.load_idx}(${idxs[0]});
        ${i2.ctype} t2 = ${i2.load_idx}(${idxs[0]});
        ${output.store_idx}(${idxs[0]}, t1 + t2);
        """)
def unimod_gen(size, single=True):
    # Choose precision: complex64 for single precision, complex128 for double.
    if single:
        dtype = np.complex64
    else:
        dtype = np.complex128

    # Project each element onto the unit circle, keeping only its phase.
    unimod = Transformation(
        [
            Parameter('output', Annotation(Type(dtype, size), 'o')),
            Parameter('input', Annotation(Type(dtype, size), 'i'))
        ],
        '''
        ${input.ctype} val = ${input.load_same};
        ${output.store_same}(${polar_unit}(atan2(val.y, val.x)));
        ''',
        render_kwds=dict(
            polar_unit=functions.polar_unit(dtype=np.float32 if single else np.double)))

    return unimod
def combine_complex(output_arr_t):
    """
    Returns a transformation that joins two real inputs into complex output
    (1 output, 2 inputs): ``output = real + 1j * imag``.
    """
    input_t = Type(dtypes.real_for(output_arr_t.dtype), shape=output_arr_t.shape)
    return Transformation(
        [
            Parameter('output', Annotation(output_arr_t, 'o')),
            Parameter('real', Annotation(input_t, 'i')),
            Parameter('imag', Annotation(input_t, 'i'))
        ],
        """
        ${output.store_same}(
            COMPLEX_CTR(${output.ctype})(
                ${real.load_same},
                ${imag.load_same}));
        """)
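# Usage sketch (assumption): a typical use of combine_complex is to connect it
# to an FFT input so the compiled computation accepts separate real and
# imaginary arrays, as in the spectrogram constructor above.
import numpy
from reikna.cluda import any_api
from reikna.core import Type
from reikna.fft import FFT

api = any_api()
thr = api.Thread.create()

carr_t = Type(numpy.complex64, shape=(256,))
trf = combine_complex(carr_t)

fft = FFT(carr_t)
fft.parameter.input.connect(trf, trf.output, real_in=trf.real, imag_in=trf.imag)
fftc = fft.compile(thr)

re = numpy.random.rand(256).astype(numpy.float32)
im = numpy.random.rand(256).astype(numpy.float32)
out_dev = thr.array((256,), numpy.complex64)
fftc(out_dev, thr.to_device(re), thr.to_device(im))
assert numpy.allclose(out_dev.get(), numpy.fft.fft(re + 1j * im), atol=1e-2)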
def prepare_rfft_input(arr):
    res = Type(dtypes.complex_for(arr.dtype), arr.shape[:-1] + (arr.shape[-1] // 2,))
    return Transformation(
        [
            Parameter('output', Annotation(res, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        <%
            batch_idxs = " ".join((idx + ", ") for idx in idxs[:-1])
        %>
        ${input.ctype} re = ${input.load_idx}(${batch_idxs} ${idxs[-1]} * 2);
        ${input.ctype} im = ${input.load_idx}(${batch_idxs} ${idxs[-1]} * 2 + 1);
        ${output.store_same}(COMPLEX_CTR(${output.ctype})(re, im));
        """,
        connectors=['output'])
def prepare_irfft_output(arr):
    res = Type(dtypes.real_for(arr.dtype), arr.shape[:-1] + (arr.shape[-1] * 2,))
    return Transformation(
        [
            Parameter('output', Annotation(res, 'o')),
            Parameter('input', Annotation(arr, 'i')),
        ],
        """
        <%
            batch_idxs = " ".join((idx + ", ") for idx in idxs[:-1])
        %>
        ${input.ctype} x = ${input.load_same};
        ${output.store_idx}(${batch_idxs} ${idxs[-1]} * 2, x.x);
        ${output.store_idx}(${batch_idxs} ${idxs[-1]} * 2 + 1, x.y);
        """,
        connectors=['output'])