def __init__(
        self, arr1, arr2, coeff, same_A_B=False,
        test_incorrect_parameter_name=False,
        test_untyped_scalar=False,
        test_kernel_adhoc_array=False):

    assert len(arr1.shape) == 2
    assert len(arr2.shape) == (2 if same_A_B else 1)
    assert arr1.dtype == arr2.dtype
    if same_A_B:
        assert arr1.shape == arr2.shape
    else:
        assert arr1.shape[0] == arr1.shape[1]

    self._same_A_B = same_A_B
    self._persistent_array = numpy.arange(arr2.size).reshape(arr2.shape).astype(arr2.dtype)
    self._test_untyped_scalar = test_untyped_scalar
    self._test_kernel_adhoc_array = test_kernel_adhoc_array

    Computation.__init__(self, [
        Parameter(('_C' if test_incorrect_parameter_name else 'C'), Annotation(arr1, 'o')),
        Parameter('D', Annotation(arr2, 'o')),
        Parameter('A', Annotation(arr1, 'i')),
        Parameter('B', Annotation(arr2, 'i')),
        Parameter('coeff', Annotation(coeff))])

def __init__(self, mode_arr, add_points=None, inverse=False, order=1, axes=None):

    if axes is None:
        axes = tuple(range(len(mode_arr.shape)))
    else:
        axes = tuple(axes)
    self._axes = list(sorted(axes))

    if add_points is None:
        add_points = [0] * len(mode_arr.shape)
    else:
        add_points = list(add_points)
    self._add_points = add_points

    coord_shape = list(mode_arr.shape)
    for axis in range(len(mode_arr.shape)):
        if axis in axes:
            coord_shape[axis] = get_spatial_points(
                mode_arr.shape[axis], order, add_points=add_points[axis])
    coord_arr = Type(mode_arr.dtype, shape=coord_shape)

    self._inverse = inverse
    self._order = order

    if not inverse:
        parameters = [
            Parameter('modes', Annotation(mode_arr, 'o')),
            Parameter('coords', Annotation(coord_arr, 'i'))]
    else:
        parameters = [
            Parameter('coords', Annotation(coord_arr, 'o')),
            Parameter('modes', Annotation(mode_arr, 'i'))]

    Computation.__init__(self, parameters)

def __init__(
        self, arr_t, predicate, axes=None, exclusive=False,
        max_work_group_size=None, seq_size=None):

    self._max_work_group_size = max_work_group_size
    self._seq_size = seq_size
    self._exclusive = exclusive

    ndim = len(arr_t.shape)
    self._axes = helpers.normalize_axes(ndim, axes)
    if not helpers.are_axes_innermost(ndim, self._axes):
        self._transpose_to, self._transpose_from = (
            helpers.make_axes_innermost(ndim, self._axes))
        self._axes = tuple(range(ndim - len(self._axes), ndim))
    else:
        self._transpose_to = None
        self._transpose_from = None

    if len(set(self._axes)) != len(self._axes):
        raise ValueError("Cannot scan twice over the same axis")

    if hasattr(predicate.empty, 'dtype'):
        if arr_t.dtype != predicate.empty.dtype:
            raise ValueError("The predicate and the array must use the same data type")
        empty = predicate.empty
    else:
        empty = dtypes.cast(arr_t.dtype)(predicate.empty)

    self._predicate = predicate

    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

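# --- Usage sketch ---
# The constructor above matches reikna.algorithms.Scan (prefix scan over the
# innermost axes). Assuming that is the class it belongs to, a minimal,
# hedged example of compiling and running it:

import numpy
from reikna.cluda import any_api
from reikna.algorithms import Scan, predicate_sum

api = any_api()
thr = api.Thread.create()

arr = numpy.arange(16).astype(numpy.int32)
scan = Scan(arr, predicate_sum(arr.dtype), axes=(0,), exclusive=True)
scanc = scan.compile(thr)

arr_dev = thr.to_device(arr)
res_dev = thr.empty_like(scan.parameter.output)
scanc(res_dev, arr_dev)
# Exclusive prefix sums: res_dev.get() == [0, 0, 1, 3, 6, 10, ...]
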
def __init__(self, params: 'TGswParams', shape, bk_len, perf_params: PerformanceParameters):

    mask_size = params.tlwe_params.mask_size
    polynomial_degree = params.tlwe_params.polynomial_degree
    decomp_length = params.decomp_length

    transform = get_transform(params.tlwe_params.transform_type)
    tdtype = transform.transformed_dtype()
    tlength = transform.transformed_length(polynomial_degree)

    accum = Type(Torus32, shape + (mask_size + 1, polynomial_degree))
    bootstrap_key = Type(
        tdtype, (bk_len, mask_size + 1, decomp_length, mask_size + 1, tlength))

    self._params = params
    self._perf_params = perf_params
    self._shape = shape
    self._bk_len = bk_len

    Computation.__init__(self, [
        Parameter('accum', Annotation(accum, 'io')),
        Parameter('bootstrap_key', Annotation(bootstrap_key, 'i')),
        Parameter('bk_row_idx', Annotation(numpy.int32))])

def __init__(
        self, result_shape_info, input_size: int, output_size: int,
        decomp_length: int, log2_base: int):

    base = 2**log2_base

    a = result_shape_info.a
    b = result_shape_info.b
    cv = result_shape_info.current_variances

    ks_a = Type(Torus32, (input_size, decomp_length, base, output_size))
    ks_b = Type(Torus32, (input_size, decomp_length, base))
    ks_cv = Type(Float, (input_size, decomp_length, base))

    source_a = Type(Torus32, result_shape_info.shape + (input_size,))
    source_b = Type(Torus32, result_shape_info.shape)

    self._decomp_length = decomp_length
    self._input_size = input_size
    self._output_size = output_size
    self._log2_base = log2_base

    Computation.__init__(self, [
        Parameter('result_a', Annotation(a, 'io')),
        Parameter('result_b', Annotation(b, 'io')),
        Parameter('result_cv', Annotation(cv, 'io')),
        Parameter('ks_a', Annotation(ks_a, 'i')),
        Parameter('ks_b', Annotation(ks_b, 'i')),
        Parameter('ks_cv', Annotation(ks_cv, 'i')),
        Parameter('source_a', Annotation(source_a, 'i')),
        Parameter('source_b', Annotation(source_b, 'i'))])

def __init__(
        self, polynomial_degree, shape, powers_shape,
        powers_view=False, minus_one=False, invert_powers=False):

    self._batch_shape = powers_shape[:-1] if powers_view else powers_shape
    assert self._batch_shape == shape[:len(self._batch_shape)]

    self._powers_view = powers_view
    self._minus_one = minus_one
    self._invert_powers = invert_powers

    polynomials = Type(Torus32, shape + (polynomial_degree,))
    powers = Type(Int32, powers_shape)

    Computation.__init__(self, [
        Parameter('result', Annotation(polynomials, 'o')),
        Parameter('source', Annotation(polynomials, 'i')),
        Parameter('powers', Annotation(powers, 'i')),
        Parameter('powers_idx', Annotation(Type(Int32)))  # unused if powers_view==False
        ])

def __init__(
        self, input_size: int, output_size: int,
        decomp_length: int, log2_base: int, noise: float):

    base = 2**log2_base

    a = Type(Torus32, (input_size, decomp_length, base, output_size))
    b = Type(Torus32, (input_size, decomp_length, base))
    cv = Type(Float, (input_size, decomp_length, base))

    in_key = Type(Int32, input_size)
    out_key = Type(Int32, output_size)
    noises_a = Type(Torus32, (input_size, decomp_length, base - 1, output_size))
    noises_b = Type(Float, (input_size, decomp_length, base - 1))

    self._output_size = output_size
    self._log2_base = log2_base
    self._noise = noise

    Computation.__init__(self, [
        Parameter('ks_a', Annotation(a, 'o')),
        Parameter('ks_b', Annotation(b, 'o')),
        Parameter('ks_cv', Annotation(cv, 'o')),
        Parameter('in_key', Annotation(in_key, 'i')),
        Parameter('out_key', Annotation(out_key, 'i')),
        Parameter('noises_a', Annotation(noises_a, 'i')),
        Parameter('noises_b', Annotation(noises_b, 'i'))])

def __init__(
        self, shape, box, drift, trajectories=1, kinetic_coeffs=0.5j,
        diffusion=None, iterations=3, noise_type=None):

    real_dtype = dtypes.real_for(drift.dtype)
    state_type = Type(drift.dtype, (trajectories, drift.components) + shape)

    self._noise = diffusion is not None
    Computation.__init__(self,
        [Parameter('output', Annotation(state_type, 'o')),
        Parameter('input', Annotation(state_type, 'i'))]
        + ([Parameter('dW', Annotation(noise_type, 'i'))] if self._noise else [])
        + [Parameter('t', Annotation(real_dtype)),
        Parameter('dt', Annotation(real_dtype))])

    self._ksquared = get_ksquared(shape, box).astype(real_dtype)
    # '/2' because we want to propagate only to dt/2
    kprop_trf = get_kprop_trf(state_type, self._ksquared, kinetic_coeffs / 2, exp=True)

    self._fft = FFT(state_type, axes=range(2, len(state_type.shape)))
    self._fft_with_kprop = FFT(state_type, axes=range(2, len(state_type.shape)))
    self._fft_with_kprop.parameter.output.connect(
        kprop_trf, kprop_trf.input,
        output_prime=kprop_trf.output,
        ksquared=kprop_trf.ksquared,
        dt=kprop_trf.dt)

    self._prop_iter = get_prop_iter(
        state_type, drift, iterations,
        diffusion=diffusion, noise_type=noise_type)

def __init__(
        self, params: 'TLweParams', shape, noise: float,
        perf_params: PerformanceParametersForDevice):

    polynomial_degree = params.polynomial_degree
    mask_size = params.mask_size

    result_a = Type(Torus32, shape + (mask_size + 1, polynomial_degree))
    result_cv = Type(ErrorFloat, shape)
    key = Type(Int32, (mask_size, polynomial_degree))
    noises1 = Type(Torus32, shape + (mask_size, polynomial_degree))
    noises2 = Type(Torus32, shape + (polynomial_degree,))

    self._transform_type = params.transform_type
    self._noise = noise
    self._mask_size = mask_size
    self._polynomial_degree = polynomial_degree
    self._perf_params = perf_params

    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_a, 'o')),
        Parameter('result_cv', Annotation(result_cv, 'o')),
        Parameter('key', Annotation(key, 'i')),
        Parameter('noises1', Annotation(noises1, 'i')),
        Parameter('noises2', Annotation(noises2, 'i'))])

def __init__(
        self, arr1, arr2, coeff, second_coeff, same_A_B=False,
        test_computation_adhoc_array=False,
        test_computation_incorrect_role=False,
        test_computation_incorrect_type=False,
        test_same_arg_as_i_and_o=False):

    self._second_coeff = second_coeff
    self._same_A_B = same_A_B
    self._test_same_arg_as_i_and_o = test_same_arg_as_i_and_o
    self._test_computation_adhoc_array = test_computation_adhoc_array
    self._test_computation_incorrect_role = test_computation_incorrect_role
    self._test_computation_incorrect_type = test_computation_incorrect_type

    Computation.__init__(self, [
        Parameter('C', Annotation(arr1, 'o')),
        Parameter('D', Annotation(arr2, 'o')),
        Parameter('A', Annotation(arr1, 'i')),
        Parameter('B', Annotation(arr2, 'i')),
        Parameter('coeff', Annotation(coeff))])

def __init__(self, arr_t, output_arr_t=None, axes=None, block_width_override=None):

    self._block_width_override = block_width_override

    all_axes = range(len(arr_t.shape))
    if axes is None:
        axes = tuple(reversed(all_axes))
    else:
        assert set(axes) == set(all_axes)
    self._axes = tuple(axes)

    self._transposes = get_transposes(arr_t.shape, self._axes)

    output_shape = transpose_shape(arr_t.shape, self._axes)
    if output_arr_t is None:
        output_arr = Type(arr_t.dtype, output_shape)
    else:
        # Note: the original passed `output_arr_t` as `exp_shape`, which would
        # print the whole array type instead of the expected shape.
        if output_arr_t.shape != output_shape:
            raise ValueError(
                "Expected output array shape: {exp_shape}, got {got_shape}".format(
                    exp_shape=output_shape, got_shape=output_arr_t.shape))
        if output_arr_t.dtype != arr_t.dtype:
            raise ValueError("Input and output array must have the same dtype")
        output_arr = output_arr_t

    Computation.__init__(self, [
        Parameter('output', Annotation(output_arr, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

def __init__(
        self, transform, batch_shape, inverse=False, i32_conversion=False,
        transforms_per_block=4, kernel_repetitions=1):

    self._inverse = inverse
    self._transform = transform
    self._transforms_per_block = transforms_per_block
    self._kernel_repetitions = kernel_repetitions
    self._i32_conversion = i32_conversion

    tr_arr = Type(self._transform.elem_dtype, batch_shape + (transform.transform_length,))
    if i32_conversion:
        arr = Type(numpy.int32, batch_shape + (transform.polynomial_length,))
        if inverse:
            oarr = arr
            iarr = tr_arr
        else:
            oarr = tr_arr
            iarr = arr
    else:
        oarr = tr_arr
        iarr = tr_arr

    Computation.__init__(self, [
        Parameter('output', Annotation(oarr, 'o')),
        Parameter('input', Annotation(iarr, 'i'))])

def __init__(
        self, params: TGswParams, in_out_params: LweParams, shape,
        perf_params: PerformanceParameters):

    tlwe_params = params.tlwe_params
    decomp_length = params.decomp_length
    mask_size = tlwe_params.mask_size
    polynomial_degree = tlwe_params.polynomial_degree

    input_size = params.tlwe_params.extracted_lweparams.size
    output_size = in_out_params.size

    assert mask_size == 1 and decomp_length == 2

    transform_type = params.tlwe_params.transform_type
    transform = get_transform(transform_type)
    tlength = transform.transformed_length(polynomial_degree)
    tdtype = transform.transformed_dtype()

    out_a = Type(Torus32, shape + (input_size,))
    out_b = Type(Torus32, shape)
    accum_a = Type(Torus32, shape + (mask_size + 1, polynomial_degree))
    gsw = Type(tdtype, (output_size, mask_size + 1, decomp_length, mask_size + 1, tlength))
    bara = Type(Torus32, shape + (output_size,))

    self._params = params
    self._in_out_params = in_out_params
    self._perf_params = perf_params

    Computation.__init__(self, [
        Parameter('lwe_a', Annotation(out_a, 'io')),
        Parameter('lwe_b', Annotation(out_b, 'io')),
        Parameter('accum_a', Annotation(accum_a, 'io')),
        Parameter('gsw', Annotation(gsw, 'i')),
        Parameter('bara', Annotation(bara, 'i'))])

def __init__(self, arr, coeff):
    Computation.__init__(self, [
        Parameter('C', Annotation(arr, 'io')),
        Parameter('D', Annotation(arr, 'io')),
        Parameter('coeff1', Annotation(coeff)),
        Parameter('coeff2', Annotation(coeff))])

def __init__(self, shape_info):
    Computation.__init__(self, [
        Parameter('result_a', Annotation(shape_info.a, 'o')),
        Parameter('result_b', Annotation(shape_info.b, 'o')),
        Parameter('result_cv', Annotation(shape_info.current_variances, 'o')),
        Parameter('mu', Annotation(Type(Torus32)))])

def __init__(self, length, arr1, arr2):
    assert arr1.shape == (length,)
    assert arr2.shape == (2, length)
    self._arr1 = arr1
    self._arr2 = arr2
    Computation.__init__(self, [
        Parameter('output', Annotation(Type(numpy.float32, length), 'o'))])

def __init__(self, arr):
    copy_trf = copy(arr, out_arr_t=arr)
    self._copy_comp = PureParallel.from_trf(copy_trf, copy_trf.input)
    Computation.__init__(self, [
        Parameter('outer_output', Annotation(arr, 'o')),
        Parameter('outer_input', Annotation(arr, 'i'))])

def __init__(self, result_shape_info, source_shape):
    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_shape_info.a, 'o')),
        Parameter('result_b', Annotation(result_shape_info.b, 'o')),
        Parameter('result_cv', Annotation(result_shape_info.current_variances, 'o')),
        Parameter('mus', Annotation(Type(Torus32, source_shape), 'i'))])

def __init__(self, arr_t):
    out_arr = Type(
        dtypes.real_for(arr_t.dtype),
        arr_t.shape[:-1] + (arr_t.shape[-1] * 2,))
    Computation.__init__(self, [
        Parameter('output', Annotation(out_arr, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

def __init__(self, arr_t, axes=None):
    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i')),
        Parameter('inverse', Annotation(numpy.int32), default=0)])
    if axes is None:
        axes = range(len(arr_t.shape))
    self._axes = tuple(sorted(axes))

def __init__(self, arr, coeff): Computation.__init__( self, [ Parameter("C", Annotation(arr, "io")), Parameter("D", Annotation(arr, "io")), Parameter("coeff1", Annotation(coeff)), Parameter("coeff2", Annotation(coeff)), ], )
def __init__(self, matrix_t):
    Computation.__init__(self, [
        Parameter('output', Annotation(Type(matrix_t.dtype, matrix_t.shape[:-1]), 'o')),
        Parameter('matrix', Annotation(matrix_t, 'i')),
        Parameter('vector', Annotation(Type(matrix_t.dtype, matrix_t.shape[-1]), 'i'))])

def __init__(self, system, representation, samples):
    state = Type(numpy.complex128, (samples, system.modes))
    Computation.__init__(self, [
        Parameter('alpha', Annotation(state, 'o')),
        Parameter('beta', Annotation(state, 'o')),
        Parameter('seed', Annotation(numpy.int32))])
    self._system = system
    self._representation = representation

def __init__(self, params: 'TLweParams', shape):
    a_type = Type(Torus32, shape + (params.mask_size + 1, params.polynomial_degree))
    cv_type = Type(ErrorFloat, shape + (params.mask_size + 1,))
    mu_type = Type(Torus32, shape + (params.polynomial_degree,))

    self._mask_size = params.mask_size

    Computation.__init__(self, [
        Parameter('a', Annotation(a_type, 'o')),
        Parameter('current_variances', Annotation(cv_type, 'o')),
        Parameter('mu', Annotation(mu_type, 'i'))])

def __init__(self, arr_t, axes=None):
    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])
    if axes is None:
        axes = tuple(range(len(arr_t.shape)))
    else:
        axes = tuple(axes)
    self._axes = axes

def __init__(self, shape, mspace_size):
    self._mspace_size = mspace_size
    messages = Type(Torus32, shape)
    result = Type(Int32, shape)
    Computation.__init__(self, [
        Parameter('result', Annotation(result, 'o')),
        Parameter('messages', Annotation(messages, 'i'))])

def __init__(self, shape, lwe_size):
    a = Type(Torus32, shape + (lwe_size,))
    b = Type(Torus32, shape)
    key = Type(Int32, (lwe_size,))
    Computation.__init__(self, [
        Parameter('result', Annotation(b, 'o')),
        Parameter('lwe_a', Annotation(a, 'i')),
        Parameter('lwe_b', Annotation(b, 'i')),
        Parameter('key', Annotation(key, 'i'))])

def __init__(self, arr_t, dont_store_last=False):
    self._dont_store_last = dont_store_last
    output_size = arr_t.shape[-1] // 2 + (0 if dont_store_last else 1)
    out_arr = Type(
        dtypes.complex_for(arr_t.dtype),
        arr_t.shape[:-1] + (output_size,))
    Computation.__init__(self, [
        Parameter('output', Annotation(out_arr, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

def __init__(self, result_shape_info, source_shape_info, add_result=False):
    self._add_result = add_result
    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_shape_info.a, 'o')),
        Parameter('result_b', Annotation(result_shape_info.b, 'o')),
        Parameter('result_cv', Annotation(result_shape_info.current_variances, 'o')),
        Parameter('source_a', Annotation(source_shape_info.a, 'i')),
        Parameter('source_b', Annotation(source_shape_info.b, 'i')),
        Parameter('source_cv', Annotation(source_shape_info.current_variances, 'i')),
        Parameter('coeff', Annotation(Type(Torus32)))])

def __init__(self, state_arr, dt, box=None, kinetic_coeff=1, nonlinear_module=None):

    scalar_dtype = dtypes.real_for(state_arr.dtype)
    Computation.__init__(self, [
        Parameter('output', Annotation(state_arr, 'o')),
        Parameter('input', Annotation(state_arr, 'i')),
        Parameter('t', Annotation(scalar_dtype))])

    self._box = box
    self._kinetic_coeff = kinetic_coeff
    self._nonlinear_module = nonlinear_module

    self._components = state_arr.shape[0]
    self._ensembles = state_arr.shape[1]
    self._grid_shape = state_arr.shape[2:]

    ksquared = get_ksquared(self._grid_shape, self._box)
    # Kinetic propagation over dt/2 (split-step scheme).
    self._kprop = numpy.exp(ksquared * (-1j * kinetic_coeff * dt / 2)).astype(state_arr.dtype)

    self._kprop_trf = Transformation(
        [
            Parameter('output', Annotation(state_arr, 'o')),
            Parameter('input', Annotation(state_arr, 'i')),
            Parameter('kprop', Annotation(self._kprop, 'i'))],
        """
        ${kprop.ctype} kprop_coeff = ${kprop.load_idx}(${', '.join(idxs[2:])});
        ${output.store_same}(${mul}(${input.load_same}, kprop_coeff));
        """,
        render_kwds=dict(mul=functions.mul(state_arr.dtype, self._kprop.dtype)))

    self._fft = FFT(state_arr, axes=range(2, len(state_arr.shape)))
    self._fft_with_kprop = FFT(state_arr, axes=range(2, len(state_arr.shape)))
    self._fft_with_kprop.parameter.output.connect(
        self._kprop_trf, self._kprop_trf.input,
        output_prime=self._kprop_trf.output,
        kprop=self._kprop_trf.kprop)

    nonlinear_wrapper = get_nonlinear_wrapper(state_arr.dtype, nonlinear_module, dt)
    self._N1 = get_nonlinear1(state_arr, scalar_dtype, nonlinear_wrapper)
    self._N2 = get_nonlinear2(state_arr, scalar_dtype, nonlinear_wrapper, dt)
    self._N3 = get_nonlinear3(state_arr, scalar_dtype, nonlinear_wrapper, dt)

def __init__(self, params: 'TLweParams', shape):
    self._mask_size = params.mask_size
    self._polynomial_degree = params.polynomial_degree

    result_a = Type(Torus32, shape + (params.extracted_lweparams.size,))
    result_b = Type(Torus32, shape)
    tlwe_a = Type(Torus32, shape + (params.mask_size + 1, params.polynomial_degree))

    Computation.__init__(self, [
        Parameter('result_a', Annotation(result_a, 'o')),
        Parameter('result_b', Annotation(result_b, 'o')),
        Parameter('tlwe_a', Annotation(tlwe_a, 'i'))])

def __init__(self, arr_t, predicate, axes=None, output_arr_t=None):

    dims = len(arr_t.shape)

    if axes is None:
        axes = tuple(range(dims))
    else:
        axes = tuple(sorted(helpers.wrap_in_tuple(axes)))

    if len(set(axes)) != len(axes):
        raise ValueError("Cannot reduce twice over the same axis")

    if min(axes) < 0 or max(axes) >= dims:
        raise ValueError("Axes numbers are out of bounds")

    if hasattr(predicate.empty, 'dtype'):
        if arr_t.dtype != predicate.empty.dtype:
            raise ValueError("The predicate and the array must use the same data type")
        empty = predicate.empty
    else:
        empty = dtypes.cast(arr_t.dtype)(predicate.empty)

    remaining_axes = tuple(a for a in range(dims) if a not in axes)
    output_shape = tuple(arr_t.shape[a] for a in remaining_axes)

    if axes == tuple(range(dims - len(axes), dims)):
        self._transpose_axes = None
    else:
        self._transpose_axes = remaining_axes + axes

    self._operation = predicate.operation
    self._empty = empty

    if output_arr_t is None:
        output_arr_t = Type(arr_t.dtype, shape=output_shape)
    else:
        if output_arr_t.dtype != arr_t.dtype:
            raise ValueError(
                "The dtype of the output array must be the same as that of the input array")
        if output_arr_t.shape != output_shape:
            raise ValueError(
                "Expected the output array shape " + str(output_shape)
                + ", got " + str(output_arr_t.shape))

    Computation.__init__(self, [
        Parameter('output', Annotation(output_arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

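# --- Usage sketch ---
# The constructor above matches reikna.algorithms.Reduce. Assuming so, a
# minimal, hedged example summing over the last axis:

import numpy
from reikna.cluda import any_api
from reikna.algorithms import Reduce, predicate_sum

api = any_api()
thr = api.Thread.create()

arr = numpy.random.rand(16, 32).astype(numpy.float32)
rd = Reduce(arr, predicate_sum(arr.dtype), axes=(1,))
rdc = rd.compile(thr)

arr_dev = thr.to_device(arr)
res_dev = thr.empty_like(rd.parameter.output)
rdc(res_dev, arr_dev)
# res_dev.get() should agree with arr.sum(axis=1) up to float32 tolerance.
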
def __init__(self, randoms_arr, generators_dim, sampler, seed=None):

    self._sampler = sampler
    self._keygen = KeyGenerator.create(sampler.bijection, seed=seed, reserve_id_space=True)

    assert sampler.dtype == randoms_arr.dtype

    counters_size = randoms_arr.shape[-generators_dim:]

    self._generators_dim = generators_dim
    self._counters_t = Type(sampler.bijection.counter_dtype, shape=counters_size)

    Computation.__init__(self, [
        Parameter('counters', Annotation(self._counters_t, 'io')),
        Parameter('randoms', Annotation(randoms_arr, 'o'))])

def __init__(self, size, dtype):
    Computation.__init__(self, [
        Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
        Parameter('input', Annotation(Type(dtype, shape=size), 'i'))])
    self._p = PureParallel(
        [
            Parameter('output', Annotation(Type(dtype, shape=size), 'o')),
            Parameter('i1', Annotation(Type(dtype, shape=size), 'i')),
            Parameter('i2', Annotation(Type(dtype, shape=size), 'i'))],
        """
        ${i1.ctype} t1 = ${i1.load_idx}(${idxs[0]});
        ${i2.ctype} t2 = ${i2.load_idx}(${idxs[0]});
        ${output.store_idx}(${idxs[0]}, t1 + t2);
        """)

def __init__(self, arr_t, axes=None):

    if not dtypes.is_complex(arr_t.dtype):
        raise ValueError("FFT computation requires array of a complex dtype")

    Computation.__init__(self, [
        Parameter('output', Annotation(arr_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i')),
        Parameter('inverse', Annotation(numpy.int32), default=0)])

    if axes is None:
        axes = tuple(range(len(arr_t.shape)))
    else:
        axes = tuple(axes)
    self._axes = axes

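# --- Usage sketch ---
# This constructor matches reikna.fft.FFT. Assuming so, a minimal example;
# note that `inverse` is a scalar kernel parameter with default 0, so the
# transform direction is chosen at call time rather than at compile time:

import numpy
from reikna.cluda import any_api
from reikna.fft import FFT

api = any_api()
thr = api.Thread.create()

a = numpy.random.rand(1024).astype(numpy.complex64)
fft = FFT(a)
fftc = fft.compile(thr)

a_dev = thr.to_device(a)
res_dev = thr.empty_like(a_dev)
fftc(res_dev, a_dev)             # forward transform
fftc(a_dev, res_dev, inverse=1)  # inverse transform, back into a_dev
# a_dev.get() should agree with the original `a` up to float32 tolerance.
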
def __init__(self, arr_t, order=2, axes=None):

    tr_elems = norm_const(arr_t, order)
    out_dtype = tr_elems.output.dtype

    rd = Reduce(Type(out_dtype, arr_t.shape), predicate_sum(out_dtype), axes=axes)

    res_t = rd.parameter.output
    tr_sum = norm_const(res_t, 1. / order)

    rd.parameter.input.connect(tr_elems, tr_elems.output, input_prime=tr_elems.input)
    rd.parameter.output.connect(tr_sum, tr_sum.input, output_prime=tr_sum.output)

    self._rd = rd

    Computation.__init__(self, [
        Parameter('output', Annotation(res_t, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

def __init__(self, state_arr, dt, box=None, kinetic_coeff=1, nonlinear_module=None):

    scalar_dtype = dtypes.real_for(state_arr.dtype)
    potential_arr = Type(scalar_dtype, shape=state_arr.shape[2:])

    Computation.__init__(self, [
        Parameter('output', Annotation(state_arr, 'o')),
        Parameter('input', Annotation(state_arr, 'i')),
        Parameter('potential1', Annotation(potential_arr, 'i')),
        Parameter('potential2', Annotation(potential_arr, 'i')),
        Parameter('t_potential1', Annotation(scalar_dtype)),
        Parameter('t_potential2', Annotation(scalar_dtype)),
        Parameter('t', Annotation(scalar_dtype))])

    self._box = box
    self._kinetic_coeff = kinetic_coeff
    self._nonlinear_module = nonlinear_module

    self._components = state_arr.shape[0]
    self._ensembles = state_arr.shape[1]
    self._grid_shape = state_arr.shape[2:]

    ksquared = get_ksquared(self._grid_shape, self._box)
    # Kinetic propagation over dt/2 (split-step scheme).
    self._kprop = numpy.exp(ksquared * (-1j * kinetic_coeff * dt / 2)).astype(state_arr.dtype)

    self._kprop_trf = Transformation(
        [
            Parameter('output', Annotation(state_arr, 'o')),
            Parameter('input', Annotation(state_arr, 'i')),
            Parameter('kprop', Annotation(self._kprop, 'i'))],
        """
        ${kprop.ctype} kprop_coeff = ${kprop.load_idx}(${', '.join(idxs[2:])});
        ${output.store_same}(${mul}(${input.load_same}, kprop_coeff));
        """,
        render_kwds=dict(mul=functions.mul(state_arr.dtype, self._kprop.dtype)))

    self._fft = FFT(state_arr, axes=range(2, len(state_arr.shape)))
    self._fft_with_kprop = FFT(state_arr, axes=range(2, len(state_arr.shape)))
    self._fft_with_kprop.parameter.output.connect(
        self._kprop_trf, self._kprop_trf.input,
        output_prime=self._kprop_trf.output,
        kprop=self._kprop_trf.kprop)

    nonlinear_wrapper = get_nonlinear_wrapper(
        state_arr.shape[0], state_arr.dtype, nonlinear_module, dt)
    self._N1 = get_nonlinear1(state_arr, potential_arr, scalar_dtype, nonlinear_wrapper)
    self._N2 = get_nonlinear2(state_arr, potential_arr, scalar_dtype, nonlinear_wrapper, dt)
    self._N3 = get_nonlinear3(state_arr, potential_arr, scalar_dtype, nonlinear_wrapper, dt)
    self._potential_interpolator = get_potential_interpolator(potential_arr, dt)

def __init__(self, arr_t, axes=None, block_width_override=None):

    self._block_width_override = block_width_override

    all_axes = range(len(arr_t.shape))
    if axes is None:
        axes = tuple(reversed(all_axes))
    else:
        assert set(axes) == set(all_axes)
    self._axes = tuple(axes)

    output_shape = transpose_shape(arr_t.shape, self._axes)
    output_arr = Type(arr_t.dtype, output_shape)

    Computation.__init__(self, [
        Parameter('output', Annotation(output_arr, 'o')),
        Parameter('input', Annotation(arr_t, 'i'))])

def __init__(
        self, shape, box, drift, trajectories=1, kinetic_coeffs=0.5j,
        diffusion=None, ksquared_cutoff=None, noise_type=None):

    real_dtype = dtypes.real_for(drift.dtype)
    state_type = Type(drift.dtype, (trajectories, drift.components) + shape)

    self._noise = diffusion is not None
    Computation.__init__(self,
        [Parameter('output', Annotation(state_type, 'o')),
        Parameter('input', Annotation(state_type, 'i'))]
        + ([Parameter('dW', Annotation(noise_type, 'i'))] if self._noise else [])
        + [Parameter('t', Annotation(real_dtype)),
        Parameter('dt', Annotation(real_dtype))])

    self._ksquared = get_ksquared(shape, box).astype(real_dtype)
    kprop_trf = get_kprop_trf(state_type, self._ksquared, kinetic_coeffs)

    self._ksquared_cutoff = ksquared_cutoff
    if self._ksquared_cutoff is not None:
        project_trf = get_project_trf(state_type, self._ksquared, ksquared_cutoff)
        self._fft_with_project = FFT(state_type, axes=range(2, len(state_type.shape)))
        self._fft_with_project.parameter.output.connect(
            project_trf, project_trf.input,
            output_prime=project_trf.output,
            ksquared=project_trf.ksquared)

    self._fft = FFT(state_type, axes=range(2, len(state_type.shape)))
    self._fft_with_kprop = FFT(state_type, axes=range(2, len(state_type.shape)))
    self._fft_with_kprop.parameter.output.connect(
        kprop_trf, kprop_trf.input,
        output_prime=kprop_trf.output,
        ksquared=kprop_trf.ksquared,
        dt=kprop_trf.dt)

    self._xpropagate = get_xpropagate(
        state_type, drift, diffusion=diffusion, noise_type=noise_type)

    # Stage coefficients of a six-stage low-storage Runge-Kutta scheme.
    self._ai = numpy.array([
        0.0, -0.737101392796, -1.634740794341,
        -0.744739003780, -1.469897351522, -2.813971388035])
    self._bi = numpy.array([
        0.032918605146, 0.823256998200, 0.381530948900,
        0.200092213184, 1.718581042715, 0.27])
    self._ci = numpy.array([
        0.0, 0.032918605146, 0.249351723343,
        0.466911705055, 0.582030414044, 0.847252983783])

def __init__(self, parameters, code, guiding_array=None, render_kwds=None):

    Computation.__init__(self, parameters)
    self._root_parameters = list(self.signature.parameters.keys())

    if isinstance(code, Snippet):
        self._snippet = code
    else:
        self._snippet = Snippet(
            helpers.template_def(['idxs'] + self._root_parameters, code),
            render_kwds=render_kwds)

    if guiding_array is None:
        guiding_array = self._root_parameters[0]

    if isinstance(guiding_array, str):
        self._guiding_shape = self.signature.parameters[guiding_array].annotation.type.shape
    else:
        self._guiding_shape = guiding_array

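# --- Usage sketch ---
# The constructor above is reikna.algorithms.PureParallel: it launches one
# thread per element of the guiding array, with the template code addressing
# elements through load_idx/store_idx. A small doubling kernel as an example:

import numpy
from reikna.cluda import any_api
from reikna.algorithms import PureParallel
from reikna.core import Parameter, Annotation, Type

api = any_api()
thr = api.Thread.create()

arr_t = Type(numpy.float32, shape=(128,))
double = PureParallel(
    [Parameter('output', Annotation(arr_t, 'o')),
     Parameter('input', Annotation(arr_t, 'i'))],
    """
    ${output.store_idx}(${idxs[0]}, ${input.load_idx}(${idxs[0]}) * 2);
    """)

doublec = double.compile(thr)
a = numpy.random.rand(128).astype(numpy.float32)
a_dev = thr.to_device(a)
res_dev = thr.empty_like(a_dev)
doublec(res_dev, a_dev)  # res_dev.get() == a * 2
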
def __init__(
        self, shape, drift, trajectories=1, diffusion=None,
        iterations=3, noise_type=None):

    if dtypes.is_complex(drift.dtype):
        real_dtype = dtypes.real_for(drift.dtype)
    else:
        real_dtype = drift.dtype

    state_type = Type(drift.dtype, (trajectories, drift.components) + shape)

    self._noise = diffusion is not None
    Computation.__init__(self,
        [Parameter('output', Annotation(state_type, 'o')),
        Parameter('input', Annotation(state_type, 'i'))]
        + ([Parameter('dW', Annotation(noise_type, 'i'))] if self._noise else [])
        + [Parameter('t', Annotation(real_dtype)),
        Parameter('dt', Annotation(real_dtype))])

    self._prop_iter = get_prop_iter(
        state_type, drift, iterations,
        diffusion=diffusion, noise_type=noise_type)

def __init__(self, x, NFFT=256, noverlap=128, pad_to=None, window=hanning_window):

    assert dtypes.is_real(x.dtype)
    assert x.ndim == 1

    rolling_frame_trf = rolling_frame(x, NFFT, noverlap, pad_to)

    complex_dtype = dtypes.complex_for(x.dtype)
    fft_arr = Type(complex_dtype, rolling_frame_trf.output.shape)
    real_fft_arr = Type(x.dtype, rolling_frame_trf.output.shape)

    window_trf = window(real_fft_arr, NFFT)
    broadcast_zero_trf = transformations.broadcast_const(real_fft_arr, 0)
    to_complex_trf = transformations.combine_complex(fft_arr)
    amplitude_trf = transformations.norm_const(fft_arr, 1)
    crop_trf = crop_frequencies(amplitude_trf.output)

    fft = FFT(fft_arr, axes=(1,))
    fft.parameter.input.connect(
        to_complex_trf, to_complex_trf.output,
        input_real=to_complex_trf.real, input_imag=to_complex_trf.imag)
    fft.parameter.input_imag.connect(
        broadcast_zero_trf, broadcast_zero_trf.output)
    fft.parameter.input_real.connect(
        window_trf, window_trf.output,
        unwindowed_input=window_trf.input)
    fft.parameter.unwindowed_input.connect(
        rolling_frame_trf, rolling_frame_trf.output,
        flat_input=rolling_frame_trf.input)
    fft.parameter.output.connect(
        amplitude_trf, amplitude_trf.input,
        amplitude=amplitude_trf.output)
    fft.parameter.amplitude.connect(
        crop_trf, crop_trf.input,
        cropped_amplitude=crop_trf.output)

    self._fft = fft
    self._transpose = Transpose(fft.parameter.cropped_amplitude)

    Computation.__init__(self, [
        Parameter('output', Annotation(self._transpose.parameter.output, 'o')),
        Parameter('input', Annotation(fft.parameter.flat_input, 'i'))])

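# --- Usage sketch ---
# The spectrogram constructor above works by chaining transformations onto
# FFT parameters via `connect`. A smaller, hedged illustration of the same
# mechanism, scaling the FFT input by a runtime scalar with
# transformations.mul_param:

import numpy
from reikna.cluda import any_api
from reikna.core import Type
from reikna.fft import FFT
from reikna import transformations

api = any_api()
thr = api.Thread.create()

arr_t = Type(numpy.complex64, shape=(256,))
fft = FFT(arr_t)
scale = transformations.mul_param(arr_t, numpy.float32)
# 'input' is replaced in the signature by the transformation's own leaves:
fft.parameter.input.connect(
    scale, scale.output,
    unscaled_input=scale.input, coeff=scale.param)

fftc = fft.compile(thr)
a_dev = thr.to_device(numpy.random.rand(256).astype(numpy.complex64))
res_dev = thr.empty_like(a_dev)
fftc(res_dev, a_dev, 0.5)  # computes FFT(0.5 * input)
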
def __init__(
        self, a_arr, b_arr, out_arr=None, block_width_override=None,
        transposed_a=False, transposed_b=False):

    if len(a_arr.shape) == 1:
        a_arr = Type(a_arr.dtype, shape=(1,) + a_arr.shape)

    if len(b_arr.shape) == 1:
        b_arr = Type(b_arr.dtype, shape=b_arr.shape + (1,))

    a_batch_shape = a_arr.shape[:-2]
    b_batch_shape = b_arr.shape[:-2]

    a_outer_size = a_arr.shape[-1 if transposed_a else -2]
    convolution_size = a_arr.shape[-2 if transposed_a else -1]
    b_outer_size = b_arr.shape[-2 if transposed_b else -1]

    if out_arr is None:
        out_dtype = dtypes.result_type(a_arr.dtype, b_arr.dtype)

        batch_len = max(len(a_batch_shape), len(b_batch_shape))
        batch_shape = b_batch_shape if helpers.product(a_batch_shape) == 1 else a_batch_shape
        batch_shape = (1,) * (batch_len - len(batch_shape)) + batch_shape

        out_shape = batch_shape + (a_outer_size, b_outer_size)

        out_arr = Type(out_dtype, shape=out_shape)

    Computation.__init__(self, [
        Parameter('output', Annotation(out_arr, 'o')),
        Parameter('matrix_a', Annotation(a_arr, 'i')),
        Parameter('matrix_b', Annotation(b_arr, 'i'))])

    self._block_width_override = block_width_override
    self._a_outer_size = a_outer_size
    self._convolution_size = convolution_size
    self._b_outer_size = b_outer_size
    self._transposed_a = transposed_a
    self._transposed_b = transposed_b

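# --- Usage sketch ---
# The constructor above matches reikna.linalg.MatrixMul (matrix multiplication
# with optional batching and transposition). Assuming so:

import numpy
from reikna.cluda import any_api
from reikna.linalg import MatrixMul

api = any_api()
thr = api.Thread.create()

a = numpy.random.rand(64, 32).astype(numpy.float32)
b = numpy.random.rand(32, 16).astype(numpy.float32)

mul = MatrixMul(a, b)
mulc = mul.compile(thr)

a_dev = thr.to_device(a)
b_dev = thr.to_device(b)
res_dev = thr.empty_like(mul.parameter.output)
mulc(res_dev, a_dev, b_dev)
# res_dev.get() should agree with numpy.dot(a, b) up to float32 tolerance.
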
def __init__(self, array, axis):
    self._axis = axis
    Computation.__init__(self, [
        Parameter('array', Annotation(array, 'io')),
        Parameter('shift', Annotation(Type(numpy.int32)))])