def sample_preimage(self, point):
    """
    Sample a short vector s such that s[0] + s[1] * h = point.
    """
    [[a, b], [c, d]] = self.B0_fft

    # We compute a vector t_fft such that:
    #     (fft(point), fft(0)) * B0_fft = t_fft
    # Because fft(0) = 0 and the inverse of B has a very specific form,
    # we can do several optimizations.
    point_fft = fft(point)
    t0_fft = [(point_fft[i] * d[i]) / q for i in range(self.n)]
    t1_fft = [(-point_fft[i] * b[i]) / q for i in range(self.n)]
    t_fft = [t0_fft, t1_fft]

    # We now compute v such that:
    #     v = z * B0 for an integral vector z
    #     v is close to (point, 0)
    z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin)

    v0_fft = add_fft(mul_fft(z_fft[0], a), mul_fft(z_fft[1], c))
    v1_fft = add_fft(mul_fft(z_fft[0], b), mul_fft(z_fft[1], d))
    v0 = [int(round(elt)) for elt in ifft(v0_fft)]
    v1 = [int(round(elt)) for elt in ifft(v1_fft)]

    # The difference s = (point, 0) - v is such that:
    #     s is short
    #     s[0] + s[1] * h = point
    s = [sub(point, v0), neg(v1)]
    return s
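# A minimal sketch of how sample_preimage is typically driven during
# signing, assuming a key object sk exposing hash_to_point and a squared
# norm bound signature_bound (the names mirror the Falcon reference code,
# but treat them as assumptions here, not a guaranteed API):
def sign_sketch(sk, message, salt):
    point = sk.hash_to_point(message, salt)
    while True:
        s = sk.sample_preimage(point)
        norm_sq = sum(c * c for c in s[0]) + sum(c * c for c in s[1])
        # Reject-and-retry keeps signatures inside the published bound.
        if norm_sq <= sk.signature_bound:
            return s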
def sample_preimage_fft(self, point):
    """Sample preimage."""
    B = self.B0_fft
    c = point, [0] * self.n
    t_fft = self.get_coord_in_fft(c)
    z_fft = ffsampling_fft(t_fft, self.T_fft)
    v0_fft = add_fft(mul_fft(z_fft[0], B[0][0]), mul_fft(z_fft[1], B[1][0]))
    v1_fft = add_fft(mul_fft(z_fft[0], B[0][1]), mul_fft(z_fft[1], B[1][1]))
    v0 = [int(round(elt)) for elt in ifft(v0_fft)]
    v1 = [int(round(elt)) for elt in ifft(v1_fft)]
    v = v0, v1
    s = [sub(c[0], v[0]), sub(c[1], v[1])]
    return s
def sample_preimage_fft(self, point):
    """
    Sample preimage.

    Input:
    self        The private key
    point       An element of Z_q[x] / (x ** d + 1)

    Output:
    s           A short element such that s * A = point mod q

    Format:     Coefficient
    """
    d = self.d
    m = self.m
    # Compute large preimage
    c = [point] + [[0] * d for _ in range(m)]
    # Move to FFT domain
    c_fft = [fft(elt) for elt in c]
    # Compute coefficients in span(B)
    t_fft = vecmatmul_fft(c_fft, self.invB_fft)
    # Fast Fourier sampling
    z_fft = ffsampling_fft(t_fft, self.T_fft)
    # Compute short preimage s = (c - v) = (t - z) * B
    v_fft = vecmatmul_fft(z_fft, self.B_fft)
    v = [[int(round(coef)) for coef in ifft(elt)] for elt in v_fft]
    s = [sub(c[i], v[i]) for i in range(m + 1)]
    return s
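# A companion sanity check, sketched under the assumption that the
# codebase's add_zq/mul_zq helpers are in scope and that the public
# matrix is A = [1 | h] in Hermite normal form: a preimage is valid when
# sum_i s[i] * A[i] = point mod q (and s is short).
def is_preimage(s, A, point, q):
    acc = [0] * len(point)
    for s_i, a_i in zip(s, A):
        acc = add_zq(acc, mul_zq(s_i, a_i, q), q)
    return acc == [coef % q for coef in point]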
def __fitcf__(self, **args):
    train_objects_files = returnFiles(args['train_object_folder'])
    train_objects = returnImages(train_objects_files)
    transformed_train_objects = preproc(train_objects)
    filter_raw = cf.synthesize(
        transformed_train_objects,
        train_object_labels=args['train_object_labels'],
        filter_type=self.args['filter_type'])
    # Use '==' for string comparison: 'is' tests object identity and is
    # not guaranteed to hold for equal string literals.
    if self.type == 'cf':
        self.data = filter_raw
        self.__setthr__(transformed_train_objects,
                        args['train_object_labels'])
    else:
        filter_holo = synthesizeHolo(ifft(filter_raw))
        self.data = proc(filter_holo, self.processing, **self.args)
        transformed_train_objects = [
            square(element, np.shape(self.data)[0], 0, True)
            for element in transformed_train_objects
        ]
        self.__setthr__(transformed_train_objects,
                        args['train_object_labels'])
    if args['is_save']:
        self.__save()
def __save(self):
    """
    Save the classifier to disk. In progress...
    """
    full_path = pjoin('data', 'model', self.type)
    full_classifier_name = pjoin(full_path, self.name) + '.pkl'
    if not os.path.isdir(full_path):
        try:
            os.mkdir(full_path)
        except OSError:
            raise OSError(
                "directory {} can't be created!".format(full_path))
    with open(full_classifier_name, 'wb') as output:
        dump(self.data, output)
    if self.type == 'cf':
        full_image_name = pjoin(full_path, self.name) + '.png'
        img = np.real(ifft(self.data))
        img = img.astype(float) * 255 / np.max(img)
        cv.imwrite(full_image_name, img)
    elif self.type == 'cf_holo':
        full_image_name = pjoin(full_path, self.name) + '.png'
        img = np.abs(self.data)
        img = img.astype(float) * 255 / np.max(img)
        cv.imwrite(full_image_name, img)
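# A matching load sketch, assuming the directory layout written by
# __save above (the function name is hypothetical, and pickle is assumed
# to be the serializer behind dump):
from os.path import join as pjoin
from pickle import load

def load_classifier_data(cls_type, name):
    with open(pjoin('data', 'model', cls_type, name) + '.pkl', 'rb') as fh:
        return load(fh)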
def conv_fft(y, z, n):
    # Circular convolution via the convolution theorem: multiply the
    # spectra pointwise, then invert and keep the real part. This
    # fft/ifft pair appears to omit the 1/n factor, hence the explicit
    # division by n.
    y_value = fft(y)
    z_value = fft(z)
    result = [y_value[i] * z_value[i] / n for i in range(n)]
    return [i.real for i in ifft(result)]
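# The same identity checked with numpy, where ifft already carries the
# 1/n factor, so no manual division is needed (a standalone cross-check,
# not part of the original module):
import numpy as np

def circular_conv(y, z):
    return np.real(np.fft.ifft(np.fft.fft(y) * np.fft.fft(z)))

print(circular_conv([1.0, 2.0, 3.0, 4.0], [0.5, 0.0, -0.5, 1.0]))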
def reduce(f, g, F, G):
    """
    Reduce (F, G) relatively to (f, g).

    This is done via Babai's reduction.
    (F, G) <-- (F, G) - k * (f, g), where
    k = round((F f* + G g*) / (f f* + g g*)).
    Corresponds to algorithm 7 (Reduce) of Falcon's documentation.
    """
    n = len(f)
    size = max(53, bitsize(min(f)), bitsize(max(f)),
               bitsize(min(g)), bitsize(max(g)))
    f_adjust = [elt >> (size - 53) for elt in f]
    g_adjust = [elt >> (size - 53) for elt in g]
    fa_fft = fft(f_adjust)
    ga_fft = fft(g_adjust)

    while True:
        # Because we work in finite precision to reduce very large polynomials,
        # we may need to perform the reduction several times.
        Size = max(53, bitsize(min(F)), bitsize(max(F)),
                   bitsize(min(G)), bitsize(max(G)))
        if Size < size:
            break
        F_adjust = [elt >> (Size - 53) for elt in F]
        G_adjust = [elt >> (Size - 53) for elt in G]
        Fa_fft = fft(F_adjust)
        Ga_fft = fft(G_adjust)

        den_fft = add_fft(mul_fft(fa_fft, adj_fft(fa_fft)),
                          mul_fft(ga_fft, adj_fft(ga_fft)))
        num_fft = add_fft(mul_fft(Fa_fft, adj_fft(fa_fft)),
                          mul_fft(Ga_fft, adj_fft(ga_fft)))
        k_fft = div_fft(num_fft, den_fft)
        k = ifft(k_fft)
        k = [int(round(elt)) for elt in k]
        if all(elt == 0 for elt in k):
            break
        # The two next lines are the costliest operations in ntru_gen
        # (more than 75% of the total cost in dimension n = 1024).
        # There are at least two ways to make them faster:
        # - replace Karatsuba with Toom-Cook
        # - mutualized Karatsuba, see ia.cr/2020/268
        # For simplicity reasons, we didn't implement these optimisations here.
        fk = karamul(f, k)
        gk = karamul(g, k)
        for i in range(n):
            F[i] -= fk[i] << (Size - size)
            G[i] -= gk[i] << (Size - size)
    return F, G
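# The same Babai/size-reduction step in the simplest possible setting
# (degree 0, plain integers), to make the formula above concrete:
def reduce_scalar(f, g, F, G):
    k = round((F * f + G * g) / (f * f + g * g))
    return F - k * f, G - k * g

print(reduce_scalar(3, 4, 100, 133))  # k = 33, so (F, G) becomes (1, 1)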
def sample_preimage(self, point, seed=None):
    """
    Sample a short vector s such that s[0] + s[1] * h = point.
    """
    [[a, b], [c, d]] = self.B0_fft

    # We compute a vector t_fft such that:
    #     (fft(point), fft(0)) * B0_fft = t_fft
    # Because fft(0) = 0 and the inverse of B has a very specific form,
    # we can do several optimizations.
    point_fft = fft(point)
    t0_fft = [(point_fft[i] * d[i]) / q for i in range(self.n)]
    t1_fft = [(-point_fft[i] * b[i]) / q for i in range(self.n)]
    t_fft = [t0_fft, t1_fft]

    # We now compute v such that:
    #     v = z * B0 for an integral vector z
    #     v is close to (point, 0)
    if seed is None:
        # If no seed is defined, use urandom as the pseudo-random source.
        z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin, urandom)
    else:
        # If a seed is defined, initialize a ChaCha20 PRG
        # that is used to generate pseudo-randomness.
        chacha_prng = ChaCha20(seed)
        z_fft = ffsampling_fft(t_fft, self.T_fft, self.sigmin,
                               chacha_prng.randombytes)

    v0_fft = add_fft(mul_fft(z_fft[0], a), mul_fft(z_fft[1], c))
    v1_fft = add_fft(mul_fft(z_fft[0], b), mul_fft(z_fft[1], d))
    v0 = [int(round(elt)) for elt in ifft(v0_fft)]
    v1 = [int(round(elt)) for elt in ifft(v1_fft)]

    # The difference s = (point, 0) - v is such that:
    #     s is short
    #     s[0] + s[1] * h = point
    s = [sub(point, v0), neg(v1)]
    return s
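# A small usage sketch for the seed parameter, assuming a key object sk
# and an already-hashed target point: a fixed seed makes the Gaussian
# sampling reproducible (handy for known-answer tests), while seed=None
# draws fresh randomness from urandom.
s_det1 = sk.sample_preimage(point, seed=b"\x00" * 32)
s_det2 = sk.sample_preimage(point, seed=b"\x00" * 32)
assert s_det1 == s_det2              # same seed, same preimage
s_rand = sk.sample_preimage(point)   # randomized on each call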
def radon(tomo_stack, deapodization_factor, ST, k_r, num_angles, xp, fft):
    num_slices = tomo_stack.shape[0]
    num_rays = tomo_stack.shape[2]

    sinogram_stack = xp.empty((num_slices, num_angles, num_rays),
                              dtype=xp.float32)
    deapodization_factor.shape = (num_rays, num_rays)

    # Instead of going through the slices one at a time
    # (for i in range(0, num_slices): tomo_slice = tomo_stack[i]),
    # process two slices at once by merging them into one complex slice.
    for i in range(0, num_slices, 2):
        # merge two slices into a complex slice
        if i > num_slices - 2:
            tomo_slice = tomo_stack[i]
        else:
            tomo_slice = tomo_stack[i] + 1j * tomo_stack[i + 1]

        ###################################
        # non uniform FFT, cartesian (x,y) to Fourier polar (q,theta):
        tomo_slice = tomo_slice * deapodization_factor
        tomo_slice = fft.fft2(tomo_slice)

        # gridding from cartesian (qx,qy) to polar (q,theta)
        sinogram = ST * tomo_slice.ravel()  # SpMV
        sinogram.shape = (num_angles, num_rays)
        # end of non uniform FFT
        ###################################

        # (q,theta) to radon (r,theta):
        sinogram = fft.ifft(sinogram)

        # put the sinogram in the stack,
        # extracting the two slices out of the complex result
        if i > num_slices - 2:
            sinogram_stack[i] = sinogram.real
        else:
            sinogram_stack[i] = sinogram.real
            sinogram_stack[i + 1] = sinogram.imag
    return sinogram_stack
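# A standalone numpy illustration of the two-for-one trick used above:
# pack two real slices into one complex array, run the (linear,
# real-coefficient) pipeline once, and split the result back into real
# and imaginary parts. Here the pipeline is just fft followed by ifft.
import numpy as np

a, b = np.random.rand(256), np.random.rand(256)
y = np.fft.ifft(np.fft.fft(a + 1j * b))
assert np.allclose(y.real, a) and np.allclose(y.imag, b)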
def reduce(f, g, F, G):
    """
    Reduce (F, G) relatively to (f, g).

    This is done via Babai's reduction.
    (F, G) <-- (F, G) - k * (f, g), where
    k = round((F f* + G g*) / (f f* + g g*)).
    Similar to algorithm Reduce of Falcon's documentation.

    Input:
    f, g, F, G      Four polynomials mod (x ** n + 1)

    Output:
    F, G            Reduced in place as detailed above
                    (also returned for convenience)

    Format:         Coefficient
    """
    n = len(f)
    size = max(53, bitsize(min(f)), bitsize(max(f)),
               bitsize(min(g)), bitsize(max(g)))
    f_adjust = [elt >> (size - 53) for elt in f]
    g_adjust = [elt >> (size - 53) for elt in g]
    fa_fft = fft(f_adjust)
    ga_fft = fft(g_adjust)

    while True:
        # Because we are working in finite precision to reduce very large
        # polynomials, we may need to perform the reduction several times.
        Size = max(53, bitsize(min(F)), bitsize(max(F)),
                   bitsize(min(G)), bitsize(max(G)))
        if Size < size:
            break
        F_adjust = [elt >> (Size - 53) for elt in F]
        G_adjust = [elt >> (Size - 53) for elt in G]
        Fa_fft = fft(F_adjust)
        Ga_fft = fft(G_adjust)

        den_fft = add_fft(mul_fft(fa_fft, adj_fft(fa_fft)),
                          mul_fft(ga_fft, adj_fft(ga_fft)))
        num_fft = add_fft(mul_fft(Fa_fft, adj_fft(fa_fft)),
                          mul_fft(Ga_fft, adj_fft(ga_fft)))
        k_fft = div_fft(num_fft, den_fft)
        k = ifft(k_fft)
        k = [int(round(elt)) for elt in k]
        if all(elt == 0 for elt in k):
            break
        fk = karamul(f, k)
        gk = karamul(g, k)
        for i in range(n):
            F[i] -= fk[i] << (Size - size)
            G[i] -= gk[i] << (Size - size)
    return F, G
def corr(img, flt):
    """
    Calculate the correlation of an input image and a filter.

    The filter is given as its Fourier image. Be careful!

    Parameters
    ----------
    img : ndarray
        Input image.
    flt : ndarray
        Input filter, as its Fourier image.

    Returns
    -------
    corr : ndarray
    """
    return ifft(fft(img) * np.conj(flt))
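# A minimal usage sketch, assuming fft/ifft here wrap 2-D transforms
# (e.g. numpy.fft.fft2/ifft2 bound to these names): the correlation peak
# is then read off as the maximum of the magnitude.
import numpy as np

img = np.random.rand(64, 64)
flt_fourier = np.fft.fft2(np.random.rand(64, 64))
c = np.fft.ifft2(np.fft.fft2(img) * np.conj(flt_fourier))
peak = np.abs(c).max()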
def computeCMBY(d0):
    """
    For CMB, y = S^1/2 A N^-1 d, where S is the CMB signal covariance
    matrix (Cl's).
    """
    # N.B. Reshaping operations required to go between 2D pixel arrays and
    # 1D vector (for linear system)
    d2 = 0
    for freq in range(nFreq):
        d1 = d0[freq].data.copy().reshape((ny, nx))
        d1 *= ninvs[freq]
        a_l = fft.fft(d1, axes=[-2, -1])
        a_l *= beams[freq] * precond_2d
        d1 = numpy.real(fft.ifft(a_l, axes=[-2, -1], normalize=True))
        d1 = numpy.reshape(d1, (nx * ny))
        d2 += d1
    return d2
def test_ffnp(d, m, iterations):
    """Test ffnp.

    This function checks that:
    1. the two versions (coefficient and FFT embeddings) of ffnp are
       consistent;
    2. ffnp outputs lattice vectors close to the targets.
    """
    q = q_12289
    A, B, inv_B, sqr_gsnorm = module_ntru_gen(d, q, m)
    G0 = gram(B)
    G0_fft = [[fft(elt) for elt in row] for row in G0]
    T = ffldl(G0)
    T_fft = ffldl_fft(G0_fft)

    th_bound = (m + 1) * d * sqr_gsnorm / 4.

    mn = 0
    for i in range(iterations):
        t = [[random() for coef in range(d)] for poly in range(m + 1)]
        t_fft = [fft(elt) for elt in t]

        z = ffnp(t, T)
        z_fft = ffnp_fft(t_fft, T_fft)

        zb = [ifft(elt) for elt in z_fft]
        zb = [[round(coef) for coef in elt] for elt in zb]
        if z != zb:
            print("ffnp and ffnp_fft are not consistent")
            return False
        diff = [sub(t[i], z[i]) for i in range(m + 1)]
        diffB = vecmatmul(diff, B)
        norm_zmc = int(round(sqnorm(diffB)))
        mn = max(mn, norm_zmc)

    if mn > th_bound:
        print("z = {z}".format(z=z))
        print("t = {t}".format(t=t))
        print("mn = {mn}".format(mn=mn))
        print("th_bound = {th_bound}".format(th_bound=th_bound))
        print("sqr_gsnorm = {sqr_gsnorm}".format(sqr_gsnorm=sqr_gsnorm))
        print("Warning: the algorithm outputs vectors longer than expected")
        return False
    else:
        return True
def restoreHolo(holo_image, show_image=True):
    """
    Restore the Fourier hologram of an input image.

    Parameters
    ----------
    holo_image : ndarray
    show_image : bool, default=True
        If True, shows the image in a new figure.

    Returns
    -------
    restored_holo_image : ndarray
    """
    restored_holo_image = np.abs(ifft(holo_image))
    image_shape = np.shape(restored_holo_image)
    # Integer division: float indices are invalid in Python 3.
    restored_holo_image[image_shape[0] // 2, image_shape[1] // 2] = 0
    restored_holo_image -= np.min(restored_holo_image)
    restored_holo_image /= np.max(restored_holo_image)
    if show_image:
        cv.imshow("Restored Holo", restored_holo_image)
    return restored_holo_image
def test_ffnp(n, iterations):
    """Test ffnp.

    This function checks that:
    1. the two versions (coefficient and FFT embeddings) of ffnp are
       consistent;
    2. ffnp outputs lattice vectors close to the targets.
    """
    f = sign_KAT[n][0]["f"]
    g = sign_KAT[n][0]["g"]
    F = sign_KAT[n][0]["F"]
    G = sign_KAT[n][0]["G"]
    B = [[g, neg(f)], [G, neg(F)]]
    G0 = gram(B)
    G0_fft = [[fft(elt) for elt in row] for row in G0]
    T = ffldl(G0)
    T_fft = ffldl_fft(G0_fft)

    sqgsnorm = gs_norm(f, g, q)
    m = 0
    for i in range(iterations):
        t = [[random() for i in range(n)], [random() for i in range(n)]]
        t_fft = [fft(elt) for elt in t]

        z = ffnp(t, T)
        z_fft = ffnp_fft(t_fft, T_fft)

        zb = [ifft(elt) for elt in z_fft]
        zb = [[round(coef) for coef in elt] for elt in zb]
        if z != zb:
            print("ffnp and ffnp_fft are not consistent")
            return False
        diff = [sub(t[0], z[0]), sub(t[1], z[1])]
        diffB = vecmatmul(diff, B)
        norm_zmc = int(round(sqnorm(diffB)))
        m = max(m, norm_zmc)

    th_bound = (n / 4.) * sqgsnorm
    if m > th_bound:
        print("Warning: ffnp does not output vectors as short as expected")
        return False
    else:
        return True
for iY in range(maxfreq, len(Y) - maxfreq):
    Y[iY] = complex(0, 0)
    #Y[iY] = Y[iY] * (0.5 - 0.5 * math.cos(2*math.pi*iY/float(N-1)))

#for iY in range(0,N) :
#    Y[iY] = Y[iY] * math.exp(-1.0*iY / 50.0)

powery = fft_power(Y)
powerx = array([float(i) for i in range(len(powery))])
Yre = [math.sqrt(Y[i].real**2 + Y[i].imag**2) for i in range(len(Y))]

import heapq
print(powery.index(heapq.nlargest(2, powery)[1]))

ysmoothed = ifft(Y)
ysmoothedreal = real(ysmoothed)
# Undo the Hann window that was applied before the FFT.
if window:
    for iy in range(1, len(yinput) - 1):
        ysmoothedreal[iy] = ysmoothedreal[iy] / (0.5 - 0.5 * math.cos(2*math.pi*iy/float(N-1)))
        y[iy] = y[iy] / (0.5 - 0.5 * math.cos(2*math.pi*iy/float(N-1)))

ax1 = plt.subplot(2, 1, 1)
y = y[:(N - padlength)]
# pad with 0s up to the next power of two
pads = [0.0] * (pow(2, int(log2N) + 1) - N)
ma = m + pads
ca = c + pads
N = len(m)
print(len(m))

M = fft(ma)
C = fft(ca)

maxfreq = 15
# Now smooth the data by zeroing the high-frequency bins
for i in range(maxfreq, len(M) - maxfreq):
    M[i] = complex(0, 0)
    C[i] = complex(0, 0)

cp = ifft(C)
mp = ifft(M)
cpr = real(cp)
mpr = real(mp)
mf = mpr[0:len(m)]
cf = cpr[0:len(c)]

plt.subplot(2, 1, 1)
plt.plot(T, mf)
plt.xlabel('Temperature')
plt.ylabel('m')
plt.subplot(2, 1, 2)
plt.plot(T, cf)
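# The padding step above, written as a generic helper for plain Python
# lists (an illustrative sketch, not part of the original script): extend
# a signal to the next power of two so a radix-2 FFT applies.
import math

def pad_pow2(x):
    n = 1 << math.ceil(math.log2(len(x)))
    return list(x) + [0.0] * (n - len(x))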
L = 532 * 0.000000001  # wavelength in meters (532 nm)
w = 0.002              # width of field of view in meters
z = 0.05               # z step in meters

imp = IJ.getImage()

# synthetic amplitude and phase
am = [math.exp(-1.6 * val) for val in imp.getProcessor().getPixels()]
ph = [-3 * val for val in imp.getProcessor().getPixels()]
t = [x * cmath.exp(-1j * y) for x, y in zip(am, ph)]

real = ImagePlus("real", FloatProcessor(N, N, [val.real for val in t]))
imaginary = ImagePlus("IMAGINARY", FloatProcessor(N, N, [val.imag for val in t]))
fft_1 = fft.fft(real, imaginary, N, N)

# projection to detector plane
z = Propagator.Zone(L, N, z, w)
real2 = [(x * y).real for x, y in zip(fft_1, [val.conjugate() for val in z])]
imag2 = [(x * y).imag for x, y in zip(fft_1, [val.conjugate() for val in z])]
R2 = ImagePlus("real", FloatProcessor(N, N, real2, None))
I2 = ImagePlus("imaginary", FloatProcessor(N, N, imag2, None))
ifft_1 = fft.ifft(R2, I2, N, N)

# amplitude at detector plane
Amplitude = ImagePlus("Amplitude_det",
                      FloatProcessor(N, N, [abs(x) for x in ifft_1], None)).show()
m = [val for val in imp.getProcessor().getPixels()]
sin = [math.sin(val) for val in imp2.getProcessor().getPixels()]
cos = [math.cos(val) for val in imp2.getProcessor().getPixels()]
imp_r = ImagePlus("", FloatProcessor(N, N, [x * y for x, y in zip(m, cos)]))
imp_i = ImagePlus("", FloatProcessor(N, N, [x * y for x, y in zip(m, sin)]))
fft_1 = fft.fft(imp_r, imp_i, N, N)

# multiply image by fresnel kernel: propagate to object plane
real = [(x * y).real for x, y in zip(fft_1, z)]
imag = [(x * y).imag for x, y in zip(fft_1, z)]
R = ImagePlus("", FloatProcessor(N, N, real, None))
I = ImagePlus("", FloatProcessor(N, N, imag, None))
fft_2 = fft.ifft(R, I, N, N)
phase = [cmath.phase(x) for x in fft_2]  # phase
m = [abs(x) for x in fft_2]

# filter in object domain: clamp negative absorbances to zero
fil = [-1 * math.log(val) for val in [abs(x) for x in fft_2]]
ind = [i for i, x in enumerate(fil) if x < 0]
for i in ind:
    phase[i] = 0
    fil[i] = 0
m = [math.exp(-1 * val) for val in fil]  # amplitude after filtering
sin = [math.sin(val) for val in phase]
cos = [math.cos(val) for val in phase]
import sys

import numpy as np
from matplotlib import pyplot as plt

import fft

N = 8 * 2
X = np.arange(N)
V = np.cos(2*np.pi*X/N + 0.5) + np.sin(2*np.pi*0.5*X/N) \
    - 0.3*np.cos(2*np.pi*X/N) + 5.0*np.sin(2*np.pi*0.3*X/N)
print('V:', V)

Q1 = fft.fft(V)
P1 = fft.ifft(Q1)
plt.plot(X, V, '-b', X, np.real(Q1), '-g', X, np.real(P1), '-r')
plt.show()
print('Q1:', Q1)
print('P1:', np.real(P1))

Q2 = np.fft.fft(V)
P2 = np.fft.ifft(Q2)
plt.plot(X, V, '-b', X, np.real(Q2/np.sqrt(N)), '-g', X, np.real(P2), '-r')
plt.show()
print('Q2:', Q2)
print('P2:', P2)

max_dev = np.max(np.abs(P1 - P2))
maxfreq = 50
# Now smooth the data by zeroing the high-frequency bins
for iY in range(maxfreq, len(Y) - maxfreq):
    Y[iY] = complex(0, 0)
    #Y[iY] = Y[iY] * (0.5 - 0.5 * math.cos(2*math.pi*iY/float(N-1)))

#for iY in range(0,N) :
#    Y[iY] = Y[iY] * math.exp(-1.0*iY / 50.0)

powery = fft_power(Y)
powerx = array([float(i) for i in range(len(powery))])
Yre = [math.sqrt(Y[i].real**2 + Y[i].imag**2) for i in range(len(Y))]

ysmoothed = ifft(Y)
ysmoothedreal = real(ysmoothed)

ax1 = plt.subplot(2, 1, 1)
p1, = plt.plot(x, y)
p2, = plt.plot(x, ysmoothedreal)
ax1.legend([p1, p2], ['Original', 'Smoothed'])

ax2 = plt.subplot(2, 1, 2)
p3, = plt.plot(powerx, powery)
p4, = plt.plot(x, Yre)
ax2.legend([p3, p4], ["Power", "Magnitude"])
plt.yscale('log')

plt.show()
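# The same smoothing written as a self-contained numpy helper (a sketch,
# independent of the script's own fft module): zero every bin beyond
# maxfreq, keeping the conjugate-symmetric tail so the result stays real.
import numpy as np

def fft_lowpass(y, maxfreq):
    Y = np.fft.fft(y)
    Y[maxfreq:len(Y) - maxfreq] = 0
    return np.real(np.fft.ifft(Y))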
def call(self, inputs):
    channel_axis = 1 if self.data_format == 'channels_first' else -1
    input_dim = K.shape(inputs)[channel_axis] // 2
    if self.rank == 1:
        f_real = self.kernel[:, :, :self.filters]
        f_imag = self.kernel[:, :, self.filters:]
    elif self.rank == 2:
        f_real = self.kernel[:, :, :, :self.filters]
        f_imag = self.kernel[:, :, :, self.filters:]
    elif self.rank == 3:
        f_real = self.kernel[:, :, :, :, :self.filters]
        f_imag = self.kernel[:, :, :, :, self.filters:]

    convArgs = {
        "strides": self.strides[0] if self.rank == 1 else self.strides,
        "padding": self.padding,
        "data_format": self.data_format,
        "dilation_rate": self.dilation_rate[0] if self.rank == 1
                         else self.dilation_rate
    }
    convFunc = {1: K.conv1d, 2: K.conv2d, 3: K.conv3d}[self.rank]

    # processing if the weights are assumed to be represented in the
    # spectral domain
    if self.spectral_parametrization:
        if self.rank == 1:
            f_real = K.permute_dimensions(f_real, (2, 1, 0))
            f_imag = K.permute_dimensions(f_imag, (2, 1, 0))
            f = K.concatenate([f_real, f_imag], axis=0)
            fshape = K.shape(f)
            f = K.reshape(f, (fshape[0] * fshape[1], fshape[2]))
            f = ifft(f)
            f = K.reshape(f, fshape)
            f_real = f[:fshape[0] // 2]
            f_imag = f[fshape[0] // 2:]
            f_real = K.permute_dimensions(f_real, (2, 1, 0))
            f_imag = K.permute_dimensions(f_imag, (2, 1, 0))
        elif self.rank == 2:
            f_real = K.permute_dimensions(f_real, (3, 2, 0, 1))
            f_imag = K.permute_dimensions(f_imag, (3, 2, 0, 1))
            f = K.concatenate([f_real, f_imag], axis=0)
            fshape = K.shape(f)
            f = K.reshape(f, (fshape[0] * fshape[1], fshape[2], fshape[3]))
            f = ifft2(f)
            f = K.reshape(f, fshape)
            f_real = f[:fshape[0] // 2]
            f_imag = f[fshape[0] // 2:]
            f_real = K.permute_dimensions(f_real, (2, 3, 1, 0))
            f_imag = K.permute_dimensions(f_imag, (2, 3, 1, 0))

    # In case of weight normalization, real and imaginary weights are
    # normalized
    if self.normalize_weight:
        ker_shape = self.kernel_shape
        nb_kernels = ker_shape[-2] * ker_shape[-1]
        kernel_shape_4_norm = (np.prod(self.kernel_size), nb_kernels)
        reshaped_f_real = K.reshape(f_real, kernel_shape_4_norm)
        reshaped_f_imag = K.reshape(f_imag, kernel_shape_4_norm)
        reduction_axes = list(range(2))
        del reduction_axes[-1]
        mu_real = K.mean(reshaped_f_real, axis=reduction_axes)
        mu_imag = K.mean(reshaped_f_imag, axis=reduction_axes)

        broadcast_mu_shape = [1] * 2
        broadcast_mu_shape[-1] = nb_kernels
        broadcast_mu_real = K.reshape(mu_real, broadcast_mu_shape)
        broadcast_mu_imag = K.reshape(mu_imag, broadcast_mu_shape)
        reshaped_f_real_centred = reshaped_f_real - broadcast_mu_real
        reshaped_f_imag_centred = reshaped_f_imag - broadcast_mu_imag
        Vrr = K.mean(reshaped_f_real_centred ** 2,
                     axis=reduction_axes) + self.epsilon
        Vii = K.mean(reshaped_f_imag_centred ** 2,
                     axis=reduction_axes) + self.epsilon
        Vri = K.mean(reshaped_f_real_centred * reshaped_f_imag_centred,
                     axis=reduction_axes) + self.epsilon

        normalized_weight = complex_normalization(
            K.concatenate([reshaped_f_real, reshaped_f_imag], axis=-1),
            Vrr, Vii, Vri,
            beta=None,
            gamma_rr=self.gamma_rr,
            gamma_ri=self.gamma_ri,
            gamma_ii=self.gamma_ii,
            scale=True,
            center=False,
            axis=-1)

        normalized_real = normalized_weight[:, :nb_kernels]
        normalized_imag = normalized_weight[:, nb_kernels:]
        f_real = K.reshape(normalized_real, self.kernel_shape)
        f_imag = K.reshape(normalized_imag, self.kernel_shape)

    # Performing complex convolution
    f_real._keras_shape = self.kernel_shape
    f_imag._keras_shape = self.kernel_shape

    cat_kernels_4_real = K.concatenate([f_real, -f_imag], axis=-2)
    cat_kernels_4_imag = K.concatenate([f_imag, f_real], axis=-2)
    cat_kernels_4_complex = K.concatenate(
        [cat_kernels_4_real, cat_kernels_4_imag], axis=-1)
    cat_kernels_4_complex._keras_shape = self.kernel_size + (
        2 * input_dim, 2 * self.filters)

    output = convFunc(inputs, cat_kernels_4_complex, **convArgs)

    if self.use_bias:
        output = K.bias_add(output, self.bias, data_format=self.data_format)

    if self.activation is not None:
        output = self.activation(output)

    return output
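# A tiny numpy check of the kernel stacking used above: arranging real
# blocks as [[W_r, -W_i], [W_i, W_r]] makes a single real linear map
# compute a complex one, mirroring (x_r + i x_i)(w_r + i w_i).
import numpy as np

xr, xi = np.random.rand(4), np.random.rand(4)
wr, wi = np.random.rand(4), np.random.rand(4)
W = np.block([[np.diag(wr), -np.diag(wi)],
              [np.diag(wi),  np.diag(wr)]])
y = W @ np.concatenate([xr, xi])
z = (xr + 1j * xi) * (wr + 1j * wi)
assert np.allclose(y[:4], z.real) and np.allclose(y[4:], z.imag)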
import numpy as np
from matplotlib import pyplot as plt

import fft

N = 8 * 2
X = np.arange(N)
V = np.cos(2 * np.pi * X / N + 0.5) + np.sin(
    2 * np.pi * 0.5 * X / N) - 0.3 * np.cos(2 * np.pi * X / N) + 5.0 * np.sin(
    2 * np.pi * 0.3 * X / N)
print('V:', V)

Q1 = fft.fft(V)
P1 = fft.ifft(Q1)
plt.plot(X, V, '-b', X, np.real(Q1), '-g', X, np.real(P1), '-r')
plt.show()
print('Q1:', Q1)
print('P1:', np.real(P1))

Q2 = np.fft.fft(V)
P2 = np.fft.ifft(Q2)
plt.plot(X, V, '-b', X, np.real(Q2 / np.sqrt(N)), '-g', X, np.real(P2), '-r')
plt.show()
print('Q2:', Q2)
print('P2:', P2)

max_dev = np.max(np.abs(P1 - P2))
def module_ntru_gen(d, q, m):
    """
    Take as input system parameters, and output two "module-NTRU" matrices
    A and B such that:
    - B * A = 0 [mod q]
    - B has small polynomials
    - A is in Hermite normal form
    Also compute the inverse of B (over the field K = Q[x] / (x ** d + 1)).

    Input:
    d           The degree of the underlying ring R = Z[x] / (x ** d + 1)
    q           An integer
    m           An integer

    Output:
    A           A matrix in R ^ ((m + 1) x 1)
    B           A matrix in R ^ ((m + 1) x (m + 1))
    inv_B       A matrix in K ^ ((m + 1) x (m + 1))
    sq_gs_norm  A real number, the square of the Gram-Schmidt norm of B

    Format:     Coefficient
    """
    if m == 1:
        magic_constant = [1.15]
        gs_slack = 1.17
    elif m == 2:
        magic_constant = [1.07, 1.14]
        gs_slack = 1.17
    elif m == 3:
        magic_constant = [1.21, 1.10, 1.06]
        gs_slack = 1.24
    else:
        print("No parameters implemented yet for m = {m}".format(m=m))
        return
    max_gs_norm = gs_slack * (q ** (1 / (m + 1)))
    while True:
        # We generate all rows of B except the last
        B = [[None for j in range(m + 1)] for i in range(m + 1)]
        for i in range(m):
            for j in range(m + 1):
                # Each coefficient B[i][j] is a polynomial
                sigma = magic_constant[i] * (q ** (1 / (m + 1)))
                # ==> ||bi~|| = gs_slack * q^(1/(m+1))
                sig = sqrt(1 / (d * (m + 1 - i))) * sigma
                # sig = standard deviation of the coefficients of b_i
                B[i][j] = [int(round(gauss(0, sig))) for k in range(d)]
        # We check that the GS norm is not larger than tolerated
        Bp_fft = [[fft(poly) for poly in row] for row in B[:-1]]
        Gp = gram_fft(Bp_fft)
        [Lp_fft, Dp_fft] = ldl_fft(Gp)
        Dp = [[[0] * d for col in range(m)] for row in range(m)]
        for i in range(m):
            Dp[i][i] = ifft(Dp_fft[i][i])
        prod_di = [1] + [0] * (d - 1)
        for i in range(m):
            prod_di = mul(prod_di, Dp[i][i])
        last = div([q ** 2] + [0] * (d - 1), prod_di)
        norms = [Dp[i][i][0] for i in range(m)] + [last[0]]
        # If the GS norm is too large, restart
        if sqrt(max(norms)) > max_gs_norm:
            continue
        # Try to solve the module-NTRU equation
        f = submatrix(B, m, 0)
        f = [[neg(elt) for elt in row] for row in f]
        g = [B[j][0] for j in range(m)]
        fp = my_det(f)
        adjf = my_adjugate(f)
        gp = [0] * d
        for i in range(m):
            gp = add(gp, karamul(adjf[0][i], g[i]))
        try:
            # Compute f^(-1) mod q
            fp_q = [elt % q for elt in fp]
            inv_f = [[elt[:] for elt in row] for row in adjf]
            for i in range(m):
                for j in range(m):
                    inv_f[i][j] = [elt % q for elt in inv_f[i][j]]
                    inv_f[i][j] = div_zq(inv_f[i][j], fp_q, q)
            # Compute h = f^(-1) * g mod q and A = [1 | h]
            h = [None] * m
            for i in range(m):
                elt = [0] * d
                for j in range(m):
                    elt = add_zq(elt, mul_zq(inv_f[i][j], g[j], q), q)
                h[i] = elt
            one = [1] + [0 for _ in range(1, d)]
            A = [one] + h
            Fp, Gp = ntru_solve(fp, gp, q)
            B[m][0] = Gp
            B[m][1] = [- coef for coef in Fp]
            for i in range(2, m + 1):
                B[m][i] = [0 for _ in range(d)]
            # Compute the inverse of B
            det_B = my_det(B)
            inv_B = my_adjugate(B)
            inv_B = [[div(poly, det_B) for poly in row] for row in inv_B]
            return A, B, inv_B, max(norms)
        # If any step failed, restart
        except (ZeroDivisionError, ValueError):
            continue
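# A quick sanity sketch for the output, assuming the same add_zq/mul_zq
# helpers as above: each row of B times A should vanish mod q, which is
# exactly the "B * A = 0 [mod q]" property from the docstring.
def check_module_ntru(A, B, q, d):
    for row in B:
        acc = [0] * d
        for b_ij, a_j in zip(row, A):
            acc = add_zq(acc, mul_zq(b_ij, a_j, q), q)
        if any(coef % q != 0 for coef in acc):
            return False
    return True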
def conv(a, b):
    # Cyclic convolution of two equal-length sequences via the FFT;
    # the division by n compensates for an ifft that appears to omit
    # the built-in 1/n factor.
    assert len(a) == len(b)
    n = len(a)
    a, b = fft(a), fft(b)
    return ifft(list(a[i] * b[i] / n for i in range(len(a))))
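# A naive O(n^2) cross-check for conv, assuming the fft/ifft conventions
# noted above: both versions should agree up to floating-point error.
def conv_naive(a, b):
    n = len(a)
    return [sum(a[j] * b[(i - j) % n] for j in range(n)) for i in range(n)]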