def __init__(self, is_stationary, sqrt_beta, order_number, n_samples, pcn, init_sample):
    """Initialize one layer of the 2-D layered (deep) Gaussian-process sampler.

    Parameters
    ----------
    is_stationary : bool
        If True this is the top (stationary) layer; its precision operator is
        the identity-like `stdev`. Otherwise the layer's precision is the
        L-matrix built from `init_sample`.
    sqrt_beta : float
        Square root of the layer's beta parameter, passed to `Lmatrix_2D`.
    order_number : int
        Position of this layer in the hierarchy.
    n_samples : int
        Number of samples to allocate in `samples_history`.
    pcn : object
        Shared pCN state (fourier analysis, random generator, measurement).
    init_sample : complex ndarray
        Initial (half) spectrum used to seed current/new samples.
    """
    self.is_stationary = is_stationary
    self.sqrt_beta = sqrt_beta
    self.order_number = order_number
    self.n_samples = n_samples
    self.pcn = pcn
    # Unit standard deviation for every raveled 2-D Fourier mode.
    # (The original also built an unused zero-filled dummy array; removed.)
    ones_compl_dummy = np.ones(self.pcn.fourier.basis_number_2D_ravel, dtype=np.complex128)
    self.stdev = ones_compl_dummy
    self.stdev_sym = util.symmetrize(self.stdev)
    self.samples_history = np.empty(
        (self.n_samples, self.pcn.fourier.basis_number_2D_ravel), dtype=np.complex128)
    self.LMat = Lmatrix_2D(self.pcn.fourier, self.sqrt_beta)
    # Noise sample is always symmetric (full spectrum, not just the half).
    self.current_noise_sample = self.pcn.random_gen.construct_w()
    self.new_noise_sample = self.current_noise_sample.copy()
    if self.is_stationary:
        self.current_sample = init_sample
        self.new_sample = init_sample
        self.new_sample_sym = self.pcn.random_gen.symmetrize(self.new_sample)
        self.new_sample_scaled_norm = 0
        self.new_log_L_det = 0
        # numba needs these initializations; otherwise the jitclass will not compile.
        self.current_sample = init_sample.copy()
        # FIX: current_sample_sym was never assigned on this branch, unlike the
        # parallel 1-D __init__, and a numba jitclass requires every attribute
        # to be set on all __init__ paths.
        self.current_sample_sym = self.new_sample_sym.copy()
        self.current_sample_scaled_norm = 0
        self.current_log_L_det = 0
    else:
        # Non-stationary layer: precision comes from the L-matrix built from
        # the layer above's sample; draw an initial sample by solving L u = w.
        self.LMat.construct_from(init_sample)
        self.LMat.set_current_L_to_latest()
        self.new_sample_sym = np.linalg.solve(
            self.LMat.current_L, self.pcn.random_gen.construct_w())
        # Keep only the non-redundant half of the symmetric spectrum.
        self.new_sample = self.new_sample_sym[self.pcn.fourier.basis_number_2D_ravel - 1:]
        self.new_sample_scaled_norm = util.norm2(
            self.LMat.current_L @ self.new_sample_sym)  # TODO: modify this
        self.new_log_L_det = self.LMat.logDet(True)  # TODO: modify this
        # numba needs these initializations; otherwise the jitclass will not compile.
        self.current_sample = init_sample.copy()
        self.current_sample_sym = self.new_sample_sym.copy()
        self.current_sample_scaled_norm = self.new_sample_scaled_norm
        self.current_log_L_det = self.new_log_L_det
        # self.update_current_sample()
    self.i_record = 0
def negLogPosterior(x, Layers):
    """Negative log posterior of the layered model, evaluated at flat vector `x`.

    `Layers` is a numba typed List of Layer objects; this function MUTATES the
    layers (new_sample, LMat, current_* fields) as a side effect of evaluation.
    `x` is unpacked via xToUHalf into per-layer half-spectra of length `n`.
    """
    negLogPost = 0.0
    uHalf_all = xToUHalf(x)
    # this is just like having new sample at the bottom layer
    n = Layers[0].pcn.fourier.fourier_basis_number
    uHalf_0 = uHalf_all[0:n]
    Layers[0].new_sample = uHalf_0
    Layers[0].new_sample_sym = Layers[0].pcn.random_gen.symmetrize(Layers[0].new_sample)
    Layers[0].new_sample_scaled_norm = util.norm2(Layers[0].new_sample / Layers[0].stdev)
    Layers[0].update_current_sample()
    # NOTE(review): layer 0's norm is added WITHOUT the 0.5 factor used for the
    # other layers below — confirm this asymmetry is intentional.
    negLogPost += Layers[0].current_sample_scaled_norm
    for i in range(1, len(Layers)):
        # Precision operator of layer i is built from layer i-1's sample.
        Layers[i].LMat.construct_from(Layers[i - 1].new_sample)
        Layers[i].new_log_L_det = np.linalg.slogdet(Layers[i].LMat.latest_computed_L)[1]
        Layers[i].LMat.set_current_L_to_latest()
        # Layers[i].sample()
        if i == len(Layers) - 1:
            # Bottom layer: solve the stacked least-squares system
            # [H; L] u ≈ yBar - [e; w] tying the sample to the measurements.
            # NOTE(review): slice n*(i-1):n*i for i==1 equals [0:n], the same
            # coefficients already used by layer 0 — verify the indexing.
            wNew = util.symmetrize(uHalf_all[n * (i - 1):n * i])
            # NOTE(review): fresh randn here makes this objective stochastic,
            # which most deterministic optimizers (e.g. L-BFGS-B) do not expect.
            eNew = np.random.randn(Layers[i].pcn.measurement.num_sample)
            wBar = np.concatenate((eNew, wNew))
            LBar = np.vstack((Layers[i].pcn.H, Layers[i].LMat.current_L))
            Layers[i].new_sample_sym, res, rnk, s = np.linalg.lstsq(LBar, Layers[i].pcn.yBar - wBar)  #,rcond=None)
            Layers[i].new_sample = Layers[i].new_sample_sym[Layers[i].pcn.fourier.fourier_basis_number - 1:]
        else:
            # Intermediate layer: its half-spectrum comes straight from x.
            # NOTE(review): new_sample_sym is NOT refreshed on this branch, so
            # the norm below uses a possibly stale symmetrized sample — confirm.
            uHalf_i = uHalf_all[n * (i - 1):n * i]
            Layers[i].new_sample = uHalf_i
        Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L @ Layers[i].new_sample_sym)
        Layers[i].update_current_sample()
        negLogPost += 0.5 * Layers[i].current_sample_scaled_norm
        negLogPost -= Layers[i].current_log_L_det
    # self.current_neg_log_posterior = negLogPost
    return negLogPost
def __init__(self, is_stationary, sqrt_beta, order_number, n_samples, pcn, init_sample):
    """Initialize one layer of the 1-D layered Gaussian-process sampler.

    Mirrors the 2-D layer constructor but uses the 1-D FourierAnalysis and
    L.Lmatrix. Several "dummy declaration" assignments below exist only so
    numba's jitclass type inference can compile the class — do not remove
    without re-checking compilation.
    """
    self.is_stationary = is_stationary
    self.sqrt_beta = sqrt_beta
    self.order_number = order_number
    # self.current_above_sample = above_sample
    self.n_samples = n_samples
    #dummy declaration
    # a_pcn = pCN.pCN(pcn.n_layers,pcn.random_gen,pcn.measurement,pcn.fourier,pcn.beta)#numba cannot understand without this
    # self.pcn = a_pcn
    self.pcn = pcn
    # self.current_sample = np.zeros(f.basis_number,dtype=np.complex128)
    zero_compl_dummy = np.zeros(self.pcn.fourier.basis_number, dtype=np.complex128)
    ones_compl_dummy = np.ones(self.pcn.fourier.basis_number, dtype=np.complex128)
    self.stdev = ones_compl_dummy
    self.stdev_sym = util.symmetrize(self.stdev)
    self.samples_history = np.empty(
        (self.n_samples, self.pcn.fourier.basis_number), dtype=np.complex128)
    #dummy declaration
    # Rebuild a concrete FourierAnalysis from the pcn's parameters — numba
    # cannot infer the type of self.pcn.fourier directly.
    fpcn = self.pcn.fourier
    f = fourier.FourierAnalysis(
        fpcn.basis_number, fpcn.extended_basis_number, fpcn.t_start, fpcn.t_end)  #numba cannot understand without this
    LMat = L.Lmatrix(f, self.sqrt_beta)
    # LMat.fourier = self.pcn.fourier
    self.LMat = LMat
    self.current_noise_sample = self.pcn.random_gen.construct_w()  #noise sample always symmetric
    self.new_noise_sample = self.current_noise_sample.copy()
    if self.is_stationary:
        self.current_sample = init_sample
        self.new_sample = init_sample
        self.new_sample_sym = self.pcn.random_gen.symmetrize(self.new_sample)
        self.new_sample_scaled_norm = 0
        self.new_log_L_det = 0
        #numba need this initialization. otherwise it will not compile
        self.current_sample = init_sample.copy()
        self.current_sample_sym = self.new_sample_sym.copy()
        self.current_sample_scaled_norm = 0
        self.current_log_L_det = 0
    else:
        # NOTE(review): zero_init appears unused on this branch — possibly
        # another numba type-inference dummy; confirm before removing.
        zero_init = np.zeros(self.pcn.fourier.basis_number, dtype=np.complex128)
        # Non-stationary layer: draw the initial sample by solving L u = w.
        self.LMat.construct_from(init_sample)
        self.LMat.set_current_L_to_latest()
        self.new_sample_sym = np.linalg.solve(
            self.LMat.current_L, self.pcn.random_gen.construct_w())
        # Keep only the non-redundant half of the symmetric spectrum.
        self.new_sample = self.new_sample_sym[self.pcn.fourier.basis_number - 1:]
        self.new_sample_scaled_norm = util.norm2(
            self.LMat.current_L @ self.new_sample_sym)  #ToDO: Modify this
        self.new_log_L_det = self.LMat.logDet(True)  #ToDO: Modify this
        #
        #numba need this initialization. otherwise it will not compile
        self.current_sample = init_sample.copy()
        self.current_sample_sym = self.new_sample_sym.copy()
        self.current_sample_scaled_norm = self.new_sample_scaled_norm
        self.current_log_L_det = self.new_log_L_det
        # self.update_current_sample()
    self.i_record = 0
lay.update_current_sample() #TODO: toggle this if pcn.one_step_one_element is not used # lay.samples_history = np.empty((lay.n_samples*f.fourier_basis_number, f.fourier_basis_number), dtype=np.complex128) Layers.append(lay) #allowable methods: ‘Nelder-Mead’,‘Powell’,‘COBYLA’,‘trust-constr’, '‘L-BFGS-B' method = 'L-BFGS-B' optimizer = optm.Optimizer(Layers, method=method, max_iter=1000000) opt_Result = optimizer.optimize() uHalf_all = optm.xToUHalf(opt_Result.x) Layers[0].new_sample = uHalf_all[0:n] Layers[0].update_current_sample() for i in range(1, len(Layers)): if i == len(Layers) - 1: wNew = util.symmetrize(uHalf_all[n * (i - 1):n * i]) eNew = np.random.randn(Layers[i].pcn.measurement.num_sample) wBar = np.concatenate((eNew, wNew)) LBar = np.vstack((Layers[i].pcn.H, Layers[i].LMat.current_L)) Layers[i].new_sample_symmetrized, res, rnk, s = np.linalg.lstsq( LBar, Layers[i].pcn.yBar - wBar) #,rcond=None) Layers[i].new_sample = Layers[i].new_sample_symmetrized[ Layers[i].pcn.fourier.fourier_basis_number - 1:] else: uHalf_i = uHalf_all[n * (i - 1):n * i] Layers[i].new_sample = uHalf_i Layers[i].new_sample = uHalf_all[n * (i - 1):i * n] Layers[i].update_current_sample() # negLogPost += 0.5*Layers[i].current_sample_scaled_norm # negLogPost -= Layers[i].current_log_L_det
def construct_from(self, uHalf):
    """Build the layer's L-matrix from a raveled half spectrum.

    Symmetrizes `uHalf`, reshapes it into the 2-D half-grid layout expected
    by construct_from_2D, and delegates the actual construction there.
    """
    u_sym = util.symmetrize(uHalf)
    half_grid = u2.from_u_2D_ravel_to_uHalf_2D(u_sym, self.fourier.basis_number)
    return self.construct_from_2D(half_grid)
def constructU_with_Index(self, uHalf):
    """Construct the U matrix by fancy-indexing the extended, symmetrized spectrum.

    The full symmetric spectrum is extended to length 2*basis_number and then
    gathered through the precomputed index array `self.index`, producing the
    Toeplitz-structured matrix in one vectorized operation.
    """
    uprepared = util.extend(util.symmetrize(uHalf), 2 * self.basis_number)
    # with nb.objmode(U='complex128[:,:]'):
    #     res = u.extend2D(symmetrize_2D(uHalf),2*n-1)[index]
    U = uprepared[self.index]
    return U

# ---------------------------------------------------------------------------
# NOTE(review): everything below is dead (commented-out) code — a numba
# jitclass spec and a FourierAnalysis_2D implementation kept for reference.
# Consider deleting it or moving it to version control history.
# ---------------------------------------------------------------------------
# spec2D = [
#     ('basis_number', nb.int64),
#     ('extended_basis_number', nb.int64),
#     ('basis_number_2D', nb.int64),
#     ('basis_number_2D_sym', nb.int64),
#     ('extended_basis_number_2D', nb.int64),
#     ('extended_basis_number_2D_sym', nb.int64),
#     ('t_end', nb.float64),
#     ('t_start', nb.float64),
#     ('dt', nb.float64),
#     ('t', nb.float64[::1]),
#     ('Dmatrix', nb.float64[:,::1]),
#     ('Imatrix', nb.float64[:,::1]),
#     ('ix', nb.int64[:,::1]),
#     ('iy', nb.int64[:,::1]),
#     ('Index',nb.typeof(u2.createUindex(2)))
# ]
# ORDER = 'C'
# @nb.jitclass(spec2D)
# class FourierAnalysis_2D:
#     def __init__(self,basis_number,extended_basis_number,t_start = 0,t_end=1):
#         self.basis_number = basis_number
#         self.extended_basis_number = extended_basis_number
#         self.basis_number_2D = (2*basis_number-1)*basis_number
#         self.basis_number_2D_sym = (2*basis_number-1)*(2*basis_number-1)
#         self.extended_basis_number_2D = (2*extended_basis_number-1)*extended_basis_number
#         self.extended_basis_number_2D_sym = (2*extended_basis_number-1)*(2*extended_basis_number-1)
#         self.t_end = t_end
#         self.t_start = t_start
#         self.ix = np.zeros(2*self.basis_number-1,2*self.basis_number-1,dtype=np.int64)
#         self.iy = np.zeros(2*self.basis_number-1,2*self.basis_number-1,dtype=np.int64)
#         temp = np.arange(-(self.basis_number-1),self.basis_number)
#         for i in range(2*self.basis_number-1)
#             self.ix[i,:] = temp
#             self.iy[:,i] = temp
#         # d_diag = np.zeros((2*self.basis_number-1)**2)
#         # for i in range(2*self.basis_number-1):
#         #     for j in range(2*self.basis_number-1):
#         #         d_diag[i*10+j] = (i**2+j**2)
#         # self.Dmatrix = -(2*np.pi)**2*np.diag(d_diag)
#         self.Imatrix = np.eye((2*self.basis_number-1)**2)
#         Index = u2.createUindex(self.basis_number)
#         self.Index = Index
#     def inverseFourierLimited(self,uHalf):
#         return u2.irfft2(uHalf,self.extended_basis_number)
#     def fourierTransformHalf(self,z):
#         return u2.rfft2(z,self.basis_number)
#     def constructU(self,uHalf):
#         """
#         Construct Toeplitz Matrix
#         """
#         return u2.constructU(uHalf,self.Index)
#     def constructMatexplicit(self,uHalf,fun):
#         temp = fun(self.inverseFourierLimited(uHalf))
#         temp2 = self.fourierTransformHalf(temp)
#         return self.constructU(temp2)