Example #1
    def one_step_for_sqrtBetas(self, Layers):
        """Propose new sqrt_beta values for all layers with a pCN step, rebuild
        the layer proposal samples under them, and accept or reject the whole
        proposal jointly based on the measurement misfit."""
        sqrt_beta_noises = self.stdev_sqrtBetas * np.random.randn(self.n_layers)
        propSqrtBetas = np.zeros(self.n_layers, dtype=np.float64)

        for i in range(self.n_layers):

            temp = np.sqrt(1 - self.pcn_step_sqrtBetas**2) * Layers[
                i].sqrt_beta + self.pcn_step_sqrtBetas * sqrt_beta_noises[i]
            propSqrtBetas[i] = max(temp, 1e-4)
            if i == 0:
                stdev_sym_temp = (propSqrtBetas[i] /
                                  Layers[i].sqrt_beta) * Layers[i].stdev_sym
                Layers[i].new_sample_sym = stdev_sym_temp * Layers[
                    i].current_noise_sample
            else:
                Layers[i].LMat.construct_from_with_sqrt_beta(
                    Layers[i - 1].new_sample, propSqrtBetas[i])
                if i < self.n_layers - 1:
                    Layers[i].new_sample_sym = np.linalg.solve(
                        Layers[i].LMat.latest_computed_L,
                        Layers[i].current_noise_sample)
                else:
                    wNew = Layers[-1].current_noise_sample
                    eNew = np.random.randn(self.measurement.num_sample)
                    wBar = np.concatenate((eNew, wNew))
                    LBar = np.vstack(
                        (self.H, Layers[-1].LMat.latest_computed_L))
                    v, res, rnk, s = np.linalg.lstsq(LBar, self.yBar - wBar)
                    Layers[-1].new_sample_sym = v
                    Layers[i].new_sample = Layers[i].new_sample_sym[
                        self.fourier.basis_number_2D_ravel - 1:]

        logRatio = 0.5 * (util.norm2(self.y / self.measurement.stdev -
                                     self.H @ Layers[-1].current_sample_sym))
        logRatio -= 0.5 * (util.norm2(self.y / self.measurement.stdev -
                                      self.H @ Layers[-1].new_sample_sym))

        if logRatio > np.log(np.random.rand()):
            # print('Proposal sqrt_beta accepted!')
            self.Layers_sqrtBetas = propSqrtBetas
            for i in range(self.n_layers):
                Layers[i].sqrt_beta = propSqrtBetas[i]
                Layers[i].LMat.set_current_L_to_latest()
                if Layers[i].is_stationary:
                    Layers[i].stdev_sym = stdev_sym_temp
                    Layers[i].stdev = Layers[i].stdev_sym[
                        self.fourier.basis_number_2D_ravel - 1:]
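These snippets lean on two helpers from the project's `util` module that are not shown here. A minimal sketch of the semantics they appear to assume (an illustration, not the project's actual implementation): `norm2` as a squared l2-norm of a possibly complex vector, and `symmetrize` as the conjugate-symmetric extension of a half Fourier spectrum.

import numpy as np

def norm2(x):
    # squared Euclidean norm; works for real or complex vectors
    return float(np.sum(np.abs(x) ** 2))

def symmetrize(u_half):
    # extend (u_0, u_1, ..., u_{n-1}) to the full conjugate-symmetric
    # spectrum (conj(u_{n-1}), ..., conj(u_1), u_0, u_1, ..., u_{n-1})
    return np.concatenate((np.conj(u_half[:0:-1]), u_half))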
Example #2
    def one_step_non_centered_sari(self, Layers):
        """One non-centered sweep: the top layer is drawn by solving the stacked
        least-squares system [H; L] v = yBar - wBar, the other layers re-draw
        their noise, and the whole move is accepted or rejected jointly."""
        accepted = 0
        Layers[self.n_layers-1].sample_non_centered()
        wNew = Layers[self.n_layers-1].new_noise_sample
        eNew = np.random.randn(self.measurement.num_sample)
        wBar = np.concatenate((eNew,wNew))
        LBar = np.vstack((self.H,Layers[self.n_layers-1].LMat.current_L))
        v, res, rnk, s = np.linalg.lstsq(LBar, self.yBar - wBar)  # rcond left at its default
        Layers[self.n_layers-1].new_sample_sym = v
        Layers[self.n_layers-1].new_sample = v[self.fourier.basis_number-1:]
        logRatio = 0.0
        for i in range(self.n_layers):
            if i<self.n_layers-1:
                Layers[i].sample_non_centered()

            if i>0:
                Layers[i].LMat.construct_from(Layers[i-1].new_sample)
                if i<self.n_layers-1:
                    Layers[i].new_sample_sym = np.linalg.solve(Layers[i].LMat.latest_computed_L,Layers[i].new_noise_sample)
            else:
                Layers[i].new_sample_sym = Layers[i].stdev_sym*Layers[i].new_noise_sample
            Layers[i].new_sample = Layers[i].new_sample_sym[self.fourier.basis_number-1:]

            if i == self.n_layers-1:
                logRatio += Layers[i].LMat.logDet(True) - Layers[i].LMat.logDet(False)
                logRatio += 0.5*(util.norm2(Layers[i].LMat.current_L@v)-util.norm2(Layers[i].LMat.latest_computed_L@v))
            if i<self.n_layers-1:
                logRatio += 0.5*(util.norm2(Layers[i].current_noise_sample) - util.norm2(Layers[i].new_noise_sample))

        if logRatio>np.log(np.random.rand()):
            accepted = 1
            for i in range(self.n_layers):
                Layers[i].update_current_sample()
                if i<self.n_layers-1 and not Layers[i+1].is_stationary:
                    Layers[i+1].LMat.set_current_L_to_latest()

            # only record when needed
        if (self.record_count%self.record_skip) == 0:
            # print('recorded')
            for i in range(self.n_layers):
                Layers[i].record_sample()
        self.record_count += 1
        return accepted
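The core of the top-layer update above is a stacked least-squares solve. A small self-contained sketch of that step, with synthetic matrices standing in for `self.H`, the layer's `current_L`, and `self.yBar` (all sizes and names here are illustrative):

import numpy as np

m, n = 5, 9                        # toy sizes: measurements, symmetrized Fourier coefficients
H = np.random.randn(m, n)          # stands in for the measurement operator self.H
L = np.random.randn(n, n)          # stands in for the layer's L matrix
yBar = np.concatenate((np.random.randn(m), np.zeros(n)))

eNew = np.random.randn(m)          # fresh measurement noise
wNew = np.random.randn(n)          # fresh prior noise
wBar = np.concatenate((eNew, wNew))

LBar = np.vstack((H, L))           # stacked system [H; L] v ~= yBar - wBar
v, res, rnk, s = np.linalg.lstsq(LBar, yBar - wBar, rcond=None)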
Example #3
    def __init__(self, is_stationary, sqrt_beta, order_number, n_samples, pcn,
                 init_sample):
        """Layer of the hierarchy (2D Fourier version). Stationary layers keep a
        fixed stdev; non-stationary layers build their L matrix from the layer
        below and draw an initial sample by solving L @ sample = noise."""
        self.is_stationary = is_stationary
        self.sqrt_beta = sqrt_beta
        self.order_number = order_number
        self.n_samples = n_samples
        self.pcn = pcn

        zero_compl_dummy = np.zeros(self.pcn.fourier.basis_number_2D_ravel,
                                    dtype=np.complex128)
        ones_compl_dummy = np.ones(self.pcn.fourier.basis_number_2D_ravel,
                                   dtype=np.complex128)

        self.stdev = ones_compl_dummy
        self.stdev_sym = util.symmetrize(self.stdev)
        self.samples_history = np.empty(
            (self.n_samples, self.pcn.fourier.basis_number_2D_ravel),
            dtype=np.complex128)

        self.LMat = Lmatrix_2D(self.pcn.fourier, self.sqrt_beta)
        self.current_noise_sample = self.pcn.random_gen.construct_w(
        )  #noise sample always symmetric
        self.new_noise_sample = self.current_noise_sample.copy()

        if self.is_stationary:

            self.current_sample = init_sample
            self.new_sample = init_sample
            self.new_sample_sym = self.pcn.random_gen.symmetrize(
                self.new_sample)
            self.new_sample_scaled_norm = 0
            self.new_log_L_det = 0
            #numba need this initialization. otherwise it will not compile
            self.current_sample = init_sample.copy()
            self.current_sample_scaled_norm = 0
            self.current_log_L_det = 0

        else:
            self.LMat.construct_from(init_sample)
            self.LMat.set_current_L_to_latest()
            self.new_sample_sym = np.linalg.solve(
                self.LMat.current_L, self.pcn.random_gen.construct_w())
            self.new_sample = self.new_sample_sym[self.pcn.fourier.
                                                  basis_number_2D_ravel - 1:]
            self.new_sample_scaled_norm = util.norm2(
                self.LMat.current_L @ self.new_sample_sym)  #ToDO: Modify this
            self.new_log_L_det = self.LMat.logDet(True)  #ToDO: Modify this
            # #numba need this initialization. otherwise it will not compile
            self.current_sample = init_sample.copy()
            self.current_sample_sym = self.new_sample_sym.copy()
            self.current_sample_scaled_norm = self.new_sample_scaled_norm
            self.current_log_L_det = self.new_log_L_det

        # self.update_current_sample()
        self.i_record = 0
Example #4
    def oneStep(self, Layers):
        """One Metropolis-within-Gibbs sweep: every layer draws a proposal, the
        log-ratio accumulates scaled-norm and log-determinant terms, and all
        layers are accepted or rejected jointly."""
        logRatio = 0.0
        for i in range(self.n_layers):
            # i = int(self.gibbs_step//len(Layers))
            Layers[i].sample()
            # new_sample = Layers[i].new_sample
            if i> 0:
                Layers[i].LMat.construct_from(Layers[i-1].new_sample)
                Layers[i].new_log_L_det = np.linalg.slogdet(Layers[i].LMat.latest_computed_L)[1]
                # if i < self.n_layers - 1 :
                #     Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L@Layers[i].current_sample_sym)
                # else:
                Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L@Layers[i].new_sample_sym)
                Layers[i].new_sample_scaled_norm = util.norm2(Layers[i].LMat.latest_computed_L@Layers[i].new_sample_sym)

                logRatio += 0.5*(Layers[i].current_sample_scaled_norm-Layers[i].new_sample_scaled_norm)
                logRatio += (Layers[i].new_log_L_det-Layers[i].current_log_L_det)
            else:
                #TODO: Check whether 0.5 factor should be added below
                Layers[i].new_sample_scaled_norm = util.norm2(Layers[i].new_sample/Layers[i].stdev)
                logRatio += (Layers[i].current_sample_scaled_norm-Layers[i].new_sample_scaled_norm)
            
        if logRatio>np.log(np.random.rand()):
            for i in range(self.n_layers):
                Layers[i].update_current_sample()
                if not Layers[i].is_stationary:
                    Layers[i].LMat.set_current_L_to_latest()
                
            accepted = 1
        else:
            accepted=0
        # self.gibbs_step +=1
        # only record when needed
        if (self.record_count%self.record_skip) == 0:
            # print('recorded')
            for i in range(self.n_layers):
                Layers[i].record_sample()
        self.record_count += 1

        return accepted
Example #5
    def oneStep(self, v):
        """Single pCN Metropolis step conditioned on the fixed vector v.
        Returns 1 if the proposal is accepted, 0 otherwise."""
        norm_L_v_2 = util.norm2(self.LMat.current_L @ v)

        newSample = self.sample()
        newL = self.LMat.construct_from(newSample)

        log_det_newL = (np.linalg.slogdet(newL)[1])
        norm_newL_v_2 = util.norm2(newL @ v)
        norm_new_sample = util.norm2(newSample / self.uStdev)

        logRatio = self.computelogRatio(norm_L_v_2, norm_newL_v_2,
                                        norm_new_sample, log_det_newL)

        if logRatio > np.log(np.random.rand()):
            self.current_sample = newSample
            self.norm_current_sample = norm_new_sample
            self.current_log_det_L = log_det_newL
            self.LMat.set_current_L_to_latest()
            accepted = 1
        else:
            accepted = 0

        return accepted
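A hedged sketch of how a sampler exposing this `oneStep(v)` might be driven; `sampler` and `v` are assumed to exist already (for instance built as in Example #6), and the iteration count is illustrative:

n_iter = 10_000
accept_count = 0
for _ in range(n_iter):
    accept_count += sampler.oneStep(v)   # 1 if the proposal was accepted, 0 otherwise
print("acceptance rate:", accept_count / n_iter)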
Example #6
    def __init__(self, LMat, rg, uStdev, init_sample, beta=1):
        """Single-layer pCN sampler state: keeps the L matrix, random generator,
        prior stdev, and the current sample with its scaled norm and log-det."""
        self.LMat = LMat
        self.rand_gen = rg
        self.uStdev = uStdev
        self.beta = beta
        self.betaZ = np.sqrt(1 - beta**2)  # pCN complement coefficient

        #TODO: modify this
        self.current_sample = init_sample
        self.LMat.construct_from(init_sample)
        self.LMat.set_current_L_to_latest()
        self.norm_current_sample = util.norm2(self.current_sample /
                                              self.uStdev)
        self.current_log_det_L = self.LMat.logDet()
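With `betaZ = sqrt(1 - beta**2)`, the usual pCN proposal this coefficient is paired with (compare the `np.sqrt(1 - ...**2)` term in Example #1) is

    x_new = betaZ * x_current + beta * xi,    xi ~ prior,

which leaves the Gaussian prior invariant. `sample()` itself is not shown in these snippets, so this is only the standard reading of the coefficient, not a statement about the project's implementation.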
Example #7
def negLogPosterior(x, Layers):
    """
    Negative log posterior evaluated at the flat real vector x.
    Layers is a numba typed List; x is unpacked via xToUHalf into the
    per-layer half Fourier spectra.
    """
    negLogPost = 0.0
    uHalf_all = xToUHalf(x) # this is just like having new sample at the bottom layer
    n = Layers[0].pcn.fourier.fourier_basis_number
    uHalf_0 = uHalf_all[0:n]
    Layers[0].new_sample = uHalf_0
    Layers[0].new_sample_sym = Layers[0].pcn.random_gen.symmetrize(Layers[0].new_sample)
    Layers[0].new_sample_scaled_norm = util.norm2(Layers[0].new_sample/Layers[0].stdev)
    Layers[0].update_current_sample()
    negLogPost += Layers[0].current_sample_scaled_norm
    for i in range(1,len(Layers)):
        Layers[i].LMat.construct_from(Layers[i-1].new_sample)
        Layers[i].new_log_L_det = np.linalg.slogdet(Layers[i].LMat.latest_computed_L)[1]
        Layers[i].LMat.set_current_L_to_latest()
        # Layers[i].sample()
        if i== len(Layers)-1:
            wNew = util.symmetrize(uHalf_all[n*(i-1):n*i]) 
            eNew = np.random.randn(Layers[i].pcn.measurement.num_sample)
            wBar = np.concatenate((eNew,wNew))
            
            LBar = np.vstack((Layers[i].pcn.H,Layers[i].LMat.current_L))
            Layers[i].new_sample_sym, res, rnk, s = np.linalg.lstsq(LBar,Layers[i].pcn.yBar-wBar )#,rcond=None)
            Layers[i].new_sample = Layers[i].new_sample_sym[Layers[i].pcn.fourier.fourier_basis_number-1:]
        else:
            uHalf_i = uHalf_all[n*(i-1):n*i]
            Layers[i].new_sample = uHalf_i
        Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L@Layers[i].new_sample_sym)
        Layers[i].update_current_sample()
        negLogPost += 0.5*Layers[i].current_sample_scaled_norm
        negLogPost -= Layers[i].current_log_L_det    
    
    # self.current_neg_log_posterior = negLogPost
    return negLogPost
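Note that `negLogPosterior` is not a pure function of `x`: it draws fresh measurement noise (`eNew`) for the top layer and mutates every layer's state via `update_current_sample()` and `set_current_L_to_latest()`, so repeated calls with the same `x` generally return different values and change the Layers in place.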
Example #8
    def __init__(self, is_stationary, sqrt_beta, order_number, n_samples, pcn,
                 init_sample):
        """Layer of the hierarchy (1D Fourier version); mirrors the 2D layer in
        Example #3, with extra dummy declarations that numba needs for type
        inference."""
        self.is_stationary = is_stationary
        self.sqrt_beta = sqrt_beta
        self.order_number = order_number
        # self.current_above_sample = above_sample
        self.n_samples = n_samples

        #dummy declaration
        # a_pcn = pCN.pCN(pcn.n_layers,pcn.random_gen,pcn.measurement,pcn.fourier,pcn.beta)#numba cannot understand without this
        # self.pcn = a_pcn
        self.pcn = pcn

        # self.current_sample = np.zeros(f.basis_number,dtype=np.complex128)
        zero_compl_dummy = np.zeros(self.pcn.fourier.basis_number,
                                    dtype=np.complex128)
        ones_compl_dummy = np.ones(self.pcn.fourier.basis_number,
                                   dtype=np.complex128)

        self.stdev = ones_compl_dummy
        self.stdev_sym = util.symmetrize(self.stdev)
        self.samples_history = np.empty(
            (self.n_samples, self.pcn.fourier.basis_number),
            dtype=np.complex128)

        #dummy declaration
        fpcn = self.pcn.fourier
        f = fourier.FourierAnalysis(
            fpcn.basis_number, fpcn.extended_basis_number, fpcn.t_start,
            fpcn.t_end)  #numba cannot understand without this
        LMat = L.Lmatrix(f, self.sqrt_beta)
        # LMat.fourier = self.pcn.fourier
        self.LMat = LMat
        self.current_noise_sample = self.pcn.random_gen.construct_w(
        )  #noise sample always symmetric
        self.new_noise_sample = self.current_noise_sample.copy()

        if self.is_stationary:

            self.current_sample = init_sample
            self.new_sample = init_sample
            self.new_sample_sym = self.pcn.random_gen.symmetrize(
                self.new_sample)
            self.new_sample_scaled_norm = 0
            self.new_log_L_det = 0
            #numba need this initialization. otherwise it will not compile
            self.current_sample = init_sample.copy()
            self.current_sample_sym = self.new_sample_sym.copy()
            self.current_sample_scaled_norm = 0
            self.current_log_L_det = 0

        else:
            zero_init = np.zeros(self.pcn.fourier.basis_number,
                                 dtype=np.complex128)
            self.LMat.construct_from(init_sample)
            self.LMat.set_current_L_to_latest()
            self.new_sample_sym = np.linalg.solve(
                self.LMat.current_L, self.pcn.random_gen.construct_w())
            self.new_sample = self.new_sample_sym[self.pcn.fourier.
                                                  basis_number - 1:]
            self.new_sample_scaled_norm = util.norm2(
                self.LMat.current_L @ self.new_sample_sym)  #ToDO: Modify this
            self.new_log_L_det = self.LMat.logDet(True)  #ToDO: Modify this
            # #numba need this initialization. otherwise it will not compile
            self.current_sample = init_sample.copy()
            self.current_sample_sym = self.new_sample_sym.copy()
            self.current_sample_scaled_norm = self.new_sample_scaled_norm
            self.current_log_L_det = self.new_log_L_det

        # self.update_current_sample()
        self.i_record = 0
Example #9
# n_layers = 2
# Layers = List()
typed_list_status = importlib.util.find_spec('numba.typed.typedlist')
if typed_list_status is None:
    Layers = []
else:
    from numba.typed.typedlist import List
    Layers = List()
# factor = 1e-8
for i in range(n_layers):
    if i == 0:
        init_sample = np.linalg.solve(
            Lu, random_gen.construct_w())[f.fourier_basis_number - 1:]
        lay = layer.Layer(True, sqrtBeta_0, i, n_samples, pcn, init_sample)
        lay.stdev = uStdev
        lay.current_sample_scaled_norm = util.norm2(
            lay.current_sample / lay.stdev)  #ToDO: Modify this
        lay.new_sample_scaled_norm = lay.current_sample_scaled_norm
    else:

        if i == n_layers - 1:

            lay = layer.Layer(False, sqrtBeta_v, i, n_samples, pcn,
                              Layers[i - 1].current_sample)
            wNew = pcn.random_gen.construct_w()
            eNew = np.random.randn(pcn.measurement.num_sample)
            wBar = np.concatenate((eNew, wNew))

            LBar = np.vstack((pcn.H, lay.LMat.current_L))

            #update v
            lay.current_sample_symmetrized, res, rnk, s = np.linalg.lstsq(
                LBar, pcn.yBar - wBar, rcond=-1)
Example #10
    def __init__(self, n_layers, n_samples, n, beta, num, kappa, sigma_0,
                 sigma_v, sigma_scaling, evaluation_interval, printProgress,
                 seed, burn_percentage, enable_beta_feedback, pcn_variant,
                 measurement_signal_type):
        """Set up a 1D simulation: Fourier analysis, random generator,
        measurement, pCN sampler, and the layer hierarchy."""
        self.n_samples = n_samples
        self.meas_samples_num = num
        self.evaluation_interval = evaluation_interval
        self.burn_percentage = burn_percentage
        #set random seed
        self.random_seed = seed
        self.printProgress = printProgress
        self.n_layers = n_layers
        self.kappa = kappa
        self.sigma_0 = sigma_0
        self.sigma_v = sigma_v
        self.sigma_scaling = sigma_scaling
        self.enable_beta_feedback = enable_beta_feedback
        np.random.seed(self.random_seed)

        #setup parameters for 1 Dimensional simulation
        self.d = 1
        self.nu = 2 - self.d / 2
        self.alpha = self.nu + self.d / 2
        self.t_start = 0.0
        self.t_end = 1.0
        self.beta_0 = (sigma_0**2) * (
            2**self.d * np.pi**(self.d / 2)
        ) * 1.1283791670955126  # <-- this numerical value is scp.special.gamma(alpha) / scp.special.gamma(nu)
        self.beta_v = self.beta_0 * (sigma_v / sigma_0)**2
        self.sqrtBeta_v = np.sqrt(self.beta_v)
        self.sqrtBeta_0 = np.sqrt(self.beta_0)

        f = fourier.FourierAnalysis(n, num, self.t_start, self.t_end)
        self.fourier = f

        rg = randomGenerator.RandomGenerator(f.basis_number)
        self.random_gen = rg

        LuReal = (1 / self.sqrtBeta_0) * (
            self.fourier.Dmatrix * self.kappa**(-self.nu) -
            self.kappa**(2 - self.nu) * self.fourier.Imatrix)
        Lu = LuReal + 1j * np.zeros(LuReal.shape)

        uStdev = -1 / np.diag(Lu)
        # uStdev = uStdev[self.fourier.basis_number-1:]
        # uStdev[0] /= 2 #scaled

        meas_std = 0.1
        measurement = meas.Measurement(num, meas_std, self.t_start, self.t_end,
                                       measurement_signal_type)
        # pcn = pCN.pCN(n_layers,rg,measurement,f,beta)
        self.pcn_variant = pcn_variant
        self.pcn = pCN.pCN(n_layers, rg, measurement, f, beta,
                           self.pcn_variant)
        # self.pcn_pair_layers = pcn_pair_layers

        #initialize Layers
        typed_list_status = importlib.util.find_spec('numba.typed.typedlist')
        if typed_list_status is None:
            Layers = []
        else:
            from numba.typed.typedlist import List
            Layers = List()
        # Layers = []
        # factor = 1e-8
        # keep the recorded history bounded: e.g. n_samples = 100000 with
        # max_record_history = 10000 would give record_skip = 10, so only
        # every tenth sweep is stored
        self.pcn.record_skip = np.max(
            [1, self.n_samples // self.pcn.max_record_history])
        history_length = np.min([self.n_samples, self.pcn.max_record_history])
        self.pcn.sqrtBetas_history = np.empty((history_length, self.n_layers),
                                              dtype=np.float64)

        for i in range(self.n_layers):
            if i == 0:
                init_sample = np.linalg.solve(
                    Lu,
                    self.random_gen.construct_w())[self.fourier.basis_number -
                                                   1:]
                lay = layer.Layer(True, self.sqrtBeta_0, i, self.n_samples,
                                  self.pcn, init_sample)
                lay.LMat.current_L = Lu
                lay.LMat.latest_computed_L = Lu
                lay.stdev_sym = uStdev
                lay.stdev = uStdev[self.fourier.basis_number - 1:]
                lay.stdev[0] /= 2
                lay.current_sample_scaled_norm = util.norm2(
                    lay.current_sample / lay.stdev)  #ToDO: Modify this
                lay.new_sample_scaled_norm = lay.current_sample_scaled_norm
            else:

                if i == n_layers - 1:

                    lay = layer.Layer(False, self.sqrtBeta_v, i,
                                      self.n_samples, self.pcn,
                                      Layers[i - 1].current_sample)
                    wNew = self.pcn.random_gen.construct_w()
                    eNew = np.random.randn(self.pcn.measurement.num_sample)
                    wBar = np.concatenate((eNew, wNew))

                    LBar = np.vstack((self.pcn.H, lay.LMat.current_L))

                    #update v
                    lay.current_sample_sym, res, rnk, s = np.linalg.lstsq(
                        LBar, self.pcn.yBar - wBar, rcond=-1)
                    lay.current_sample = lay.current_sample_sym[
                        self.pcn.fourier.basis_number - 1:]
                else:
                    # lay = layer.Layer(False,self.sqrtBeta_v*np.sqrt(sigma_scaling),i,self.n_samples,self.pcn,Layers[i-1].current_sample)
                    lay = layer.Layer(False, self.sqrtBeta_v * sigma_scaling,
                                      i, self.n_samples, self.pcn,
                                      Layers[i - 1].current_sample)

            lay.update_current_sample()
            self.pcn.Layers_sqrtBetas[i] = lay.sqrt_beta
            # if self.pcn_variant:
            #     self.pcn.record_skip = np.max([1,(lay.n_samples*self.n_layers)//self.pcn.max_record_history])
            #     history_length = np.min([lay.n_samples*(self.n_layers),self.pcn.max_record_history])
            # else:

            lay.samples_history = np.empty(
                (history_length, self.fourier.basis_number),
                dtype=np.complex128)
            Layers.append(lay)

        self.Layers = Layers
        sim_result = simRes.SimulationResult()
        self.sim_result = sim_result
Example #11
    def one_step_for_sqrtBetas(self, Layers):
        """Propose new sqrt_beta values for all layers on the log scale with a
        pCN step, rebuild the layer samples under the proposal, and accept or
        reject the whole proposal jointly based on the measurement misfit.
        Returns 1 if accepted, 0 otherwise."""
        accepted_SqrtBeta = 0
        sqrt_beta_noises = self.stdev_sqrtBetas * np.random.randn(self.n_layers)
        # sqrtBetas = np.zeros(self.n_layers,dtype=np.float64)
        propSqrtBetas = np.zeros(self.n_layers, dtype=np.float64)

        for i in range(self.n_layers):
            
            # temp = np.sqrt(1-self.pcn_step_sqrtBetas**2)*Layers[i].sqrt_beta + self.pcn_step_sqrtBetas*sqrt_beta_noises[i]
            temp = self.pcn_step_sqrtBetas_Z*np.log(Layers[i].sqrt_beta) + self.pcn_step_sqrtBetas*sqrt_beta_noises[i]
            propSqrtBetas[i] = np.exp(temp)
            if i==0:
                stdev_sym_temp = (propSqrtBetas[i]/Layers[i].sqrt_beta)*Layers[i].stdev_sym
                Layers[i].new_sample_sym = stdev_sym_temp*Layers[i].current_noise_sample
            else:
                Layers[i].LMat.construct_from_with_sqrt_beta(Layers[i-1].new_sample,propSqrtBetas[i])
                if i < self.n_layers-1:
                    Layers[i].new_sample_sym = np.linalg.solve(Layers[i].LMat.latest_computed_L,Layers[i].current_noise_sample)
                else:        
                    wNew = Layers[-1].current_noise_sample
                    eNew = np.random.randn(self.measurement.num_sample)
                    wBar = np.concatenate((eNew,wNew))
                    LBar = np.vstack((self.H,Layers[-1].LMat.latest_computed_L))
                    v, res, rnk, s = np.linalg.lstsq(LBar,self.yBar-wBar )
                    Layers[-1].new_sample_sym = v
            
            Layers[i].new_sample = Layers[i].new_sample_sym[self.fourier.basis_number-1:]

        logRatio = 0.5*(util.norm2(self.y/self.measurement.stdev - self.H@Layers[-1].current_sample_sym))
        logRatio -= 0.5*(util.norm2(self.y/self.measurement.stdev - self.H@Layers[-1].new_sample_sym))

        if logRatio > np.log(np.random.rand()):
            accepted_SqrtBeta = 1
            # print('Proposal sqrt_beta accepted!')
            self.Layers_sqrtBetas = propSqrtBetas
            for i in range(self.n_layers):
                Layers[i].sqrt_beta = propSqrtBetas[i]
                Layers[i].LMat.set_current_L_to_latest()
                if Layers[i].is_stationary:
                    Layers[i].stdev_sym = stdev_sym_temp
                    Layers[i].stdev = Layers[i].stdev_sym[self.fourier.basis_number-1:]

        return accepted_SqrtBeta

    # def one_step_non_centered_new(self,Layers):
    #     accepted = 0
        
        
    #     for i in range(self.n_layers):
    #         Layers[i].sample_non_centered()
    #         if i>0:
    #             Layers[i].LMat.construct_from(Layers[i-1].new_sample)
    #             # if i<self.n_layers-1:
    #             Layers[i].new_sample_sym = np.linalg.solve(Layers[i].LMat.latest_computed_L,Layers[i].new_noise_sample)
    #         else:
    #             Layers[i].new_sample_sym = Layers[i].stdev_sym*Layers[i].new_noise_sample
    #         Layers[i].new_sample = Layers[i].new_sample_sym[self.fourier.basis_number-1:]

    #     logRatio = 0.5*(util.norm2(self.measurement.yt/self.measurement.stdev - self.H@Layers[self.n_layers-1].current_sample_sym) - util.norm2(self.measurement.yt/self.measurement.stdev - self.H@Layers[self.n_layers-1].new_sample_sym))
    #     # a = np.min(np.array([1,np.exp(logRatio)]))
    #     if logRatio>np.log(np.random.rand()):
    #     # if a>np.random.rand():
    #         accepted = 1
    #         for i in range(self.n_layers):
    #             Layers[i].update_current_sample()
    #             if not Layers[i].is_stationary:
    #                 Layers[i].LMat.set_current_L_to_latest()

    #         # only record when needed
    #     if (self.record_count%self.record_skip) == 0:
    #         # print('recorded')
    #         for i in range(self.n_layers):
    #             Layers[i].record_sample()
    #     self.record_count += 1
    #     return accepted


    # def one_step_one_element(self,Layers,element_index):
    #     logRatio = 0.0
    #     for i in range(self.n_layers):
    #     # i = int(self.gibbs_step//len(Layers))
    #         if i == 0:
    #             Layers[i].sample_one_element(element_index)
    #         else:
    #             Layers[i].sample()
    #         # new_sample = Layers[i].new_sample
    #         if i> 0:
    #             Layers[i].LMat.construct_from(Layers[i-1].new_sample)
    #             Layers[i].new_log_L_det = (np.linalg.slogdet(Layers[i].LMat.latest_computed_L)[1])
    #             Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L@Layers[i].new_sample_sym)
    #             Layers[i].new_sample_scaled_norm = util.norm2(Layers[i].LMat.latest_computed_L@Layers[i].new_sample_sym)

    #             logRatio += 0.5*(Layers[i].current_sample_scaled_norm-Layers[i].new_sample_scaled_norm)
    #             logRatio += (Layers[i].new_log_L_det-Layers[i].current_log_L_det)
    #         else:
    #             #TODO: Check whether 0.5 factor should be added below
    #             # Layers[i].new_sample_scaled_norm = util.norm2(Layers[i].new_sample/Layers[i].stdev)
    #             # logRatio += (Layers[i].current_sample_scaled_norm-Layers[i].new_sample_scaled_norm)
    #             logRatio += np.abs((Layers[i].new_sample[element_index]-Layers[i].current_sample[element_index])/Layers[i].stdev[element_index].real)**2
            
    #     if logRatio>np.log(np.random.rand()):
    #         for i in range(self.n_layers):
    #             Layers[i].update_current_sample()
    #             if not Layers[i].is_stationary:
    #                 Layers[i].LMat.set_current_L_to_latest()
                
    #         accepted = 1
    #     else:
    #         accepted=0
    #     # self.gibbs_step +=1
        
    #     for i in range(self.n_layers):
    #         Layers[i].record_sample()

    #     return accepted
    
    # def oneStep_pair(self,Layers):
        
    #     accepted = 0
    #     for i in range(self.n_layers-1,0,-1):#do it from the back
    #     # i = int(self.gibbs_step//len(Layers))
    #         logRatio = 0.0
    #         if i == self.n_layers-1:    
    #             Layers[i].sample()
    #         Layers[i-1].sample()
    #         #Layer i
    #         Layers[i].LMat.construct_from(Layers[i-1].new_sample)
    #         Layers[i].new_log_L_det = np.linalg.slogdet(Layers[i].LMat.latest_computed_L)[1]
            
    #         Layers[i].current_sample_scaled_norm = util.norm2(Layers[i].LMat.current_L@Layers[i].new_sample_sym)
    #         # assert Layers[i].current_sample_scaled_norm != np.nan
    #         Layers[i].new_sample_scaled_norm = util.norm2(Layers[i].LMat.latest_computed_L@Layers[i].new_sample_sym)
    #         # assert Layers[i].new_sample_scaled_norm != np.nan

    #         logRatio += 0.5*(Layers[i].current_sample_scaled_norm-Layers[i].new_sample_scaled_norm)
    #         logRatio += (Layers[i].new_log_L_det-Layers[i].current_log_L_det)
            
    #         #Layer i-1
    #         if i-1>0:
    #             Layers[i-1].new_sample_scaled_norm = util.norm2(Layers[i-1].LMat.current_L@Layers[i-1].new_sample_sym)
    #         else:
    #             Layers[i-1].new_sample_scaled_norm = util.norm2(Layers[i-1].new_sample/Layers[i-1].stdev)
                    
    #         logRatio += 0.5*(Layers[i-1].current_sample_scaled_norm-Layers[i-1].new_sample_scaled_norm)
    #         if logRatio>np.log(np.random.rand()):
    #             for i in range(self.n_layers):
    #                 Layers[i].update_current_sample()
    #                 if not Layers[i].is_stationary:
    #                     Layers[i].LMat.set_current_L_to_latest()
                    
    #             accepted += 1
        
            
    #         # for i in range(self.n_layers):
    #         #     Layers[i].record_sample()
    #         #  only record when needed
    #         if (self.record_count%self.record_skip) == 0:
    #             # print('recorded')
    #             for i in range(self.n_layers):
    #                 Layers[i].record_sample()
    #         self.record_count += 1

    #     return accepted