Example #1
0
	
	for t in range(T):
		for i in range(N):
			# j = 1
			# Propagate
			Xcur[i,:,0] = a*Xprev[i,:,0] + (1/np.sqrt(tauRho))*np.random.normal(size=M)
			
			# Weight
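			# Log-sum-exp trick: subtract the max log-weight before
			# exponentiating, then add it back when forming logZ[i], to
			# avoid numerical under/overflow.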
			logW = logPhi(Xcur[i,:,0],y[0,t])
			maxLogW = np.max(logW)
			w = np.exp(logW - maxLogW)
			logZ[i] = maxLogW + np.log(np.sum(w))-np.log(M)
			w /= np.sum(w)
			
			# Resample
			ancestors = hlp.resampling(w)
			Xprev[i,:,:] = Xprev[i,ancestors,:]
			Xcur[i,:,0] = Xcur[i,ancestors,0]
			
			# j = 2:d
			for j in np.arange(1,d):
				# Propagate
				tau = tauRho + tauPsi
				mu = (tauRho*a*Xprev[i, :, j] + tauPsi*Xcur[i,:,j-1])/tau
				Xcur[i,:,j] = mu + (1/np.sqrt(tau))*np.random.normal(size=M)
			
				# Weighting
				logW = logPhi(Xcur[i, :, j],y[j,t])
				maxLogW = np.max(logW)
				w = np.exp(logW - maxLogW)
				logZ[i] += maxLogW + np.log(np.sum(w))-np.log(M)
Example #2
0
def runNested(d, tauPhi, N, M, nrRuns):
    r"""Run NSMC filtering on high-dimensional LGSS.
    
    Parameters
    ----------
    d : int
        State dimension.
    tauPhi : float
        Measurement precision.
    N : int
        Number of particles, 1st level.
    M : int
        Number of particles, 2nd level.
    nrRuns : int
        Number of independent runs of the algorithm.
    
    Returns
    -------
    None
        Writes per-time-step ESS, E[X] and E[X**2] estimates to a CSV file.
    """
    # Model init
    a = 0.5
    tauPsi = 1.
    tauRho = 1.
    params = hlp.params(a=a, tauPsi=tauPsi, tauRho=tauRho, tauPhi=tauPhi)

    filename = 'simulatedData/d' + str(d) + 'tauPhi' + str(tauPhi) + 'y.txt'
    y = np.loadtxt(filename)
    T = y.shape[1]

    for j in range(nrRuns):
        # Algorithm init
        logZ = np.zeros(N)
        ancestors = np.zeros(N)
        X = np.zeros((N, d))
        ESS = np.zeros(T)

        # Setup new file
        filename = './results/paper/d' + str(d) + '_N' + str(N) + '_M' + str(
            M) + 'tauPhi' + str(tauPhi) + '_nestedSMCrun' + str(j + 1) + '.csv'
        f = open(filename, 'w')
        #f.write('Iter ESS E[x] E[x**2]\n')
        f.close()

        for t in range(T):
            q = [
                nsmc.nestedFAPF(params, y[:, t], M, X[i, :]) for i in range(N)
            ]

            for i in range(N):
                logZ[i] = q[i].logZ

            maxLz = np.max(logZ)
            w = np.exp(logZ - maxLz)
            w /= np.sum(w)
            ESS[t] = 1 / np.sum(w**2)
            ancestors = hlp.resampling(w)
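            # Below, one trajectory is drawn from each selected inner filter;
            # BS=True presumably requests a backward-simulation draw (an
            # assumption based on the argument name).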

            for i in range(N):
                X[i, :] = q[ancestors[i]].simulate(1, BS=True)

            f = open(filename, 'a')
            tmpVec = np.r_[t + 1, ESS[t],
                           np.mean(X, axis=0),
                           np.mean(X**2, axis=0)]
            np.savetxt(f, tmpVec.reshape((1, len(tmpVec))), delimiter=',')
            f.close()
Example #3
0
    def __init__(self, t, N, M, xCond=None):
		C1 = 0.5
		C2 = 3.
		
		# Model init
		xDomain = np.arange(2)
				
		# Load parameters
		region = 'dustBowl'
		#region = 'sahel'
		filename = 'parameters/'+region+'Sigma2_N35-55_W90-120_downsampled.csv'
		sigma2 = np.loadtxt(filename, delimiter=',')
		filename = 'parameters/'+region+'MuAb_N35-55_W90-120_downsampled.csv'
		muAb = np.loadtxt(filename, delimiter=',')
		filename = 'parameters/'+region+'MuNorm_N35-55_W90-120_downsampled.csv'
		muNorm = np.loadtxt(filename, delimiter=',')
		filename = 'processedData/'+region+'Yt'+str(t)+'_N35-55_W90-120_downsampled.csv'
		
		Y = np.loadtxt(filename, delimiter=',')
		I = Y.shape[0]
		J = Y.shape[1]
		
		# SMC init
		X = np.zeros( (N, I, J), dtype=bool )
		ancestors = np.zeros( N )
		logZ = 0.
		logW = np.zeros( N )
		w = np.zeros( N )
		ESS = np.zeros( J )
		
		# ---------------
		#      SMC
		# ---------------        
		# SMC first iteration
		params = hlp.params(I = I, muAb = muAb[:,0], muNorm = muNorm[:,0], sigma2 = sigma2[:,0])
		if xCond is not None:
			q = [nested.inner(params, Y[:,0], M, xCond[:,0]) for i in range(N)]
		else:
			q = [nested.inner(params, Y[:,0], M) for i in range(N)]
		logW = np.array([q[i].logZ for i in range(N)])
		maxLogW = np.max(logW)
		w = np.exp(logW - maxLogW)
		logZ = maxLogW + np.log(np.sum(w)) - np.log(N)
		w /= np.sum(w)
		#print w.shape
		ESS[0] = 1/np.sum(w**2)
		#print 'ESS: ',ESS[0]
		#print 'First logZ: ',logZ
		
		ancestors = hlp.resampling(w)
		for i in range(N):
			X[i,:,0] = q[ancestors[i]].simulate()
		
		## SMC MAIN LOOP
		for j in np.arange(1,J):
			#print j
			params = hlp.params(I = I, muAb = muAb[:,j], muNorm = muNorm[:,j], sigma2 = sigma2[:,j])
			if xCond is not None:
				q = [nested.inner(params, Y[:,j], M, xCond[:,j], X[i,:,j-1]) for i in range(N)]
			else:
				q = [nested.inner(params, Y[:,j], M, xSpaceCond = X[i,:,j-1]) for i in range(N)]
			logW = np.array([q[i].logZ for i in range(N)])
			maxLogW = np.max(logW)
			w = np.exp(logW - maxLogW)
			logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
			#print 'j: ',j,' logZ',logZ
			#print 'logW: ',logW
			#print 'Y: ',Y[:,j]
			w /= np.sum(w)
			#print 'Max w: ',np.max(w)
			ESS[j] = 1/np.sum(w**2)
			#print 'ESS: ',ESS[j]
			
			ancestors = hlp.resampling(w)
			for i in range(N):
				X[i,:,j] = q[ancestors[i]].simulate()
		
		#print 'Last logZ: ',logZ
		## Save init to class object
		self.N = N
		self.J = J
		self.I = I
		self.X = X
		self.logZ = logZ
		self.w = w
		self.xCond = xCond
		self.ESS = ESS
Example #4
0
def runNested(d, tauPhi, N, M, nrRuns):
    r"""Run NSMC filtering on high-dimensional LGSS.
    
    Parameters
    ----------
    d : int
        State dimension.
    tauPhi : float
        Measurement precision.
    N : int
        Number of particles, 1st level.
    M : int
        Number of particles, 2nd level.
    nrRuns : int
        Number of independent runs of the algorithm.
    
    Returns
    -------
    None
        Writes per-time-step ESS, E[X] and E[X**2] estimates to a CSV file.
    """
    # Model init
    a = 0.5
    tauPsi = 1.
    tauRho = 1.
    params = hlp.params(a = a,tauPsi = tauPsi,tauRho = tauRho,tauPhi = tauPhi)

    filename = 'simulatedData/d'+str(d)+'tauPhi'+str(tauPhi)+'y.txt'
    y = np.loadtxt(filename)
    T = y.shape[1]

    for j in range(nrRuns):
        # Algorithm init
        logZ = np.zeros(N)
        ancestors = np.zeros(N)
        X = np.zeros((N,d))
        ESS = np.zeros(T)

        # Setup new file
        filename = './results/paper/d'+str(d)+'_N'+str(N)+'_M'+str(M)+'tauPhi'+str(tauPhi)+'_nestedSMCrun'+str(j+1)+'.csv'
        f = open(filename, 'w')
        #f.write('Iter ESS E[x] E[x**2]\n')
        f.close()
        
        for t in range(T):
            q = [ nsmc.nestedFAPF(params, y[:,t], M, X[i,:]) for i in range(N) ]

            for i in range(N):
                logZ[i] = q[i].logZ
                
            maxLz = np.max(logZ)
            w = np.exp(logZ-maxLz)
            w /= np.sum(w)
            ESS[t] = 1/np.sum(w**2)
            ancestors = hlp.resampling(w)
            
            for i in range(N):
                X[i,:] = q[ancestors[i]].simulate(1,BS=True)    
            
            f = open(filename, 'a')
            tmpVec = np.r_[t+1, ESS[t], np.mean(X,axis=0), np.mean(X**2,axis=0)]
            np.savetxt(f, tmpVec.reshape((1,len(tmpVec))),delimiter=',')
            f.close()
Example #5
0
    def __init__(self, params, y, N, xTimeCond=None, xSpaceCond=None):
        C1 = 0.5
        C2 = 3.

        # Model init
        xDomain = np.arange(2)

        def logPhi(x, y, sig2, mu_ab, mu_norm):
            return -0.5 * (y - mu_ab * x.astype('float') - mu_norm *
                           (1. - x.astype('float')))**2 / sig2

        I = params.I
        muAb = params.muAb
        muNorm = params.muNorm
        sigma2 = params.sigma2

        # SMC init
        X = np.zeros((N, I), dtype=bool)
        ancestors = np.zeros(N)
        logZ = 0.
        logW = np.zeros(N)
        w = np.zeros(N)

        # ---------------
        #      SMC
        # ---------------
        # Sample proposal
        tempDist = np.zeros(2)
        if xTimeCond is not None:
            tempDist += C2 * (xTimeCond[0] == xDomain.astype(bool))
        if xSpaceCond is not None:
            tempDist += C1 * (xSpaceCond[0] == xDomain.astype(bool))
        tempDist = np.exp(tempDist)
        tempDist /= np.sum(tempDist)
        X[:, 0] = hlp.discreteSampling(tempDist, xDomain, N)

        # Weighting
        logW = logPhi(X[:, 0], y[0], sigma2[0], muAb[0], muNorm[0])
        maxLogW = np.max(logW)
        w = np.exp(logW - maxLogW)
        logZ = maxLogW + np.log(np.sum(w)) - np.log(N)
        w /= np.sum(w)
        ancestors = hlp.resampling(w)
        X[:, 0] = X[ancestors, 0]

        ## SMC MAIN LOOP
        for i in np.arange(1, I):
            tempDist = np.zeros(2)
            if xTimeCond is not None:
                tempDist += C2 * (xTimeCond[i] == xDomain.astype(bool))
            if xSpaceCond is not None:
                tempDist += C1 * (xSpaceCond[i] == xDomain.astype(bool))
            for iParticle in range(N):
                tempParticleDist = tempDist + C1 * (X[iParticle, i - 1]
                                                    == xDomain.astype(bool))
                tempParticleDist = np.exp(tempParticleDist)
                tempParticleDist /= np.sum(tempParticleDist)
                X[iParticle, i] = hlp.discreteSampling(tempParticleDist,
                                                       xDomain, 1)
            logW = logPhi(X[:, i], y[i], sigma2[i], muAb[i], muNorm[i])
            maxLogW = np.max(logW)
            w = np.exp(logW - maxLogW)
            logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
            #if math.isnan(logZ):
            #print 'X: ',X[:,i]
            #print 'y: ',y[i]
            #print 'muAb: ',muAb[i]
            #print 'muNorm: ',muNorm[i]
            #print 'sig2: ',sigma2[i]
            #raw_input()
            w /= np.sum(w)
            ancestors = hlp.resampling(w)
            X[:, i] = X[ancestors, i]

        ## Save init to class object
        self.N = N
        self.C1 = C1
        self.I = I
        self.X = X
        self.y = y
        self.logZ = logZ
        self.w = w
Example #6
0
    def __init__(self, params, Y, N, xCond=None):
		# Model init
		d = len(Y)
		def logPhi(x,y): return -0.5*params.tauPhi*(x-y)**2
		
		# SMC init
		if xCond is None:
			xCond = np.zeros( d )
		logZ = 0.
		X = np.zeros( (N, d) )
		Xa = np.zeros( (N, d) )
		ancestors = np.zeros( N )
		w = np.zeros( (N, d) )
		W = np.zeros( (N, d) )
		logW = np.zeros( N )
		#ESS = np.zeros( d )
		#NT = N/2
		#resamp = 0
		
		# i=1
		X[:,0] = params.tauRho*params.a*xCond[0] + (1/np.sqrt(params.tauRho))*np.random.normal(size=N)
		
		# Weighting
		logW = logPhi(X[:,0],Y[0])
		maxLogW = np.max(logW)
		w[:,0] = np.exp(logW - maxLogW)
		logZ += maxLogW + np.log(np.sum(w[:,0])) - np.log(N)
		w[:,0] /= np.sum(w[:,0])
		ancestors = hlp.resampling(w[:,0])
		X[:,0] = X[ancestors,0]
		Xa[:,0] = X[:,0] 
		#tempW = w[:,0] / np.sum(w[:,0])
		#ESS[0] = 1/np.sum(tempW**2)
		
		#if ESS[0] < NT:
			#logZ += maxLogW + np.log(np.sum(w[:,0])) - np.log(N)
			
			#w[:,0] /= np.sum(w[:,0])
			#ancestors = hlp.resampling(w[:,0],scheme='sys')
			#X[:,0] = X[ancestors,0] 
			
			#logW = np.zeros( N )
		#else:
			#ancestors = np.arange(N)
				
		# i=2:d
		for i in np.arange(1,d):
			# Propagate
			tau = params.tauRho + params.tauPsi
			mu = (params.tauRho*params.a*xCond[i] + params.tauPsi*X[:,i-1])/tau
			X[:,i] = mu + (1/np.sqrt(tau))*np.random.normal(size=N)
			
			# Weighting, Resampling
			logW = logPhi(X[:,i],Y[i])
			maxLogW = np.max(logW)
			w[:,i] = np.exp(logW - maxLogW)
			logZ += maxLogW + np.log(np.sum(w[:,i])) - np.log(N)
			w[:,i] /= np.sum(w[:,i])
			ancestors = hlp.resampling(w[:,i])
			X[:,i] = X[ancestors,i] 
			Xa[:,:i] = Xa[ancestors,:i]
			Xa[:,i] = X[:,i]
			#tempW = w[:,i] / np.sum(w[:,i])
			#ESS[i] = 1/np.sum(tempW**2)
			
			## ESS-based Resampling
			#if ESS[i] < NT or i == d-1:
				#logZ += maxLogW + np.log(np.sum(w[:,i])) - np.log(N)
				
				#w[:,i] /= np.sum(w[:,i])
				#ancestors = hlp.resampling(w[:,i],scheme='sys')
				#X[:,i] = X[ancestors,i] 
				
				#logW = np.zeros( N )
			#else:
				#ancestors = np.arange(N)
		
		# Save init to class object
		self.N = N
		self.d = d
		self.X = X
		self.Xa = Xa
		self.Y = Y
		self.params = params
		self.logZ = logZ
		self.w = w
		self.xCond = xCond
Example #7
0
    def __init__(self, params, y, N, xTimeCond=None, xSpaceCond=None):
		C1 = 0.5
		C2 = 3.

		# Model init
		xDomain = np.arange(2)
		def logPhi(x,y,sig2,mu_ab,mu_norm): return -0.5*(y-mu_ab*x.astype('float')-mu_norm*(1.-x.astype('float')))**2/sig2

		I = params.I
		muAb = params.muAb
		muNorm = params.muNorm
		sigma2 = params.sigma2
		
		# SMC init
		X = np.zeros( (N, I), dtype=bool )
		ancestors = np.zeros( N )
		logZ = 0.
		logW = np.zeros( N )
		w = np.zeros( N )
		
		# ---------------
		#      SMC
		# ---------------        
		# Sample proposal
		tempDist = np.zeros(2)
		if xTimeCond is not None:
			tempDist += C2*(xTimeCond[0] == xDomain.astype(bool))
		if xSpaceCond is not None:
			tempDist += C1*(xSpaceCond[0] == xDomain.astype(bool))
		tempDist = np.exp(tempDist)
		tempDist /= np.sum(tempDist)
		X[:,0] = hlp.discreteSampling(tempDist, xDomain, N)
		
		# Weighting
		logW = logPhi(X[:,0], y[0], sigma2[0], muAb[0], muNorm[0])
		maxLogW = np.max(logW)
		w = np.exp(logW-maxLogW)
		logZ = maxLogW + np.log(np.sum(w)) - np.log(N)
		w /= np.sum(w)
		ancestors = hlp.resampling(w)
		X[:,0] = X[ancestors,0]
		
			
		## SMC MAIN LOOP
		for i in np.arange(1,I):
			tempDist = np.zeros(2)
			if xTimeCond is not None:
				tempDist += C2*(xTimeCond[i] == xDomain.astype(bool))
			if xSpaceCond is not None:
				tempDist += C1*(xSpaceCond[i] == xDomain.astype(bool))
			for iParticle in range(N):
				tempParticleDist = tempDist+C1*(X[iParticle,i-1] == xDomain.astype(bool))
				tempParticleDist = np.exp(tempParticleDist)
				tempParticleDist /= np.sum(tempParticleDist)
				X[iParticle,i] = hlp.discreteSampling(tempParticleDist, xDomain, 1)
			logW = logPhi(X[:,i], y[i], sigma2[i], muAb[i], muNorm[i])
			maxLogW = np.max(logW)
			w = np.exp(logW-maxLogW)
			logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
			#if math.isnan(logZ):
				#print 'X: ',X[:,i]
				#print 'y: ',y[i]
				#print 'muAb: ',muAb[i]
				#print 'muNorm: ',muNorm[i]
				#print 'sig2: ',sigma2[i]
				#raw_input()
			w /= np.sum(w)
			ancestors = hlp.resampling(w)
			X[:,i] = X[ancestors,i]
		
		## Save init to class object
		self.N = N
		self.C1 = C1
		self.I = I
		self.X = X
		self.y = y
		self.logZ = logZ
		self.w = w
Example #8
0
def runModular(tStart, tEnd, Np, N, M):
    r"""Run the drought detection filtering algorithm (NSMC) for a specific 
    region.
    
    Parameters
    ----------
    tStart : int
        Start year.
    tEnd : int
        End year.
    Np : int
        Number of particles, 1st level.
    N : int
        Number of particles, 2nd level.
    M : int
        Number of particles, 3rd level.
    
    Returns
    -------
    None
        Writes the estimated marginal posterior mean for each year to a CSV file.
    """
    # Model init
    region = 'us'
    #region = 'sahel'
    if region == 'us':
        X = np.zeros((Np,20,30))
        xC = np.zeros((20,30),dtype=bool)
    else:
        X = np.zeros((Np,24,44))
        xC = np.zeros((24,44),dtype=bool)
    logZ = np.zeros(Np)


    q = []

    for i in range(Np):
        q.append(nsmc.nestedSMC(t=tStart,N=N,M=M,xCond=xC))
        logZ[i] = q[i].logZ
    maxLZ = np.max(logZ)
    w = np.exp(logZ-maxLZ)
    w /= np.sum(w)
    ESS = 1/np.sum(w**2)
    ancestors = hlp.resampling(w)
    for i in range(Np):
        X[i,:,:] = q[ancestors[i]].simulate()

    folder = '/data/chran60/nestedsmc/drought'
    np.savetxt(folder+'/Np'+str(Np)+'_M'+str(N)+'_M'+str(M)+'_t'+str(tStart)+region+'.csv',np.mean(X,axis=0),delimiter=',')

    for t in np.arange(1,tEnd-tStart+1):
        print('t: ', tStart + t, ' ESS: ', ESS)
        q = []
        for i in range(Np):
            q.append(nsmc.nestedSMC(t=t+tStart,N=N,M=M,xCond=X[i,:,:]))
            logZ[i] = q[i].logZ
        maxLZ = np.max(logZ)
        w = np.exp(logZ-maxLZ)
        w /= np.sum(w)
        ESS = 1/np.sum(w**2)
        ancestors = hlp.resampling(w)
        for i in range(Np):
            X[i,:,:] = q[ancestors[i]].simulate()
        np.savetxt(folder+'/Np'+str(Np)+'_M'+str(N)+'_M'+str(M)+'_t'+str(tStart+t)+region+'.csv',np.mean(X,axis=0),delimiter=',')
Example #9
0
    for t in range(T):
        for i in range(N):
            # j = 1
            # Propagate
            Xcur[i, :, 0] = a * Xprev[i, :, 0] + (
                1 / np.sqrt(tauRho)) * np.random.normal(size=M)

            # Weight
            logW = logPhi(Xcur[i, :, 0], y[0, t])
            maxLogW = np.max(logW)
            w = np.exp(logW - maxLogW)
            logZ[i] = maxLogW + np.log(np.sum(w)) - np.log(M)
            w /= np.sum(w)

            # Resample
            ancestors = hlp.resampling(w)
            Xprev[i, :, :] = Xprev[i, ancestors, :]
            Xcur[i, :, 0] = Xcur[i, ancestors, 0]

            # j = 2:d
            for j in np.arange(1, d):
                # Propagate
                tau = tauRho + tauPsi
                mu = (tauRho * a * Xprev[i, :, j] +
                      tauPsi * Xcur[i, :, j - 1]) / tau
                Xcur[i, :,
                     j] = mu + (1 / np.sqrt(tau)) * np.random.normal(size=M)

                # Weighting
                logW = logPhi(Xcur[i, :, j], y[j, t])
                maxLogW = np.max(logW)
Example #10
0
NT = N / 2
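# ESS threshold for adaptive resampling: systematic resampling is performed
# only when the effective sample size drops below NT.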
Xcur = np.zeros((N, d))
Xprev = np.zeros((N, d))
logW = np.zeros(N)
w = np.ones(N)
ancestors = np.zeros(N)
ESS = N * np.ones(T * d)

filename = './results/d' + str(d) + '_N' + str(N) + 'tauPhi' + str(
    tauPhi) + '_nipsSMC.csv'
f = open(filename, 'w')
f.close()

for t in range(T):
    if ESS[t * d - 1] < NT:
        ancestors = hlp.resampling(w, scheme='sys')
        Xprev = Xprev[ancestors, :]
        w = np.ones(N)

    Xcur[:,
         0] = a * Xprev[:,
                        0] + (1 / np.sqrt(tauRho)) * np.random.normal(size=N)
    logW = logPhi(Xcur[:, 0], y[0, t])
    maxLogW = np.max(logW)
    w *= np.exp(logW - maxLogW)
    w /= np.sum(w)

    ESS[t * d] = 1 / np.sum(w**2)

    for i in np.arange(1, d):
        # Resampling
Example #11
0
    def __init__(self, t, N, M, xCond=None):
        C1 = 0.5
        C2 = 3.

        # Model init
        xDomain = np.arange(2)

        # Load parameters
        region = 'dustBowl'
        #region = 'sahel'
        filename = 'parameters/' + region + 'Sigma2_N35-55_W90-120_downsampled.csv'
        sigma2 = np.loadtxt(filename, delimiter=',')
        filename = 'parameters/' + region + 'MuAb_N35-55_W90-120_downsampled.csv'
        muAb = np.loadtxt(filename, delimiter=',')
        filename = 'parameters/' + region + 'MuNorm_N35-55_W90-120_downsampled.csv'
        muNorm = np.loadtxt(filename, delimiter=',')
        filename = 'processedData/' + region + 'Yt' + str(
            t) + '_N35-55_W90-120_downsampled.csv'

        Y = np.loadtxt(filename, delimiter=',')
        I = Y.shape[0]
        J = Y.shape[1]

        # SMC init
        X = np.zeros((N, I, J), dtype=bool)
        ancestors = np.zeros(N)
        logZ = 0.
        logW = np.zeros(N)
        w = np.zeros(N)
        ESS = np.zeros(J)

        # ---------------
        #      SMC
        # ---------------
        # SMC first iteration
        params = hlp.params(I=I,
                            muAb=muAb[:, 0],
                            muNorm=muNorm[:, 0],
                            sigma2=sigma2[:, 0])
        if xCond is not None:
            q = [
                nested.inner(params, Y[:, 0], M, xCond[:, 0]) for i in range(N)
            ]
        else:
            q = [nested.inner(params, Y[:, 0], M) for i in range(N)]
        logW = np.array([q[i].logZ for i in range(N)])
        maxLogW = np.max(logW)
        w = np.exp(logW - maxLogW)
        logZ = maxLogW + np.log(np.sum(w)) - np.log(N)
        w /= np.sum(w)
        #print w.shape
        ESS[0] = 1 / np.sum(w**2)
        #print 'ESS: ',ESS[0]
        #print 'First logZ: ',logZ

        ancestors = hlp.resampling(w)
        for i in range(N):
            X[i, :, 0] = q[ancestors[i]].simulate()

        ## SMC MAIN LOOP
        for j in np.arange(1, J):
            #print j
            params = hlp.params(I=I,
                                muAb=muAb[:, j],
                                muNorm=muNorm[:, j],
                                sigma2=sigma2[:, j])
            if xCond is not None:
                q = [
                    nested.inner(params, Y[:, j], M, xCond[:, j],
                                 X[i, :, j - 1]) for i in range(N)
                ]
            else:
                q = [
                    nested.inner(params, Y[:, j], M, xSpaceCond=X[i, :, j - 1])
                    for i in range(N)
                ]
            logW = np.array([q[i].logZ for i in range(N)])
            maxLogW = np.max(logW)
            w = np.exp(logW - maxLogW)
            logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
            #print 'j: ',j,' logZ',logZ
            #print 'logW: ',logW
            #print 'Y: ',Y[:,j]
            w /= np.sum(w)
            #print 'Max w: ',np.max(w)
            ESS[j] = 1 / np.sum(w**2)
            #print 'ESS: ',ESS[j]

            ancestors = hlp.resampling(w)
            for i in range(N):
                X[i, :, j] = q[ancestors[i]].simulate()

        #print 'Last logZ: ',logZ
        ## Save init to class object
        self.N = N
        self.J = J
        self.I = I
        self.X = X
        self.logZ = logZ
        self.w = w
        self.xCond = xCond
        self.ESS = ESS
Example #12
0
    def smc(self, order, N, NT=1.1, resamp='mult', verbose=False):
        """
        SMC algorithm to estimate (log)Z of the classical XY model with 
        free boundary conditions.
        
        Parameters
        ----------
        order : 1-D array_like
            The order in which to add the random variables x, flat index.
        N : int
            The number of particles used to estimate Z.
        NT : float
            Threshold for ESS-based resampling, in (0, 1] or 1.1. Resampling
            is triggered when ESS < NT*N; with NT = 1.1 resampling is done at
            every iteration.
        resamp : string
            Resampling scheme, one of {mult, res, strat, sys}.
        verbose : bool
            If True, return (logZ, xMean, ESS) instead of only the final logZ.
        
        Returns
        -------
        logZ : float
            Estimate of log Z in double precision.
        """
        # Init variables
        nx = self.sz
        ny = self.sz
        logZ = np.zeros( nx*ny )
        indSorted = order.argsort()
        orderSorted = order[indSorted]
        # SMC specific
        trajectory = np.zeros( (N, len(order)) )
        ancestors = np.zeros( N, dtype=int )
        nu = np.zeros( N )
        tempNu = np.zeros( N )
        ess = np.zeros( len(order)-1 )
        iter = 0

        # -------
        #   SMC
        # -------
        # First iteration
        ix, iy = hlp.unravel_index( order[0], (nx,ny) )
        tempMean = 0.
        tempDispersion = 0.
        trajectory[:,0] = np.random.vonmises(tempMean, tempDispersion, N)
        # Log-trick update of adjustment multipliers and logZ
        tempDispersion = np.zeros(N)

        for iSMC in range( 1, len(order) ):
            # Resampling with log-trick update
            nu += np.log(2 * np.pi * np.i0(tempDispersion))
            nuMax = np.max(nu)
            tempNu = np.exp( nu - nuMax )
            c = np.sum(tempNu)
            tempNu /= c
            ess[iSMC-1] = 1 / (np.sum(tempNu**2))

            if ess[iSMC-1] < NT*float(N):
                nu = np.exp( nu - nuMax )
                if iter > 0:
                    logZ[iter] = logZ[iter-1] + nuMax + np.log( np.sum(nu) ) - np.log(N)
                else:
                    logZ[iter] = nuMax + np.log( np.sum(nu) ) - np.log(N)
                c = np.sum(nu)
                nu /= c
                ancestors = hlp.resampling( nu, scheme=resamp )
                nu = np.zeros( N )
                trajectory[:,:iSMC] = trajectory[ancestors, :iSMC]
                iter += 1

            # Calculate optimal proposal and adjustment multipliers
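            # Each previously sampled neighbour contributes a von Mises factor
            # to the proposal; (tempMean, tempDispersion) are updated with the
            # resultant-vector formulas so that the product of factors remains
            # a single von Mises distribution.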
            ix, iy = hlp.unravel_index( order[iSMC], (nx,ny) )
            tempMean = np.zeros( N )
            tempDispersion = np.zeros( N )
            if ix > 0:
                tempInd = hlp.ravel_multi_index( (ix-1,iy), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            else:
                tempInd = hlp.ravel_multi_index( (nx-1,iy), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            if ix < nx-1:
                tempInd = hlp.ravel_multi_index( (ix+1,iy), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            else:
                tempInd = hlp.ravel_multi_index( (0,iy), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            if iy > 0:
                tempInd = hlp.ravel_multi_index( (ix,iy-1), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            else:
                tempInd = hlp.ravel_multi_index( (ix,ny-1), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            if iy < ny-1:
                tempInd = hlp.ravel_multi_index( (ix,iy+1), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            else:
                tempInd = hlp.ravel_multi_index( (ix,0), (nx,ny) )
                if tempInd in order[:iSMC]:
                    kappa = self.J
                    tempIndSMC = indSorted[orderSorted.searchsorted(tempInd)]
                    Y = tempDispersion*np.sin( -tempMean ) + kappa*np.sin( -trajectory[:,tempIndSMC] )
                    X = tempDispersion*np.cos( -tempMean ) + kappa*np.cos( -trajectory[:,tempIndSMC] )
                    tempDispersion = np.sqrt( tempDispersion**2 + kappa**2 + 2*kappa*tempDispersion*np.cos(-tempMean+trajectory[:,tempIndSMC] ) )
                    tempMean = -np.arctan2(Y ,X)
            for iParticle in range(N):
                trajectory[iParticle, iSMC] = hlp.vonmises(tempMean[iParticle], tempDispersion[iParticle])

        nu += np.log(2 * np.pi * np.i0(tempDispersion))
        nuMax = np.max(nu)
        nu = np.exp( nu - nuMax )
        logZ[iter] = logZ[iter-1] + nuMax + np.log( np.sum(nu) ) - np.log(N)
        
        if verbose:
            c = np.sum(nu)
            nu /= c
            trajMean = np.mean( (np.tile(nu, (len(order),1))).T*trajectory, axis=0 )
            return logZ, trajMean[order].reshape( (nx,ny) ), ess
        else:
            return logZ[iter]
Example #13
0
def runModular(tStart, tEnd, Np, N, M):
    r"""Run the drought detection filtering algorithm (NSMC) for a specific 
    region.
    
    Parameters
    ----------
    tStart : int
        Start year.
    tEnd : int
        End year.
    Np : int
        Number of particles, 1st level.
    N : int
        Number of particles, 2nd level.
    M : int
        Number of particles, 3rd level.
    
    Returns
    -------
    None
        Writes the estimated marginal posterior mean for each year to a CSV file.
    """
    # Model init
    region = 'us'
    #region = 'sahel'
    if region == 'us':
        X = np.zeros((Np, 20, 30))
        xC = np.zeros((20, 30), dtype=bool)
    else:
        X = np.zeros((Np, 24, 44))
        xC = np.zeros((24, 44), dtype=bool)
    logZ = np.zeros(Np)

    q = []

    for i in range(Np):
        q.append(nsmc.nestedSMC(t=tStart, N=N, M=M, xCond=xC))
        logZ[i] = q[i].logZ
    maxLZ = np.max(logZ)
    w = np.exp(logZ - maxLZ)
    w /= np.sum(w)
    ESS = 1 / np.sum(w**2)
    ancestors = hlp.resampling(w)
    for i in range(Np):
        X[i, :, :] = q[ancestors[i]].simulate()

    folder = '/data/chran60/nestedsmc/drought'
    np.savetxt(folder + '/Np' + str(Np) + '_M' + str(N) + '_M' + str(M) +
               '_t' + str(tStart) + region + '.csv',
               np.mean(X, axis=0),
               delimiter=',')

    for t in np.arange(1, tEnd - tStart + 1):
        print('t: ', tStart + t, ' ESS: ', ESS)
        q = []
        for i in range(Np):
            q.append(nsmc.nestedSMC(t=t + tStart, N=N, M=M, xCond=X[i, :, :]))
            logZ[i] = q[i].logZ
        maxLZ = np.max(logZ)
        w = np.exp(logZ - maxLZ)
        w /= np.sum(w)
        ESS = 1 / np.sum(w**2)
        ancestors = hlp.resampling(w)
        for i in range(Np):
            X[i, :, :] = q[ancestors[i]].simulate()
        np.savetxt(folder + '/Np' + str(Np) + '_M' + str(N) + '_M' + str(M) +
                   '_t' + str(tStart + t) + region + '.csv',
                   np.mean(X, axis=0),
                   delimiter=',')
Example #14
0
    def __init__(self, params, Y, N, xCond=None):
        # Model init
        d = len(Y)

        def logPhi(x, y):
            return -0.5 * params.tauPhi * (x - y)**2

        # SMC init
        if xCond is None:
            xCond = np.zeros(d)
        logZ = 0.
        X = np.zeros((N, d))
        Xa = np.zeros((N, d))
        ancestors = np.zeros(N)
        w = np.zeros((N, d))
        W = np.zeros((N, d))
        logW = np.zeros(N)
        #ESS = np.zeros( d )
        #NT = N/2
        #resamp = 0

        # i=1
        X[:, 0] = params.tauRho * params.a * xCond[0] + (
            1 / np.sqrt(params.tauRho)) * np.random.normal(size=N)

        # Weighting
        logW = logPhi(X[:, 0], Y[0])
        maxLogW = np.max(logW)
        w[:, 0] = np.exp(logW - maxLogW)
        logZ += maxLogW + np.log(np.sum(w[:, 0])) - np.log(N)
        w[:, 0] /= np.sum(w[:, 0])
        ancestors = hlp.resampling(w[:, 0])
        X[:, 0] = X[ancestors, 0]
        Xa[:, 0] = X[:, 0]
        #tempW = w[:,0] / np.sum(w[:,0])
        #ESS[0] = 1/np.sum(tempW**2)

        #if ESS[0] < NT:
        #logZ += maxLogW + np.log(np.sum(w[:,0])) - np.log(N)

        #w[:,0] /= np.sum(w[:,0])
        #ancestors = hlp.resampling(w[:,0],scheme='sys')
        #X[:,0] = X[ancestors,0]

        #logW = np.zeros( N )
        #else:
        #ancestors = np.arange(N)

        # i=2:d
        for i in np.arange(1, d):
            # Propagate
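            # Conjugate Gaussian step in information form: the temporal factor
            # (precision tauRho, mean a*xCond[i]) and the chain-coupling factor
            # (precision tauPsi, mean X[:, i-1]) combine into precision tau and
            # mean mu below.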
            tau = params.tauRho + params.tauPsi
            mu = (params.tauRho * params.a * xCond[i] +
                  params.tauPsi * X[:, i - 1]) / tau
            X[:, i] = mu + (1 / np.sqrt(tau)) * np.random.normal(size=N)

            # Weighting, Resampling
            logW = logPhi(X[:, i], Y[i])
            maxLogW = np.max(logW)
            w[:, i] = np.exp(logW - maxLogW)
            logZ += maxLogW + np.log(np.sum(w[:, i])) - np.log(N)
            w[:, i] /= np.sum(w[:, i])
            ancestors = hlp.resampling(w[:, i])
            X[:, i] = X[ancestors, i]
            Xa[:, :i] = Xa[ancestors, :i]
            Xa[:, i] = X[:, i]
            #tempW = w[:,i] / np.sum(w[:,i])
            #ESS[i] = 1/np.sum(tempW**2)

            ## ESS-based Resampling
            #if ESS[i] < NT or i == d-1:
            #logZ += maxLogW + np.log(np.sum(w[:,i])) - np.log(N)

            #w[:,i] /= np.sum(w[:,i])
            #ancestors = hlp.resampling(w[:,i],scheme='sys')
            #X[:,i] = X[ancestors,i]

            #logW = np.zeros( N )
            #else:
            #ancestors = np.arange(N)

        # Save init to class object
        self.N = N
        self.d = d
        self.X = X
        self.Xa = Xa
        self.Y = Y
        self.params = params
        self.logZ = logZ
        self.w = w
        self.xCond = xCond
Example #15
0
NT = N/2
Xcur = np.zeros( (N, d) )
Xprev = np.zeros( (N, d) )
logW = np.zeros(N)
w = np.ones( N )
ancestors = np.zeros( N )
ESS = N*np.ones( T*d )

filename = './results/d'+str(d)+'_N'+str(N)+'tauPhi'+str(tauPhi)+'_nipsSMC.csv'
f = open(filename, 'w')
f.close()


for t in range(T):
    if ESS[t*d-1] < NT:
        ancestors = hlp.resampling(w,scheme='sys')
        Xprev = Xprev[ancestors,:]
        w = np.ones( N )
        
    Xcur[:, 0] = a*Xprev[:, 0] + (1/np.sqrt(tauRho))*np.random.normal(size=N)
    logW = logPhi(Xcur[:,0],y[0,t])
    maxLogW = np.max(logW)
    w *= np.exp(logW - maxLogW)
    w /= np.sum(w)
    
    ESS[t*d] = 1/np.sum(w**2)
    
    for i in np.arange(1,d):
        # Resampling
        if ESS[t*d+i-1] < NT:
            ancestors = hlp.resampling(w,scheme='sys')
Example #16
0
def runBootstrap(d, tauPhi, N, nrRuns):
    r"""Run bootstrap particle filtering on high-dimensional LGSS.
    
    Parameters
    ----------
    d : int
        State dimension.
    tauPhi : float
        Measurement precision.
    N : int
        Number of particles.
    nrRuns : int
        Number of independent runs of the algorithm.
    
    Returns
    -------
    None
        Writes per-time-step ESS, E[X] and E[X**2] estimates to a CSV file.
    """
    a = 0.5
    tauPsi = 1.
    tauRho = 1.
    filename = 'simulatedData/d' + str(d) + 'tauPhi' + str(tauPhi) + 'y.txt'
    y = np.loadtxt(filename)
    filename = 'simulatedData/d' + str(d) + 'tauPhi' + str(tauPhi) + 'P.txt'
    P = np.loadtxt(filename)
    P = P[:d, :d]

    T = y.shape[1]

    for j in range(nrRuns):
        filename = './results/paper/d' + str(d) + '_N' + str(
            N) + 'tauPhi' + str(tauPhi) + '_bootstraprun' + str(j + 1) + '.csv'
        f = open(filename, 'w')
        f.close()

        xCur = np.zeros((N, d))
        xPrev = np.zeros((N, d))
        ancestors = np.zeros(N)
        weights = np.zeros(N)
        logWeights = np.zeros(N)
        ESS = np.zeros(T)

        xCur = np.random.multivariate_normal(np.zeros(d), P, size=N)
        xPrev = xCur.copy()  # copy so in-place updates of xCur do not alias xPrev

        # t = 1
        for i in range(N):
            logWeights[i] = -0.5 * tauPhi * np.sum((xCur[i, :] - y[:, 0])**2)
        maxLw = np.max(logWeights)
        weights = np.exp(logWeights - maxLw)
        weights /= np.sum(weights)
        ancestors = hlp.resampling(weights)
        xCur = xCur[ancestors, :]
        ESS[0] = 1 / np.sum(weights**2)

        f = open(filename, 'a')
        tmpVec = np.r_[1, ESS[0],
                       np.mean(xCur, axis=0),
                       np.mean(xCur**2, axis=0)]
        np.savetxt(f, tmpVec.reshape((1, len(tmpVec))), delimiter=',')
        f.close()

        # t > 1
        for t in np.arange(1, T):
            # Resampling
            ancestors = hlp.resampling(weights)

            # Generate samples
            rndSamp = np.random.multivariate_normal(np.zeros(d), P, size=N)

            # Propagate
            for i in range(N):
                mu = tauRho * a * np.dot(P, xPrev[ancestors[i], :])
                xCur[i, :] = mu + rndSamp[i, :]
                logWeights[i] = -0.5 * tauPhi * np.sum(
                    (xCur[i, :] - y[:, t])**2)
            maxLw = np.max(logWeights)
            weights = np.exp(logWeights - maxLw)
            weights /= np.sum(weights)
            ESS[t] = 1 / np.sum(weights**2)
            xPrev = xCur.copy()  # copy: mu in the next iteration must use time-t values

            f = open(filename, 'a')
            tmpVec = np.r_[t + 1, ESS[t],
                           np.mean(xCur, axis=0),
                           np.mean(xCur**2, axis=0)]
            np.savetxt(f, tmpVec.reshape((1, len(tmpVec))), delimiter=',')
            f.close()