def simulate(self, BS=True):
    """Simulate a properly weighted sample, optionally via backward simulation."""
    if BS:
        C1 = 0.5

        def logPsi(xp, x):
            # Interaction potential between neighbouring columns, summed over
            # the spatial dimension to give one log-weight per particle.
            return np.sum(C1 * (xp == x).astype(float), axis=1)

        w = np.zeros(self.N)
        logW = np.zeros(self.N)
        Xout = np.zeros((self.I, self.J), dtype=bool)
        b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), 1)
        Xout[:, -1] = self.X[b, :, -1]
        for j in np.arange(self.J - 1)[::-1]:
            logW = logPsi(Xout[:, j + 1], self.X[:, :, j])
            maxLogW = np.max(logW)
            w = np.exp(logW - maxLogW)
            b = hlp.discreteSampling(w, np.arange(self.N), 1)
            Xout[:, j] = self.X[b, :, j]
        return Xout
    else:
        # Without backward simulation, return a single complete trajectory
        # drawn uniformly among the particles.
        b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), 1)
        return self.X[b, :, :]
def simulate(self, M, BS=True): r"""Simulate properly weighted sample. Parameters ---------- BS : bool Sample using backward simulation. Returns ------- Xout : 1-D array_like Simulated trajectory. """ if BS: def logPsi(xp,x): return -0.5*self.params.tauPsi*(xp-x)**2 w = np.zeros( self.N ) logW = np.zeros( self.N ) Xout = np.zeros( (M, self.d) ) b = hlp.discreteSampling(np.ones(self.N),np.arange(self.N),M) Xout[:,-1] = self.X[b,-1] for i in np.arange(self.d-1)[::-1]: for j in np.arange(M): logW = logPsi(Xout[j,i+1],self.X[:,i]) maxLogW = np.max(logW) w = np.exp(logW - maxLogW) b = hlp.discreteSampling(w, np.arange(self.N), 1) Xout[j,i] = self.X[b,i] return Xout else: b = hlp.discreteSampling(np.ones(self.N),np.arange(self.N),M) return self.Xa[b,:]
def simulate(self, BS=True): """Simulate properly weighted sample. Parameters ---------- BS : bool Sample using backward simulation. Returns ------- Xout : 2-D array_like Simulated trajectory. """ if BS: C1 = 0.5 def logPsi(xp,x): return np.sum(C1*(x==xp).astype(float)) w = np.zeros( self.N ) logW = np.zeros( self.N ) Xout = np.zeros( (self.I, self.J), dtype=bool ) b = hlp.discreteSampling(np.ones(self.N),np.arange(self.N),1) Xout[:,-1] = self.X[b,:,-1] for i in np.arange(self.J-1)[::-1]: logW = logPsi(Xout[:,i+1],self.X[:,:,i]) maxLogW = np.max(logW) w = np.exp(logW - maxLogW) b = hlp.discreteSampling(w, np.arange(self.N), 1) Xout[:,i] = self.X[b,:,i] return Xout else: b = hlp.discreteSampling(np.ones(self.N),np.arange(self.N),M) return self.X[b,:,:]
def simulate(self, BS=True): """Simulate properly weighted sample. Parameters ---------- BS : bool Sample using backward simulation. Returns ------- Xout : 1-D array_like Simulated trajectory. """ if BS: C1 = 0.5 def logPsi(xp, x): return C1 * (x == xp) w = np.zeros(self.N) logW = np.zeros(self.N) Xout = np.zeros(self.I) b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), 1) Xout[-1] = self.X[b, -1] for i in np.arange(self.I - 1)[::-1]: logW = logPsi(Xout[i + 1], self.X[:, i]) maxLogW = np.max(logW) w = np.exp(logW - maxLogW) b = hlp.discreteSampling(w, np.arange(self.N), 1) Xout[i] = self.X[b, i] return Xout else: b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), M) return self.Xa[b, :]
def simulate(self, M, BS=True): r"""Simulate properly weighted sample. Parameters ---------- BS : bool Sample using backward simulation. Returns ------- Xout : 1-D array_like Simulated trajectory. """ if BS: def logPsi(xp, x): return -0.5 * self.params.tauPsi * (xp - x)**2 w = np.zeros(self.N) logW = np.zeros(self.N) Xout = np.zeros((M, self.d)) b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), M) Xout[:, -1] = self.X[b, -1] for i in np.arange(self.d - 1)[::-1]: for j in np.arange(M): logW = logPsi(Xout[j, i + 1], self.X[:, i]) maxLogW = np.max(logW) w = np.exp(logW - maxLogW) b = hlp.discreteSampling(w, np.arange(self.N), 1) Xout[j, i] = self.X[b, i] return Xout else: b = hlp.discreteSampling(np.ones(self.N), np.arange(self.N), M) return self.Xa[b, :]
for col in range(d):
    for row in range(d):
        # Propagate: pick the cell itself or a random neighbour and use the
        # corresponding previous-iteration value as the proposal mean.
        # wInd indices: 0 = self, 1 = left, 2 = up, 3 = right, 4 = down.
        wInd = np.zeros(5)
        wInd[0] = 1.
        if row > 0:
            wInd[2] = 0.5
        if row < d - 1:
            wInd[4] = 0.5
        if col > 0:
            wInd[1] = 0.5
        if col < d - 1:
            wInd[3] = 0.5
        wInd /= np.sum(wInd)
        xMean = np.zeros(M)
        for j in range(M):
            ind = hlp.discreteSampling(wInd, range(5), 1)
            if ind == 0:
                xMean[j] = Xprev[i, j, row, col]
            elif ind == 1:
                xMean[j] = Xprev[i, j, row, col - 1]
            elif ind == 2:
                xMean[j] = Xprev[i, j, row - 1, col]
            elif ind == 3:
                xMean[j] = Xprev[i, j, row, col + 1]
            elif ind == 4:
                xMean[j] = Xprev[i, j, row + 1, col]
        Xcur[i, :, row, col] = xMean + np.random.normal(size=M)
        # Weight
        logW = logPhi(Xcur[i, :, row, col], y[t, row, col])
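# logPhi is defined elsewhere; the grid sweep above only calls it. A
# minimal sketch, assuming a Gaussian observation model y ~ N(x, sig2Phi)
# (both the functional form and the variance sig2Phi are assumptions):
def logPhi(x, y, sig2Phi=1.0):
    # Log observation density, up to an additive constant.
    return -0.5 * (y - x) ** 2 / sig2Phi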
def __init__(self, t, N, xCond=None):
    """Conditional SMC over the columns of the spatial field, with forward
    filtering / backward sampling along the chain within each column."""
    def phi(x, y, sig2, mu_ab, mu_norm):
        # Observation likelihood for a binary state x.
        return np.exp(-0.5 * (y - mu_ab * x.astype('float')
                              - mu_norm * (1. - x.astype('float'))) ** 2 / sig2)

    def rho(xp, x):
        # Interaction with the conditioned trajectory xCond.
        return np.exp(C2 * (xp.astype('bool') == x.astype('bool')).astype('float'))

    def psi(xp, x):
        # Interaction with the previous column.
        return np.exp(C1 * (xp == x).astype('float'))

    C1 = 0.5
    C2 = 3.

    # Model init
    xDomain = np.arange(2)
    psiMat = np.array([
        np.exp(C1 * (xDomain.astype('bool') == False).astype('float')),
        np.exp(C1 * (xDomain.astype('bool') == True).astype('float')),
    ])

    # Load parameters
    # region = 'dustBowl'
    region = 'sahel'
    filename = 'parameters/' + region + 'Sigma2_N35-55_W90-120_downsampled.csv'
    sigma2 = np.loadtxt(filename, delimiter=',')
    filename = 'parameters/' + region + 'MuAb_N35-55_W90-120_downsampled.csv'
    muAb = np.loadtxt(filename, delimiter=',')
    filename = 'parameters/' + region + 'MuNorm_N35-55_W90-120_downsampled.csv'
    muNorm = np.loadtxt(filename, delimiter=',')
    filename = 'processedData/' + region + 'Yt' + str(t) + '_N35-55_W90-120_downsampled.csv'
    Y = np.loadtxt(filename, delimiter=',')
    I = Y.shape[0]
    J = Y.shape[1]

    # SMC init
    X = np.zeros((N, I, J), dtype=bool)
    ancestors = np.zeros(N)
    logZ = 0.
    logW = np.zeros(N)
    w = np.zeros(N)
    ESS = np.zeros(J)
    msg = np.zeros((N, I, 2))
    c = np.zeros((N, I))

    # ---------------
    # SMC
    # ---------------
    # SMC first iteration, j = 0
    # Forward filtering
    unaryFactor = np.ones((N, I, 2))
    for n in range(N):
        unaryFactor[n, 0, :] *= phi(xDomain, Y[0, 0], sigma2[0, 0], muAb[0, 0], muNorm[0, 0])
        unaryFactor[n, 0, :] *= rho(xDomain, xCond[0, 0])
        msg[n, 0, :] = np.dot(psiMat, unaryFactor[n, 0, :])
        c[n, 0] = np.sum(msg[n, 0, :])
        msg[n, 0, :] /= c[n, 0]
        for i in np.arange(1, I - 1):
            unaryFactor[n, i, :] *= phi(xDomain, Y[i, 0], sigma2[i, 0], muAb[i, 0], muNorm[i, 0])
            unaryFactor[n, i, :] *= rho(xDomain, xCond[i, 0])
            msg[n, i, :] = np.dot(psiMat, unaryFactor[n, i, :] * msg[n, i - 1, :])
            c[n, i] = np.sum(msg[n, i, :])
            msg[n, i, :] /= c[n, i]
        unaryFactor[n, I - 1, :] *= phi(xDomain, Y[I - 1, 0], sigma2[I - 1, 0],
                                        muAb[I - 1, 0], muNorm[I - 1, 0])
        unaryFactor[n, I - 1, :] *= rho(xDomain, xCond[I - 1, 0])

    # Backward sampling
    for n in range(N):
        tempDist = unaryFactor[n, I - 1, :] * msg[n, I - 2, :]
        tempDist /= np.sum(tempDist)
        X[n, I - 1, 0] = hlp.discreteSampling(tempDist, xDomain, 1)
        for i in np.arange(1, I - 1)[::-1]:
            tempDist = unaryFactor[n, i, :] * msg[n, i - 1, :]
            tempDist /= np.sum(tempDist)
            X[n, i, 0] = hlp.discreteSampling(
                tempDist * psiMat[:, int(X[n, i + 1, 0])], xDomain, 1)
        # Boundary site i = 0 has no incoming message; condition on site 1 only.
        tempDist = unaryFactor[n, 0, :] * psiMat[:, int(X[n, 1, 0])]
        tempDist /= np.sum(tempDist)
        X[n, 0, 0] = hlp.discreteSampling(tempDist, xDomain, 1)

    logW = np.sum(np.log(c[:, :I - 1]), axis=1) + np.log(
        np.sum(unaryFactor[:, I - 1, :] * msg[:, I - 2, :], axis=1))
    maxLogW = np.max(logW)
    w = np.exp(logW - maxLogW)
    logZ += maxLogW + np.log(np.sum(w)) - np.log(N)

    # SMC iterations j = 1 to J - 1
    for j in np.arange(1, J):
        # Forward filtering
        unaryFactor = np.ones((N, I, 2))
        for n in range(N):
            unaryFactor[n, 0, :] *= phi(xDomain, Y[0, j], sigma2[0, j], muAb[0, j], muNorm[0, j])
            unaryFactor[n, 0, :] *= rho(xDomain, xCond[0, j])
            unaryFactor[n, 0, :] *= psi(xDomain, X[n, 0, j - 1])
            msg[n, 0, :] = np.dot(psiMat, unaryFactor[n, 0, :])
            c[n, 0] = np.sum(msg[n, 0, :])
            msg[n, 0, :] /= c[n, 0]
            for i in np.arange(1, I - 1):
                unaryFactor[n, i, :] *= phi(xDomain, Y[i, j], sigma2[i, j], muAb[i, j], muNorm[i, j])
                unaryFactor[n, i, :] *= rho(xDomain, xCond[i, j])
                unaryFactor[n, i, :] *= psi(xDomain, X[n, i, j - 1])
                msg[n, i, :] = np.dot(psiMat, unaryFactor[n, i, :] * msg[n, i - 1, :])
                c[n, i] = np.sum(msg[n, i, :])
                msg[n, i, :] /= c[n, i]
            unaryFactor[n, I - 1, :] *= phi(xDomain, Y[I - 1, j], sigma2[I - 1, j],
                                            muAb[I - 1, j], muNorm[I - 1, j])
            unaryFactor[n, I - 1, :] *= rho(xDomain, xCond[I - 1, j])
            unaryFactor[n, I - 1, :] *= psi(xDomain, X[n, I - 1, j - 1])

        logW = np.sum(np.log(c[:, :I - 1]), axis=1) + np.log(
            np.sum(unaryFactor[:, I - 1, :] * msg[:, I - 2, :], axis=1))
        maxLogW = np.max(logW)
        w = np.exp(logW - maxLogW)
        logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
        ancestors = res.resampling(w, 'stratified')

        # Backward sampling
        for n in range(N):
            tempDist = unaryFactor[ancestors[n], I - 1, :] * msg[ancestors[n], I - 2, :]
            tempDist /= np.sum(tempDist)
            X[n, I - 1, j] = hlp.discreteSampling(tempDist, xDomain, 1)
            for i in np.arange(1, I - 1)[::-1]:
                tempDist = unaryFactor[ancestors[n], i, :] * msg[ancestors[n], i - 1, :]
                tempDist /= np.sum(tempDist)
                X[n, i, j] = hlp.discreteSampling(
                    tempDist * psiMat[:, int(X[n, i + 1, j])], xDomain, 1)
            # Boundary site i = 0 has no incoming message.
            tempDist = unaryFactor[ancestors[n], 0, :] * psiMat[:, int(X[n, 1, j])]
            tempDist /= np.sum(tempDist)
            X[n, 0, j] = hlp.discreteSampling(tempDist, xDomain, 1)

    # Save init to class object
    self.N = N
    self.J = J
    self.I = I
    self.X = X
    self.logZ = logZ
    self.w = w
    self.xCond = xCond
    self.ESS = ESS
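# res.resampling(w, 'stratified') above comes from a resampling module that
# is not shown here. The sketch below is an assumption consistent with the
# call site (weights in, N integer ancestor indices out), implementing
# standard stratified resampling:
import numpy as np


def resampling(w, scheme='stratified'):
    # One uniform draw per stratum [n/N, (n+1)/N); ancestors via inverse CDF.
    w = np.asarray(w, dtype=float)
    N = len(w)
    if scheme != 'stratified':
        raise NotImplementedError(scheme)
    u = (np.arange(N) + np.random.random_sample(N)) / N
    return np.digitize(u, np.cumsum(w / np.sum(w)))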
def __init__(self, params, y, N, xTimeCond=None, xSpaceCond=None):
    """SMC over the I sites of a single column, optionally conditioning on
    the neighbouring trajectories in time (xTimeCond) and space (xSpaceCond)."""
    C1 = 0.5
    C2 = 3.

    # Model init
    xDomain = np.arange(2)

    def logPhi(x, y, sig2, mu_ab, mu_norm):
        return -0.5 * (y - mu_ab * x.astype('float')
                       - mu_norm * (1. - x.astype('float'))) ** 2 / sig2

    I = params.I
    muAb = params.muAb
    muNorm = params.muNorm
    sigma2 = params.sigma2

    # SMC init
    X = np.zeros((N, I), dtype=bool)
    ancestors = np.zeros(N)
    logZ = 0.
    logW = np.zeros(N)
    w = np.zeros(N)

    # ---------------
    # SMC
    # ---------------
    # Sample proposal
    tempDist = np.zeros(2)
    if xTimeCond is not None:
        tempDist += C2 * (xTimeCond[0] == xDomain.astype(bool))
    if xSpaceCond is not None:
        tempDist += C1 * (xSpaceCond[0] == xDomain.astype(bool))
    tempDist = np.exp(tempDist)
    tempDist /= np.sum(tempDist)
    X[:, 0] = hlp.discreteSampling(tempDist, xDomain, N)

    # Weighting
    logW = logPhi(X[:, 0], y[0], sigma2[0], muAb[0], muNorm[0])
    maxLogW = np.max(logW)
    w = np.exp(logW - maxLogW)
    logZ = maxLogW + np.log(np.sum(w)) - np.log(N)
    w /= np.sum(w)
    ancestors = hlp.resampling(w)
    X[:, 0] = X[ancestors, 0]

    # SMC main loop
    for i in np.arange(1, I):
        tempDist = np.zeros(2)
        if xTimeCond is not None:
            tempDist += C2 * (xTimeCond[i] == xDomain.astype(bool))
        if xSpaceCond is not None:
            tempDist += C1 * (xSpaceCond[i] == xDomain.astype(bool))
        for iParticle in range(N):
            tempParticleDist = tempDist + C1 * (X[iParticle, i - 1] == xDomain.astype(bool))
            tempParticleDist = np.exp(tempParticleDist)
            tempParticleDist /= np.sum(tempParticleDist)
            X[iParticle, i] = hlp.discreteSampling(tempParticleDist, xDomain, 1)
        logW = logPhi(X[:, i], y[i], sigma2[i], muAb[i], muNorm[i])
        maxLogW = np.max(logW)
        w = np.exp(logW - maxLogW)
        logZ += maxLogW + np.log(np.sum(w)) - np.log(N)
        w /= np.sum(w)
        ancestors = hlp.resampling(w)
        X[:, i] = X[ancestors, i]

    # Save init to class object
    self.N = N
    self.C1 = C1
    self.I = I
    self.X = X
    self.y = y
    self.logZ = logZ
    self.w = w
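# hlp.resampling(w) above takes normalized weights and returns N ancestor
# indices. Its implementation is not shown in this file; the sketch below
# assumes multinomial resampling (the actual helper may well use a
# lower-variance scheme such as systematic or stratified):
import numpy as np


def resampling(w):
    # N i.i.d. draws from the categorical distribution defined by w.
    N = len(w)
    return np.digitize(np.random.random_sample(N), np.cumsum(w))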
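# ----------------------------------------------------------------------
# Note on the weighting pattern used throughout this file: subtracting
# maxLogW before exponentiating is the log-sum-exp stabilization, and the
# increment maxLogW + np.log(np.sum(w)) - np.log(N) equals
# log((1/N) * sum(exp(logW))) computed without underflow. Self-contained check:
# ----------------------------------------------------------------------
import numpy as np

logW = np.array([-1000., -1001., -1002.])
maxLogW = np.max(logW)
stable = maxLogW + np.log(np.sum(np.exp(logW - maxLogW))) - np.log(len(logW))
naive = np.log(np.mean(np.exp(logW)))  # exp underflows to 0, so this is -inf
print(stable, naive)  # approx -1000.69, -inf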