import numpy as _N
import scipy.stats as _ss
import time as _tm
import numpy.polynomial.polynomial as _Npp
#  Project-local dependencies (the Polya-Gamma sampler `lw`, the Kalman-filter
#  AR data container `_kfardat`, the FFBS routine `_kfar`, `patsy`, `_cd`, and
#  helpers such as buildLims, initF, ampAngRep, createDataAR) are imported
#  elsewhere in the original package and are assumed to be available under
#  these names.

def mcmcMixAR1_vo(burn, NMC, y, x, zT, nStates=2, nWins=1, r=40, n=400, model="binomial"):
    """
    r = 40    Negative binomial.  Approaches Poisson for r -> inf
    n = 400   Binomial
    """
    ##############################################
    ##############  Storage for sampled parameters
    ##############################################
    N = len(y) - 1
    smpld_params = _N.empty((NMC + burn, 4 + 2*nStates))       # m1, m2, u1, u2
    z = _N.empty((NMC + burn, N + 1, nStates), dtype=_N.int)   # augmented data
    mnCt_w1 = _N.mean(y[:, 0])
    mnCt_w2 = _N.mean(y[:, 1])

    #  INITIAL samples
    if model == "negative binomial":
        kp_w1 = (y[:, 0] - r[0]) * 0.5
        kp_w2 = (y[:, 1] - r[1]) * 0.5
        p0_w1 = mnCt_w1 / (mnCt_w1 + r[0])   # matches 1 - p of generated
        p0_w2 = mnCt_w2 / (mnCt_w2 + r[1])   # matches 1 - p of generated
        rn = r    # length nWins
    else:
        kp_w1 = y[:, 0] - n[0]*0.5
        kp_w2 = y[:, 1] - n[1]*0.5
        p0_w1 = mnCt_w1 / float(n[0])   # matches 1 - p of generated
        p0_w2 = mnCt_w2 / float(n[1])   # matches 1 - p of generated
        rn = n    # length nWins
    u0_w1 = _N.log(p0_w1 / (1 - p0_w1))   # -1*u generated
    u0_w2 = _N.log(p0_w2 / (1 - p0_w2))   # -1*u generated

    #######  PRIOR parameters
    #  F0 -- flat prior
    # a_F0 = -1
    #  I think a prior assumption of a relatively narrow and high F0 range
    #  is warranted.  Small F0 is close to white noise, and as such can
    #  be confused with the independent trial-to-trial count noise.  Force
    #  it to search for longer-timescale correlations by setting F0 to be
    #  fairly large.
    a_F0 = -0.1    # prior assumption: slow fluctuation
    b_F0 = 1
    #  u -- Gaussian prior.  One mean/variance per (window, state) pair:
    #  (win1 s1) (win1 s2) (win2 s1) (win2 s2)
    u_u = _N.empty(nStates * nWins)
    s2_u = _N.zeros((nStates * nWins, nStates * nWins))
    u_u[:] = (u0_w1*1.2, u0_w1*0.8, u0_w2*1.2, u0_w2*0.8)
    _N.fill_diagonal(s2_u, [0.5, 0.5, 0.5, 0.5])
    #  q2 -- Inverse Gamma prior
    pr_mn_q2 = 0.05
    a_q2 = 2
    B_q2 = (a_q2 + 1) * pr_mn_q2
    #  x0 -- Gaussian prior
    u_x00 = 0
    s2_x00 = 0.5
    #  V00 -- Inverse Gamma prior
    pr_mn_V00 = 1    # mode of prior distribution of variance ~ 1
    a_V00 = 2
    B_V00 = (a_V00 + 1)*pr_mn_V00
    #  m1, m2 -- Dirichlet prior
    alp = _N.ones(nStates)

    #  generate initial values of parameters
    #  generate initial time series
    _d = _kfardat.KFARGauObsDat(N, 1)
    _d.copyData(y[:, 0], y[:, 0])   # dummy data copied

    u_w1 = _N.array([-2.5866893440979424, -1.0986122886681098])
    u_w2 = _N.array([-2.5866893440979424, -1.0986122886681098])
    # u_w1 = _N.random.multivariate_normal(u_u[0:nStates], s2_u[0:nStates, 0:nStates])
    # u_w2 = _N.random.multivariate_normal(u_u[nStates:2*nStates], s2_u[nStates:nStates*2, nStates:nStates*2])
    F0 = ((b_F0) - (a_F0)) * _N.random.rand() + a_F0
    F0 = 0.92      # fixed initial value overrides the draw above
    q2 = B_q2*_ss.invgamma.rvs(a_q2)
    q2 = 0.015     # fixed initial value overrides the draw above
    x00 = u_x00 + _N.sqrt(s2_x00)*_N.random.rand()
    V00 = B_V00*_ss.invgamma.rvs(a_V00)
    # m = _N.random.dirichlet(alp)
    m = _N.zeros(nStates)
    m[0] = _N.sum(1 - zT) / float(N + 1)
    m[1] = 1 - m[0]

    smp_F = _N.zeros(NMC + burn)
    smp_q2 = _N.zeros(NMC + burn)
    smp_u = _N.zeros((NMC + burn, nWins, nStates))   # uL_w1, uH_w1, uL_w2, uH_w2, ...
    smp_m = _N.zeros((NMC + burn, nStates))

    smpx = _N.zeros(N + 1)   # start at 0 + u
    smpx[:] = x[:]
    Bsmpx = _N.zeros((NMC, N + 1))
    ws_w1 = lw.rpg_devroye(rn[0], smpx + u0_w1, num=(N + 1))
    ws_w2 = lw.rpg_devroye(rn[1], smpx + u0_w2, num=(N + 1))
    trm_w1 = _N.empty(nStates)
    trm_w2 = _N.empty(nStates)

    for it in xrange(1, NMC + burn):
        if (it % 50) == 0:
            print it
        #  generate latent zs.  Depends on Xs and PG latents.
        #  (Here the state labels zT are treated as known.)
        kw_w1 = kp_w1 / ws_w1
        kw_w2 = kp_w2 / ws_w2
        rnds = _N.random.rand(N + 1)
        z[it, :, 0] = 1 - zT
        z[it, :, 1] = zT

        #  generate PG latents.  Depends on Xs and us, zs.
        us_w1 = _N.dot(z[it, :, :], u_w1)   # either low or high u per bin
        us_w2 = _N.dot(z[it, :, :], u_w2)
        #  us_w1 is like --> [u1 u2 u2 u2 u1 u1 u1 ...]
        ws_w1 = lw.rpg_devroye(rn[0], smpx + us_w1, num=(N + 1))
        ws_w2 = lw.rpg_devroye(rn[1], smpx + us_w2, num=(N + 1))

        _d.copyParams(_N.array([F0]), q2, _N.array([1]), 1)
        #  generate latent AR state
        _d.f_x[0, 0, 0] = x00
        _d.f_V[0, 0, 0] = V00
        btm = 1 / ws_w1 + 1 / ws_w2   # length N + 1
        top = (kw_w1 - us_w1) / ws_w2 + (kw_w2 - us_w2) / ws_w1
        _d.y[:] = top/btm
        _d.Rv[:] = 1 / (ws_w1 + ws_w2)   # time-dependent noise
        smpx = _kfar.armdl_FFBS_1itr(_d, samples=1)

        #  sample m
        dirArgs = _N.empty(nStates)
        for i in xrange(nStates):
            dirArgs[i] = alp[i] + _N.sum(z[it, :, i])
        m[:] = _N.random.dirichlet(dirArgs)

        #  sample u
        for st in xrange(nStates):
            #  win1 for this state
            iw = st + 0*nStates
            A = 0.5*(1/s2_u[iw, iw] + _N.dot(ws_w1, z[it, :, st]))
            B = u_u[iw]/s2_u[iw, iw] + _N.dot(kp_w1 - ws_w1*smpx, z[it, :, st])
            u_w1[st] = B/(2*A) + _N.sqrt(1/(2*A))*_N.random.randn()
            # print "mean u_w1[%(st)d] = %(u).3f" % {"st" : st, "u" : u_w1[st]}
            #  win2 for this state
            iw = st + 1*nStates
            A = 0.5*(1/s2_u[iw, iw] + _N.dot(ws_w2, z[it, :, st]))
            B = u_u[iw]/s2_u[iw, iw] + _N.dot(kp_w2 - ws_w2*smpx, z[it, :, st])
            u_w2[st] = B/(2*A) + _N.sqrt(1/(2*A))*_N.random.randn()
            # print "mean u_w2[%(st)d] = %(u).3f" % {"st" : st, "u" : u_w2[st]}

        #  sample F0
        F0AA = _N.dot(smpx[0:-1], smpx[0:-1])
        F0BB = _N.dot(smpx[0:-1], smpx[1:])
        F0std = _N.sqrt(q2/F0AA)
        F0a, F0b = (a_F0 - F0BB/F0AA) / F0std, (b_F0 - F0BB/F0AA) / F0std
        F0 = F0BB/F0AA + F0std*_ss.truncnorm.rvs(F0a, F0b)
        # print "%(F0).4f  %(q2).4f" % {"F0" : F0, "q2" : q2}

        #####################    sample q2
        a = a_q2 + 0.5*(N + 1)   #  N + 1 - 1
        rsd_stp = smpx[1:] - F0*smpx[0:-1]
        BB = B_q2 + 0.5*_N.dot(rsd_stp, rsd_stp)
        q2 = _ss.invgamma.rvs(a, scale=BB)
        # print q2

        #####################    sample x00
        mn = (u_x00*V00 + s2_x00*x00) / (V00 + s2_x00)
        vr = (V00*s2_x00) / (V00 + s2_x00)
        # x00 = mn + _N.sqrt(vr)*_N.random.randn()
        #####################    sample V00
        aa = a_V00 + 0.5
        BB = B_V00 + 0.5*(smpx[0] - x00)*(smpx[0] - x00)
        # V00 = _ss.invgamma.rvs(aa, scale=BB)

        smp_F[it] = F0
        smp_q2[it] = q2
        smp_u[it, 0, :] = u_w1
        smp_u[it, 1, :] = u_w2
        smp_m[it, :] = m

        if it >= burn:
            Bsmpx[it-burn, :] = smpx

    return Bsmpx, smp_F, smp_q2, smp_u, smp_m, z, _d
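
#  ---------------------------------------------------------------------------
#  Usage sketch (illustration only, not part of the original source).  It
#  generates synthetic two-window binomial counts driven by a slow AR(1)
#  latent plus known high/low state labels, then runs mcmcMixAR1_vo on them.
#  All sizes and parameter values below are invented for the example.
#  ---------------------------------------------------------------------------
def _example_run_mcmcMixAR1_vo(burn=50, NMC=200, N=400):
    n_w = _N.array([400, 400])                  # binomial trial counts, per window
    F0tr, q2tr = 0.95, 0.01                     # true AR(1) parameters
    xtr = _N.zeros(N + 1)
    for t in xrange(1, N + 1):
        xtr[t] = F0tr*xtr[t-1] + _N.sqrt(q2tr)*_N.random.randn()
    zTtr = (_N.random.rand(N + 1) < 0.5).astype(_N.int)   # known state labels
    utr = _N.array([-2.5, -1.1])                # low / high logit offsets
    p = 1 / (1 + _N.exp(-(xtr + utr[zTtr])))    # per-bin success probability
    ytr = _N.empty((N + 1, 2), dtype=_N.int)
    ytr[:, 0] = _N.random.binomial(n_w[0], p)
    ytr[:, 1] = _N.random.binomial(n_w[1], p)
    return mcmcMixAR1_vo(burn, NMC, ytr, xtr, zTtr, nStates=2, nWins=2,
                         n=n_w, model="binomial")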
def mcmcMixAR1(burn, NMC, y, x, zT, nStates=2, r=40, n=400, model="binomial", set=10):
    """
    Debugging variant: sets up the sampler, prints diagnostics for the
    Polya-Gamma and pseudo-observation means, and returns the KF data
    structure without completing the Gibbs sweep.

    r = 40    Negative binomial.  Approaches Poisson for r -> inf
    n = 400   Binomial
    """
    ##############################################
    ##############  Storage for sampled parameters
    ##############################################
    N = len(y) - 1
    nStates = 2
    smpld_params = _N.empty((NMC + burn, 4 + 2*nStates))   # m1, m2, u1, u2
    z = _N.empty((NMC + burn, N + 1, nStates))   # augmented data
    mnCt = _N.mean(y)

    #  INITIAL samples
    if model == "negative binomial":
        kp = (y - r) * 0.5
        p0 = mnCt / (mnCt + r)   # matches 1 - p of generated
        rn = r
    else:
        kp = y - n*0.5
        p0 = mnCt / float(n)     # matches 1 - p of generated
        rn = n
    u0 = _N.log(p0 / (1 - p0))   # -1*u generated

    #######  PRIOR parameters
    #  F0 -- flat prior
    # a_F0 = -1
    #  I think a prior assumption of a relatively narrow and high F0 range
    #  is warranted.  Small F0 is close to white noise, and as such can
    #  be confused with the independent trial-to-trial count noise.  Force
    #  it to search for longer-timescale correlations by setting F0 to be
    #  fairly large.
    a_F0 = -0.1    # prior assumption: slow fluctuation
    b_F0 = 1
    #  u -- Gaussian prior
    u_u = _N.empty(nStates)
    s2_u = _N.zeros((nStates, nStates))
    u_u[:] = (u0*1.2, u0*0.8)
    _N.fill_diagonal(s2_u, [0.5, 0.5])
    #  q2 -- Inverse Gamma prior
    pr_mn_q2 = 0.05
    a_q2 = 2
    B_q2 = (a_q2 + 1)*pr_mn_q2
    #  x0 -- Gaussian prior
    u_x00 = 0
    s2_x00 = 0.5
    #  V00 -- Inverse Gamma prior
    pr_mn_V00 = 1    # mode of prior distribution of variance ~ 1
    a_V00 = 2
    B_V00 = (a_V00 + 1)*pr_mn_V00
    #  m1, m2 -- Dirichlet prior
    alp = _N.ones(nStates)

    #  generate initial values of parameters
    #  generate initial time series
    _d = _kfardat.KFARGauObsDat(N, 1)
    # _d.copyData(y, x_st_cnts[:, 0])
    _d.copyData(y, y)   # dummy data copied

    u = _N.array([-2.1972245773362191, -0.40546510810816427])
    # if set == 13:
    #     u = _N.array([-1.3862943611198906, 0])
    # if set == 11:
    #     u = _N.array([-2.5866893440979424, -1.5163474893680886])
    # if set == 10:
    #     u = _N.array([-2.5866893440979424, -1.0986122886681098])
    F0 = 0.92
    q2 = 0.025
    x00 = u_x00 + _N.sqrt(s2_x00)*_N.random.rand()
    V00 = B_V00*_ss.invgamma.rvs(a_V00)
    m = _N.random.dirichlet(alp)

    smpld_params[0, 0:4] = (F0, q2, x00, V00)
    for i in xrange(nStates):
        smpld_params[0, i + 4] = m[i]
        smpld_params[0, i + 6] = u[i]

    smpx = x
    Bsmpx = _N.zeros((NMC, N + 1))
    trm = _N.empty(nStates)
    ws = lw.rpg_devroye(rn, smpx + u0, num=(N + 1))

    for it in xrange(1, NMC + burn):
        if (it % 50) == 0:
            print it
        #  generate latent zs.  Depends on Xs and PG latents
        kw = kp / ws
        rnds = _N.random.rand(N + 1)
        z[it, :, 0] = 1 - zT
        z[it, :, 1] = zT

        #  generate PG latents.  Depends on Xs and us, zs
        us = _N.dot(z[it, :, :], u)
        ws = lw.rpg_devroye(rn, smpx + us, num=(N + 1))
        _d.copyParams(_N.array([F0]), q2, _N.array([1]), 1)
        print "mean ws %.3f" % _N.mean(ws)
        print "mean kp %.3f" % _N.mean(kp)

        #  generate latent AR state
        _d.f_x[0, 0, 0] = x00
        _d.f_V[0, 0, 0] = V00
        _d.y[:] = kp / ws - us
        _d.Rv[:] = 1 / ws   # time-dependent noise
        print _N.mean(kp / ws - us)
        print _N.mean(us)

        return _d   # debug: bail out after the first Gibbs pass
def gibbsSamp(self):   ###########################  GIBBSSAMPH
    oo = self
    ooTR = oo.TR
    ook = oo.k
    ooNMC = oo.NMC
    ooN = oo.N
    oo.x00 = _N.array(oo.smpx[:, 2])
    oo.V00 = _N.zeros((ooTR, ook, ook))

    kpOws = _N.empty((ooTR, ooN + 1))
    lv_u = _N.zeros((ooN + 1, ooN + 1))
    psthOffset = _N.empty((ooTR, ooN + 1))
    Wims = _N.empty((ooTR, ooN + 1, ooN + 1))
    smWimOm = _N.zeros(ooN + 1)

    iD = _N.diag(_N.ones(oo.B.shape[0]) * 10)   # prior precision of spline coefficients
    D = _N.linalg.inv(iD)                       # prior covariance
    BDB = _N.dot(oo.B.T, _N.dot(D, oo.B))

    it = 0

    oo.lrn = _N.empty((ooTR, ooN + 1))
    if oo.l2 is None:
        oo.lrn[:] = 1
    else:
        for tr in xrange(ooTR):
            oo.lrn[tr] = oo.build_lrnLambda2(tr)

    while (it < ooNMC + oo.burn - 1):
        t1 = _tm.time()
        it += 1
        print it
        if (it % 10) == 0:
            print it

        #  generate latent AR state
        BaS = _N.dot(oo.B.T, oo.aS)
        for m in xrange(ooTR):
            psthOffset[m] = BaS

        ###  PG latent variable sample
        # tPG1 = _tm.time()
        for m in xrange(ooTR):
            lw.rpg_devroye(oo.rn, psthOffset[m], out=oo.ws[m])   # devroye sampler
        if ooTR == 1:
            oo.ws = oo.ws.reshape(1, ooN + 1)
        _N.divide(oo.kp, oo.ws, out=kpOws)

        t1 = _tm.time()
        # for m in xrange(ooTR):
        #     Wims[m] = _N.diag(oo.ws[m])
        _N.einsum("tj,tj->j", oo.ws, kpOws, out=smWimOm)   # summed over trials
        t2 = _tm.time()
        # ilv_u = _N.sum(Wims, axis=0)   # ilv_u is diagonal
        ilv_u = _N.diag(_N.sum(oo.ws, axis=0))
        #  diag(_N.linalg.inv(Bi)) == diag(1./Bi).  Bii = inv(Bi)
        _N.fill_diagonal(lv_u, 1. / _N.diagonal(ilv_u))
        lm_u = _N.dot(lv_u, smWimOm)   # nondiag of 1./Bi are inf
        t3 = _tm.time()

        iVAR = _N.dot(oo.B, _N.dot(ilv_u, oo.B.T)) + iD
        VAR = _N.linalg.inv(iVAR)   # knots x knots
        iBDBW = _N.linalg.inv(BDB + lv_u)
        M = oo.u_a + _N.dot(D, _N.dot(oo.B, _N.dot(iBDBW, lm_u - _N.dot(oo.B.T, oo.u_a))))
        t4 = _tm.time()
        print (t2 - t1)
        print (t3 - t2)
        print (t4 - t3)

        # M = _N.dot(_N.linalg.inv(_N.dot(oo.B, oo.B.T)), _N.dot(oo.B, O))
        #  multivariate_normal returns a row vector
        oo.aS = _N.random.multivariate_normal(M, VAR, size=1)[0, :]
        oo.smp_aS[it, :] = oo.aS
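
#  ---------------------------------------------------------------------------
#  Minimal standalone sketch (not part of the original class) of the
#  conditional Gaussian draw for the PSTH spline coefficients that gibbsSamp
#  performs.  Given Polya-Gamma draws ws (TR x T) and kp = counts - n/2
#  (TR x T), coefficients with prior N(u_a, inv(iD)) have a Gaussian full
#  conditional.  The helper name `draw_psth_coeffs` and the information-form
#  mean/covariance below are my own restatement; the original code computes
#  the same posterior through an algebraically equivalent route.
#  ---------------------------------------------------------------------------
def draw_psth_coeffs(B, ws, kp, u_a, iD):
    #  B   : (df x T) spline basis, rows index basis functions
    #  ws  : (TR x T) Polya-Gamma latent variables
    #  kp  : (TR x T) kappa = counts - n/2
    #  u_a : (df,)    prior mean of coefficients
    #  iD  : (df x df) prior precision of coefficients
    omega = _N.sum(ws, axis=0)               # pooled PG "precisions", length T
    kappa = _N.sum(kp, axis=0)               # pooled kappa, length T
    iVAR = _N.dot(B * omega, B.T) + iD       # posterior precision (df x df)
    VAR = _N.linalg.inv(iVAR)                # posterior covariance
    M = _N.dot(VAR, _N.dot(B, kappa) + _N.dot(iD, u_a))   # posterior mean
    return _N.random.multivariate_normal(M, VAR)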
def initGibbs(self):   ################################ INITGIBBS
    oo = self
    if oo.bpsth:
        oo.B = patsy.bs(_N.linspace(0, (oo.t1 - oo.t0)*oo.dt, oo.t1 - oo.t0),
                        df=oo.dfPSTH, knots=oo.kntsPSTH,
                        include_intercept=True)   # spline basis
        if oo.dfPSTH is None:
            oo.dfPSTH = oo.B.shape[1]
        oo.B = oo.B.T   # My convention for beta
        oo.aS = _N.linalg.solve(_N.dot(oo.B, oo.B.T),
                                _N.dot(oo.B, _N.ones(oo.t1 - oo.t0)*_N.mean(oo.u)))

    #  generate initial values of parameters
    oo._d = _kfardat.KFARGauObsDat(oo.TR, oo.N, oo.k)
    oo._d.copyData(oo.y)

    sPR = "cmpref"
    if oo.use_prior == _cd.__FREQ_REF__:
        sPR = "frqref"
    elif oo.use_prior == _cd.__ONOF_REF__:
        sPR = "onfref"
    sAO = "sf" if (oo.ARord == _cd.__SF__) else "nf"
    ts = "[%(1)d-%(2)d]" % {"1": oo.t0, "2": oo.t1}
    baseFN = "rs=%(rs)d" % {"pr": sPR, "rs": oo.restarts}
    setdir = "%(sd)s/AR%(k)d_%(ts)s_%(pr)s_%(ao)s" % {"sd": oo.setname, "k": oo.k,
                                                      "ts": ts, "pr": sPR, "ao": sAO}
    #  baseFN_inter   baseFN_comps   baseFN_comps

    ###############
    oo.Bsmpx = _N.zeros((oo.TR, oo.NMC + oo.burn, (oo.N + 1) + 2))
    oo.smp_u = _N.zeros((oo.TR, oo.burn + oo.NMC))
    oo.smp_q2 = _N.zeros((oo.TR, oo.burn + oo.NMC))
    oo.smp_x00 = _N.empty((oo.TR, oo.burn + oo.NMC - 1, oo.k))
    #  store samples of
    oo.allalfas = _N.empty((oo.burn + oo.NMC, oo.k), dtype=_N.complex)
    oo.uts = _N.empty((oo.TR, oo.burn + oo.NMC, oo.R, oo.N + 2))
    oo.wts = _N.empty((oo.TR, oo.burn + oo.NMC, oo.C, oo.N + 3))
    oo.ranks = _N.empty((oo.burn + oo.NMC, oo.C), dtype=_N.int)
    oo.pgs = _N.empty((oo.TR, oo.burn + oo.NMC, oo.N + 1))
    oo.fs = _N.empty((oo.burn + oo.NMC, oo.C))
    oo.amps = _N.empty((oo.burn + oo.NMC, oo.C))
    if oo.bpsth:
        oo.smp_aS = _N.zeros((oo.burn + oo.NMC, oo.dfPSTH))

    radians = buildLims(oo.Cn, oo.freq_lims, nzLimL=1.)
    oo.AR2lims = 2*_N.cos(radians)

    if (oo.rs < 0):
        oo.smpx = _N.zeros((oo.TR, (oo.N + 1) + 2, oo.k))   # start at 0 + u
        oo.ws = _N.empty((oo.TR, oo._d.N + 1), dtype=_N.float)

        oo.F_alfa_rep = initF(oo.R, oo.Cs, oo.Cn, ifs=oo.ifs).tolist()   # init F_alfa_rep
        print "begin---"
        print ampAngRep(oo.F_alfa_rep)
        print "begin^^^"
        q20 = 1e-3
        oo.q2 = _N.ones(oo.TR)*q20
        oo.F0 = (-1*_Npp.polyfromroots(oo.F_alfa_rep)[::-1].real)[1:]

        ########  Limit the amplitude to something reasonable
        xE, nul = createDataAR(oo.N, oo.F0, q20, 0.1)
        mlt = _N.std(xE) / 0.5    #  we want amplitude around 0.5
        oo.q2 /= mlt*mlt
        xE, nul = createDataAR(oo.N, oo.F0, oo.q2[0], 0.1)

        if oo.model == "Bernoulli":
            oo.initBernoulli()
        # smpx[0, 2:, 0] = x[0]   ##########  DEBUG

        ####  initialize ws if starting for first time
        if oo.TR == 1:
            oo.ws = oo.ws.reshape(1, oo._d.N + 1)
        for m in xrange(oo._d.TR):
            lw.rpg_devroye(oo.rn, oo.smpx[m, 2:, 0] + oo.u[m],
                           num=(oo.N + 1), out=oo.ws[m, :])

    oo.smp_u[:, 0] = oo.u
    oo.smp_q2[:, 0] = oo.q2

    if oo.bpsth:
        oo.u_a = _N.ones(oo.dfPSTH)*_N.mean(oo.u)
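
#  ---------------------------------------------------------------------------
#  Minimal sketch (not from the original source) of the patsy B-spline basis
#  that initGibbs builds for the PSTH.  patsy.bs returns a (time x df) design
#  matrix; initGibbs then transposes it so that rows index basis functions.
#  The window length, bin width, and df below are invented for illustration.
#  ---------------------------------------------------------------------------
def _example_psth_basis(t0=0, t1=1000, dt=0.001, dfPSTH=12):
    tgrid = _N.linspace(0, (t1 - t0)*dt, t1 - t0)
    B = patsy.bs(tgrid, df=dfPSTH, include_intercept=True)   # (t1-t0) x dfPSTH
    return _N.asarray(B).T                                   # dfPSTH x (t1-t0)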
def mcmcMixAR1_vo(burn, NMC, y, x, zT, nStates=2, nWins=1, r=40, n=400, model="binomial", set=10):
    """
    Debugging variant of the two-window sampler: sets up the sampler, prints
    diagnostics for the Polya-Gamma and pseudo-observation means, and returns
    the KF data structure without completing the Gibbs sweep.

    r = 40    Negative binomial.  Approaches Poisson for r -> inf
    n = 400   Binomial
    """
    ##############################################
    ##############  Storage for sampled parameters
    ##############################################
    N = len(y) - 1
    smpld_params = _N.empty((NMC + burn, 4 + 2*nStates))       # m1, m2, u1, u2
    z = _N.empty((NMC + burn, N + 1, nStates), dtype=_N.int)   # augmented data
    mnCt_w1 = _N.mean(y[:, 0])
    mnCt_w2 = _N.mean(y[:, 1])

    #  INITIAL samples
    if model == "negative binomial":
        kp_w1 = (y[:, 0] - r[0]) * 0.5
        kp_w2 = (y[:, 1] - r[1]) * 0.5
        p0_w1 = mnCt_w1 / (mnCt_w1 + r[0])   # matches 1 - p of generated
        p0_w2 = mnCt_w2 / (mnCt_w2 + r[1])   # matches 1 - p of generated
        rn = r    # length nWins
    else:
        kp_w1 = y[:, 0] - n[0]*0.5
        kp_w2 = y[:, 1] - n[1]*0.5
        p0_w1 = mnCt_w1 / float(n[0])   # matches 1 - p of generated
        p0_w2 = mnCt_w2 / float(n[1])   # matches 1 - p of generated
        rn = n    # length nWins
    u0_w1 = _N.log(p0_w1 / (1 - p0_w1))   # -1*u generated
    u0_w2 = _N.log(p0_w2 / (1 - p0_w2))   # -1*u generated

    #######  PRIOR parameters
    #  F0 -- flat prior
    # a_F0 = -1
    #  I think a prior assumption of a relatively narrow and high F0 range
    #  is warranted.  Small F0 is close to white noise, and as such can
    #  be confused with the independent trial-to-trial count noise.  Force
    #  it to search for longer-timescale correlations by setting F0 to be
    #  fairly large.
    a_F0 = -0.1    # prior assumption: slow fluctuation
    b_F0 = 1
    #  u -- Gaussian prior.  One mean/variance per (window, state) pair:
    #  (win1 s1) (win1 s2) (win2 s1) (win2 s2)
    u_u = _N.empty(nStates * nWins)
    s2_u = _N.zeros((nStates * nWins, nStates * nWins))
    u_u[:] = (u0_w1*1.2, u0_w1*0.8, u0_w2*1.2, u0_w2*0.8)
    _N.fill_diagonal(s2_u, [0.5, 0.5, 0.5, 0.5])
    #  q2 -- Inverse Gamma prior
    pr_mn_q2 = 0.05
    a_q2 = 2
    B_q2 = (a_q2 + 1)*pr_mn_q2
    #  x0 -- Gaussian prior
    u_x00 = 0
    s2_x00 = 0.5
    #  V00 -- Inverse Gamma prior
    pr_mn_V00 = 1    # mode of prior distribution of variance ~ 1
    a_V00 = 2
    B_V00 = (a_V00 + 1)*pr_mn_V00
    #  m1, m2 -- Dirichlet prior
    alp = _N.ones(nStates)

    #  generate initial values of parameters
    #  generate initial time series
    _d = _kfardat.KFARGauObsDat(N, 1)
    _d.copyData(y[:, 0], y[:, 0])   # dummy data copied

    u_w1 = _N.array([-2.1972245773362191, -0.40546510810816427])
    u_w2 = _N.array([-2.1972245773362191, -0.40546510810816427])
    # if set == 13:
    #     u_w1 = _N.array([-1.3862943611198906, 0])
    #     u_w2 = _N.array([-1.3862943611198906, 0])
    # if set == 11:
    #     u_w1 = _N.array([-2.5866893440979424, -1.5163474893680886])
    #     u_w2 = _N.array([-2.5866893440979424, -1.5163474893680886])
    # if set == 10:
    #     u_w1 = _N.array([-2.5866893440979424, -1.0986122886681098])
    #     u_w2 = _N.array([-2.5866893440979424, -1.0986122886681098])
    F0 = 0.92
    q2 = 0.025
    x00 = u_x00 + _N.sqrt(s2_x00)*_N.random.rand()
    V00 = B_V00*_ss.invgamma.rvs(a_V00)
    m = _N.random.dirichlet(alp)

    smp_F = _N.zeros(NMC + burn)
    smp_q2 = _N.zeros(NMC + burn)
    smp_u = _N.zeros((NMC + burn, nWins, nStates))   # uL_w1, uH_w1, uL_w2, uH_w2, ...
    smp_m = _N.zeros((NMC + burn, nStates))

    smpx = x
    Bsmpx = _N.zeros((NMC, N + 1))
    ws_w1 = lw.rpg_devroye(rn[0], smpx + u0_w1, num=(N + 1))
    ws_w2 = lw.rpg_devroye(rn[1], smpx + u0_w2, num=(N + 1))
    trm_w1 = _N.empty(nStates)
    trm_w2 = _N.empty(nStates)

    for it in xrange(1, NMC + burn):
        if (it % 50) == 0:
            print it
        kw_w1 = kp_w1 / ws_w1
        kw_w2 = kp_w2 / ws_w2
        #  generate latent zs.  Depends on Xs and PG latents.
        #  (Here the state labels zT are treated as known.)
        rnds = _N.random.rand(N + 1)
        z[it, :, 0] = 1 - zT
        z[it, :, 1] = zT

        #  generate PG latents.  Depends on Xs and us, zs.
        us_w1 = _N.dot(z[it, :, :], u_w1)   # either low or high u per bin
        us_w2 = _N.dot(z[it, :, :], u_w2)
        ws_w1 = lw.rpg_devroye(rn[0], smpx + us_w1, num=(N + 1))
        ws_w2 = lw.rpg_devroye(rn[1], smpx + us_w2, num=(N + 1))
        print "mean ws_w1 %.3f" % _N.mean(ws_w1)
        print "mean ws_w2 %.3f" % _N.mean(ws_w2)
        print "mean kp_w1"
        print _N.mean(kp_w1)
        print _N.mean(kp_w2)

        _d.copyParams(_N.array([F0]), q2, _N.array([1]), 1)
        #  generate latent AR state
        _d.f_x[0, 0, 0] = x00
        _d.f_V[0, 0, 0] = V00
        btm = 1 / ws_w1 + 1 / ws_w2   # length N + 1
        top = (kw_w1 - us_w1) / ws_w2 + (kw_w2 - us_w2) / ws_w1
        _d.y[:] = top/btm
        _d.Rv[:] = 1 / (ws_w1 + ws_w2)   # time-dependent noise
        print _N.mean(kp_w1/ws_w1 - us_w1)
        print _N.mean(kp_w2/ws_w2 - us_w2)
        # print _N.mean(kw_w2 - us_w2)
        print _N.mean(us_w1)
        print _N.mean(us_w2)

        return _d   # debug: bail out after the first Gibbs pass
def mcmcMixAR1(burn, NMC, y, nStates=2, r=40, n=400, model="binomial"):
    """
    r = 40    Negative binomial.  Approaches Poisson for r -> inf
    n = 400   Binomial
    """
    ##############################################
    ##############  Storage for sampled parameters
    ##############################################
    N = len(y) - 1
    nStates = 2
    smpld_params = _N.empty((NMC + burn, 4 + 2*nStates))   # m1, m2, u1, u2
    z = _N.empty((NMC + burn, N + 1, nStates))   # augmented data
    mnCt = _N.mean(y)

    #  INITIAL samples
    if model == "negative binomial":
        kp = (y - r) * 0.5
        p0 = mnCt / (mnCt + r)   # matches 1 - p of generated
        rn = r
    else:
        kp = y - n*0.5
        p0 = mnCt / float(n)     # matches 1 - p of generated
        rn = n
    u0 = _N.log(p0 / (1 - p0))   # -1*u generated

    #######  PRIOR parameters
    #  F0 -- flat prior
    # a_F0 = -1
    #  I think a prior assumption of a relatively narrow and high F0 range
    #  is warranted.  Small F0 is close to white noise, and as such can
    #  be confused with the independent trial-to-trial count noise.  Force
    #  it to search for longer-timescale correlations by setting F0 to be
    #  fairly large.
    a_F0 = -0.1    # prior assumption: slow fluctuation
    b_F0 = 1
    #  u -- Gaussian prior
    u_u = _N.empty(nStates)
    s2_u = _N.zeros((nStates, nStates))
    u_u[:] = (u0*1.2, u0*0.8)
    _N.fill_diagonal(s2_u, [0.5, 0.5])
    #  q2 -- Inverse Gamma prior
    pr_mn_q2 = 0.05
    a_q2 = 2
    B_q2 = (a_q2 + 1)*pr_mn_q2
    #  x0 -- Gaussian prior
    u_x00 = 0
    s2_x00 = 0.5
    #  V00 -- Inverse Gamma prior
    pr_mn_V00 = 1    # mode of prior distribution of variance ~ 1
    a_V00 = 2
    B_V00 = (a_V00 + 1)*pr_mn_V00
    #  m1, m2 -- Dirichlet prior
    alp = _N.ones(nStates)

    #  generate initial values of parameters
    #  generate initial time series
    _d = _kfardat.KFARGauObsDat(N, 1)
    # _d.copyData(y, x_st_cnts[:, 0])
    _d.copyData(y, y)   # dummy data copied

    u = _N.random.multivariate_normal(u_u, s2_u)
    F0 = ((b_F0) - (a_F0))*_N.random.rand() + a_F0
    q2 = B_q2*_ss.invgamma.rvs(a_q2)
    x00 = u_x00 + _N.sqrt(s2_x00)*_N.random.rand()
    V00 = B_V00*_ss.invgamma.rvs(a_V00)
    m = _N.random.dirichlet(alp)

    smpld_params[0, 0:4] = (F0, q2, x00, V00)
    for i in xrange(nStates):
        smpld_params[0, i + 4] = m[i]
        smpld_params[0, i + 6] = u[i]

    smpx = _N.zeros(N + 1)   # start at 0 + u
    Bsmpx = _N.zeros((NMC, N + 1))
    trm = _N.empty(nStates)
    ws = lw.rpg_devroye(rn, smpx + u0, num=(N + 1))

    for it in xrange(1, NMC + burn):
        if (it % 50) == 0:
            print it
        #  generate latent zs.  Depends on Xs and PG latents
        kw = kp / ws
        rnds = _N.random.rand(N + 1)
        for t in xrange(N + 1):
            for i in xrange(nStates):
                rsd = (u[i] + smpx[t]) - kw[t]   # residual
                trm[i] = m[i]*_N.exp(-0.5*ws[t]*rsd*rsd)
            # rsd = (u + smpx[t]) - kw[t]   # residual; u is a vector
            # trm = m*_N.exp(-0.5*ws[t]*(_N.dot(rsd, rsd) - kw[t]*kw[t]))
            z[it, t, :] = (0, 1)
            #  the loop index here was previously named `n`, which clobbered
            #  the binomial-count argument and caused failures when this
            #  method was called repeatedly; it is renamed to `t`
            try:
                if rnds[t] < (trm[0] / _N.sum(trm)):
                    z[it, t, :] = (1, 0)
            except Warning:
                print "^^^^^^^^^^^   small"
            #  m0 e^{-0.5*a*x*x} / (m0 e^{-0.5*a*x*x} + m1 e^{-0.5*b*y*y})
            #  = 1 / (1 + (m1/m0) e^{-0.5*b*y*y + 0.5*a*x*x})
            rsd0 = (u[0] + smpx[t]) - kw[t]
            rsd1 = (u[1] + smpx[t]) - kw[t]
            thr = 1 / (1 + (m[1]/m[0])*_N.exp(0.5*ws[t]*(rsd0*rsd0 - rsd1*rsd1)))
            print thr
            if rnds[t] < thr:
                z[it, t, :] = (1, 0)

        #  generate PG latents.  Depends on Xs and us, zs
        us = _N.dot(z[it, :, :], u)
        ws = lw.rpg_devroye(rn, smpx + us, num=(N + 1))
        _d.copyParams(_N.array([F0]), q2, _N.array([1]), 1)

        #  generate latent AR state
        _d.f_x[0, 0, 0] = x00
        _d.f_V[0, 0, 0] = V00
        _d.y[:] = kp/ws - us
        _d.Rv[:] = 1 / ws   # time-dependent noise
        smpx = _kfar.armdl_FFBS_1itr(_d, samples=1)

        #  sample m
        dirArgs = _N.empty(nStates)
        for i in xrange(nStates):
            dirArgs[i] = alp[i] + _N.sum(z[it, :, i])
        m[:] = _N.random.dirichlet(dirArgs)

        #  sample u
        for i in xrange(nStates):
            A = 0.5*(1/s2_u[i, i] + _N.dot(ws, z[it, :, i]))
            B = u_u[i]/s2_u[i, i] + _N.dot(kp - ws*smpx, z[it, :, i])
            u[i] = B/(2*A) + _N.sqrt(1/(2*A))*_N.random.randn()

        #  sample F0
        F0AA = _N.dot(smpx[0:-1], smpx[0:-1])
        F0BB = _N.dot(smpx[0:-1], smpx[1:])
        F0std = _N.sqrt(q2/F0AA)
        F0a, F0b = (a_F0 - F0BB/F0AA) / F0std, (b_F0 - F0BB/F0AA) / F0std
        F0 = F0BB/F0AA + F0std*_ss.truncnorm.rvs(F0a, F0b)

        #####################    sample q2
        a = a_q2 + 0.5*(N + 1)   #  N + 1 - 1
        rsd_stp = smpx[1:] - F0*smpx[0:-1]
        BB = B_q2 + 0.5*_N.dot(rsd_stp, rsd_stp)
        # print BB / (a-1)
        q2 = _ss.invgamma.rvs(a, scale=BB)

        #####################    sample x00
        mn = (u_x00*V00 + s2_x00*x00) / (V00 + s2_x00)
        vr = (V00*s2_x00) / (V00 + s2_x00)
        x00 = mn + _N.sqrt(vr)*_N.random.randn()
        #####################    sample V00
        aa = a_V00 + 0.5
        BB = B_V00 + 0.5*(smpx[0] - x00)*(smpx[0] - x00)
        V00 = _ss.invgamma.rvs(aa, scale=BB)

        smpld_params[it, 0:4] = (F0, q2, x00, V00)
        for i in xrange(nStates):
            smpld_params[it, i + 4] = m[i]
            smpld_params[it, i + 6] = u[i]

        if it >= burn:
            Bsmpx[it-burn, :] = smpx

    return Bsmpx, smpld_params, z
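
#  ---------------------------------------------------------------------------
#  Sketch (not part of the original source) of summarizing mcmcMixAR1 output.
#  Each row of smpld_params is (F0, q2, x00, V00, m1, m2, u1, u2) for one
#  iteration, and Bsmpx holds the post-burn-in latent trajectories.  The
#  helper name and the posterior-mean summary are my own choice.
#  ---------------------------------------------------------------------------
def summarize_mixAR1(Bsmpx, smpld_params, burn):
    post = smpld_params[burn:]                 # keep post-burn-in iterations
    names = ("F0", "q2", "x00", "V00", "m1", "m2", "u1", "u2")
    means = dict(zip(names, _N.mean(post, axis=0)))
    xpath = _N.mean(Bsmpx, axis=0)             # posterior-mean latent path
    return means, xpath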