def __init__(self, t, y, dy, p=2, q=1):
    """Wrap a CARMA(p, q) process from the carmcmc C++ library around a time series.

    Parameters
    ----------
    t : array-like
        Observation times (must support .copy(); presumably a numpy array).
    y : array-like
        Observed values at the times `t`.
    dy : array-like
        Measurement uncertainties on `y`.
    p : int
        Autoregressive order (default 2).
    q : int
        Moving-average order (default 1).
    """
    # Private copies so later mutation by the caller cannot affect this object.
    self._t = t.copy()
    self._y = y.copy()
    self._dy = dy.copy()
    self._p = p
    self._q = q
    # Mirror the data into carmcmc's C++ double-vector type for the sampler.
    self._tv = cm.vecD()
    self._tv.extend(t)
    self._yv = cm.vecD()
    self._yv.extend(y)
    self._dyv = cm.vecD()
    self._dyv.extend(dy)
    # A 1-sample / 1-burnin "run" is used only to construct the process object;
    # SetMLE(True) switches it to maximum-likelihood density evaluation.
    # NOTE(review): self.p / self.q / self.t are read below although only
    # _p/_q/_t are assigned here — presumably properties defined elsewhere
    # in the class expose them; confirm.
    self._carma_process = cm.run_mcmc_carma(1, 1, self._tv, self._yv, self._dyv, self.p, self.q, 1, False, 1)
    self._carma_process.SetMLE(True)
    # Total time span and smallest sampling interval of the series.
    self._T = self.t[-1] - self.t[0]
    self._dt_min = np.min(np.diff(self.t))
    self._mean_variance()
def doit(args): pModel = int(args[0]) x, y, dy = args[1] nSample = 10000 nBurnin = 1000 nThin = 1 nWalkers = 10 # Should not have to do this... xv = carmcmc.vecD() xv.extend(x) yv = carmcmc.vecD() yv.extend(y) dyv = carmcmc.vecD() dyv.extend(dy) if pModel == 1: sampler = carmcmc.run_mcmc_car1(nSample, nBurnin, xv, yv, dyv, nWalkers, nThin) samplep = carmcmc.CarSample1(x, y, dy, sampler) else: sampler = carmcmc.run_mcmc_carma(nSample, nBurnin, xv, yv, dyv, pModel, 0, nWalkers, False, nThin) samplep = carmcmc.CarmaSample(x, y, dy, sampler) dic = samplep.DIC() print "DIC", pModel, dic return samplep
def setUp(self):
    """Prepare sampler settings and a tiny random data set for the tests."""
    # MCMC configuration shared by the test methods.
    self.nSample, self.nBurnin, self.nThin, self.nWalkers = 100, 10, 1, 2
    npts = 10
    # carmcmc wants its inputs in its own C++ double-vector containers.
    self.xdata = carmcmc.vecD()
    self.xdata.extend(1.0 * np.arange(npts))
    self.ydata = carmcmc.vecD()
    self.ydata.extend(1.0 * np.random.random(npts))
    self.dydata = carmcmc.vecD()
    self.dydata.extend(0.1 * np.random.random(npts))
def testKalmanp(self):
    """Kalman filter for a CARMA(p, q) draw: extrapolating past the data
    must be more uncertain than predicting at an observed time."""
    p, q = 4, 0
    sampler = carmcmc.run_mcmc_carma(self.nSample, self.nBurnin,
                                     self.xdata, self.ydata, self.dydata,
                                     p, q, self.nWalkers, False, self.nThin)
    psampler = carmcmc.CarmaSample(np.array(self.xdata), np.array(self.ydata),
                                   np.array(self.dydata), sampler)
    sigsqr = (psampler._samples["sigma"][0] ** 2)[0]
    # Pad the MA coefficients with zeros up to length p if necessary.
    coefs = psampler._samples["ma_coefs"][0]
    if len(coefs) != p:
        coefs = np.append(coefs, np.zeros(p - q - 1))
    ma_coefs = carmcmc.vecD()
    ma_coefs.extend(coefs)
    # Collect the (complex) AR roots into carmcmc's complex vector type.
    omega = carmcmc.vecC()
    for root in psampler._samples["ar_roots"][0][:psampler.p]:
        omega.append(root)
    #import pdb; pdb.set_trace()
    kfilter = carmcmc.KalmanFilterp(self.xdata, self.ydata, self.dydata,
                                    sigsqr, omega, ma_coefs)
    kfilter.Filter()
    at_data = kfilter.Predict(self.xdata[0])       # evaluate at data point
    beyond = kfilter.Predict(self.xdata[-1] + 1)   # extrapolate
    self.assertTrue(beyond.second > at_data.second)
def testKalmanp(self):
    """Check that the CARMA(p, q) Kalman filter reports a larger predictive
    variance when extrapolating than at an observed epoch."""
    pModel, qModel = 4, 0
    sampler = carmcmc.run_mcmc_carma(self.nSample, self.nBurnin,
                                     self.xdata, self.ydata, self.dydata,
                                     pModel, qModel, self.nWalkers,
                                     False, self.nThin)
    psampler = carmcmc.CarmaSample(np.array(self.xdata), np.array(self.ydata),
                                   np.array(self.dydata), sampler)
    sigsqr = (psampler._samples["sigma"][0] ** 2)[0]
    # Zero-pad the sampled MA coefficients out to length pModel when short.
    ma_coefs0 = psampler._samples["ma_coefs"][0]
    if len(ma_coefs0) != pModel:
        ma_coefs0 = np.append(ma_coefs0, np.zeros(pModel - qModel - 1))
    ma_coefs = carmcmc.vecD()
    ma_coefs.extend(ma_coefs0)
    # Copy the sampled AR roots into the C++ complex-vector container.
    omega = carmcmc.vecC()
    for idx in range(psampler.p):
        omega.append(psampler._samples["ar_roots"][0][idx])
    #import pdb; pdb.set_trace()
    kfilter = carmcmc.KalmanFilterp(self.xdata, self.ydata, self.dydata,
                                    sigsqr, omega, ma_coefs)
    kfilter.Filter()
    pred_at = kfilter.Predict(self.xdata[0])        # evaluate at data point
    var_at = pred_at.second
    pred_out = kfilter.Predict(self.xdata[-1] + 1)  # extrapolate
    var_out = pred_out.second
    self.assertTrue(var_out > var_at)
def __setstate__(self, d):
    """Restore from a pickled state dict, rebuilding the carmcmc C++
    objects (vecD containers and the CARMA process) that cannot be pickled."""
    self.__dict__.update(d)
    # Re-create the C++ data vectors from the restored numpy arrays.
    for attr, data in (("_tv", self.t), ("_yv", self.y), ("_dyv", self.dy)):
        vec = cm.vecD()
        vec.extend(data)
        setattr(self, attr, vec)
    # Rebuild the process object (1-sample/1-burnin construction run) and
    # put it back into maximum-likelihood evaluation mode.
    self._carma_process = cm.run_mcmc_carma(1, 1, self._tv, self._yv,
                                            self._dyv, self.p, self.q,
                                            1, False, 1)
    self._carma_process.SetMLE(True)
def setUp(self):
    """Simulate a short CARMA light curve with fixed AR roots as test data."""
    self.nSample, self.nBurnin, self.nThin, self.nWalkers = 100, 10, 1, 2
    npts = 10
    self.x = 1.0 * np.arange(npts)
    # Two complex-conjugate root pairs plus one real root, with a fixed
    # white-noise variance, define the synthetic process.
    ar_roots = np.array([-0.06283185 - 1.25663706j,
                         -0.06283185 + 1.25663706j,
                         -0.02094395 - 0.25132741j,
                         -0.02094395 + 0.25132741j,
                         -0.03141593 + 0.j])
    sigsqr = 0.00126811439419
    self.y = carmcmc.carma_process(self.x, sigsqr, ar_roots)
    self.dy = np.sqrt(sigsqr) * np.ones(npts)
    # Mirror the arrays into carmcmc's C++ vecD containers.
    for name, data in (("xdata", self.x), ("ydata", self.y), ("dydata", self.dy)):
        vec = carmcmc.vecD()
        vec.extend(data)
        setattr(self, name, vec)
def testCar1(self):
    """CAR(1) sampler: the stored log-likelihood of the first draw must
    agree with getLogDensity evaluated at that draw."""
    cppSample = carmcmc.run_mcmc_car1(self.nSample, self.nBurnin,
                                      self.xdata, self.ydata, self.dydata,
                                      self.nThin)
    psampler = carmcmc.Car1Sample(self.x, self.y, self.dy, cppSample)
    self.assertEqual(psampler.p, 1)
    draws = np.array(cppSample.getSamples())
    loglikes = np.array(cppSample.GetLogLikes())
    # Wrap the first draw in a vecD so the C++ density methods accept it.
    first = carmcmc.vecD()
    first.extend(draws[0])
    logprior0 = cppSample.getLogPrior(first)
    loglike0 = cppSample.getLogDensity(first)
    self.assertAlmostEqual(loglikes[0], loglike0)
def log_likelihood(self, p):
    """Return the CARMA log-likelihood of parameter vector ``p``.

    The parameters are converted to carmcmc's internal ordering, copied
    into a C++ vecD container, and evaluated with the process object's
    ``getLogDensity``.  A NaN density (numerically invalid parameters) is
    mapped to ``-inf`` so samplers/optimizers treat it as zero probability.

    Parameters
    ----------
    p : array-like
        Parameter vector in this class's external parameterization.

    Returns
    -------
    float
        The log-likelihood, or ``-inf`` if the density evaluated to NaN.
    """
    pc = self.to_carmapack_params(p)
    pcv = cm.vecD()
    pcv.extend(pc)
    ll = self._carma_process.getLogDensity(pcv)
    if np.isnan(ll):
        # np.NINF was removed in NumPy 2.0; -np.inf is the identical value.
        ll = -np.inf
    return ll
def setUp(self):
    """Build a small simulated CARMA light curve used by the tests."""
    self.nSample = 100
    self.nBurnin = 10
    self.nThin = 1
    self.nWalkers = 2
    npts = 10
    self.x = 1.0 * np.arange(npts)
    # AR roots: two damped-oscillator conjugate pairs and one real root.
    root_list = [-0.06283185 - 1.25663706j, -0.06283185 + 1.25663706j,
                 -0.02094395 - 0.25132741j, -0.02094395 + 0.25132741j,
                 -0.03141593 + 0.j]
    ar_roots = np.array(root_list)
    sigsqr = 0.00126811439419
    self.y = carmcmc.carma_process(self.x, sigsqr, ar_roots)
    self.dy = np.sqrt(sigsqr) * np.ones(npts)
    # Duplicate the data into carmcmc's C++ vecD containers.
    self.xdata = carmcmc.vecD()
    self.xdata.extend(self.x)
    self.ydata = carmcmc.vecD()
    self.ydata.extend(self.y)
    self.dydata = carmcmc.vecD()
    self.dydata.extend(self.dy)
def testCarpq(self, pModel=3, qModel=2):
    """CARMA(p, q) sampler: verify order bookkeeping and that the stored
    log-likelihood of the first draw matches getLogDensity."""
    sampler = carmcmc.run_mcmc_carma(self.nSample, self.nBurnin,
                                     self.xdata, self.ydata, self.dydata,
                                     pModel, qModel, self.nWalkers,
                                     False, self.nThin)
    psampler = carmcmc.CarmaSample(np.array(self.xdata), np.array(self.ydata),
                                   np.array(self.dydata), sampler)
    self.assertEqual(psampler.p, pModel + qModel)
    draws = np.array(sampler.getSamples())
    loglikes = np.array(sampler.GetLogLikes())
    theta0 = carmcmc.vecD()
    theta0.extend(draws[0])
    logprior0 = sampler.getLogPrior(theta0)
    loglike0 = sampler.getLogDensity(theta0)
    # OK, this is where I truly test that sampler is of class CARp and not CAR1
    self.assertAlmostEqual(loglikes[0], loglike0)
def _make_kalman_filter(self, p):
    """Construct and run a carmcmc Kalman filter for parameter vector *p*.

    The parameters are mapped from their logit-bounded representation back
    to physical values, the data are mean-subtracted and error-rescaled,
    and everything is copied into carmcmc's C++ containers before building
    the filter.  Returns the filter with Filter() already applied.
    """
    p = self.to_params(p)
    ar_roots = self.ar_roots(p)
    ma_coefs = self.ma_poly(p)
    # Map each logit-bounded parameter back onto its [low, high] range.
    mu = par.bounded_values(p['logit_mu'], low=self.mu_min, high=self.mu_max)
    nu = par.bounded_values(p['logit_nu'], low=self.nu_min, high=self.nu_max)
    s = par.bounded_values(p['logit_sigma'], low=self.sigma_min, high=self.sigma_max)
    # Rescale so that `s` is the process standard deviation: divide by the
    # stationary std. dev. of the same CARMA process with unit sigma.
    sigma = s / np.sqrt(cm.carma_variance(1.0, ar_roots, ma_coefs))
    sigmasq = sigma * sigma
    # Plain Python float, since the C++ constructor expects a scalar.
    sigmasq = float(sigmasq)
    # Copy the (mean-subtracted, nu-inflated-error) data into C++ vectors.
    tv = cm.vecD()
    tv.extend(self.t)
    yv = cm.vecD()
    yv.extend(self.y - mu)
    dyv = cm.vecD()
    dyv.extend(self.dy * nu)
    # Complex vector for the AR roots, double vector for the MA coefficients.
    arv = cm.vecC()
    arv.extend(ar_roots)
    mav = cm.vecD()
    mav.extend(ma_coefs)
    kfilter = cm.KalmanFilterp(tv, yv, dyv, sigmasq, arv, mav)
    kfilter.Filter()
    return kfilter
def testCarpq(self, pModel=3, qModel=2):
    """Exercise the CARMA(p, q) sampler and confirm that the first sample's
    recorded log-likelihood equals a fresh getLogDensity evaluation."""
    sampler = carmcmc.run_mcmc_carma(self.nSample, self.nBurnin,
                                     self.xdata, self.ydata, self.dydata,
                                     pModel, qModel, self.nWalkers,
                                     False, self.nThin)
    x_arr = np.array(self.xdata)
    y_arr = np.array(self.ydata)
    dy_arr = np.array(self.dydata)
    psampler = carmcmc.CarmaSample(x_arr, y_arr, dy_arr, sampler)
    self.assertEqual(psampler.p, pModel + qModel)
    psamples = np.array(sampler.getSamples())
    ploglikes = np.array(sampler.GetLogLikes())
    # The C++ density methods require a vecD argument.
    sample0 = carmcmc.vecD()
    sample0.extend(psamples[0])
    logprior0 = sampler.getLogPrior(sample0)
    loglike0 = sampler.getLogDensity(sample0)
    # OK, this is where I truly test that sampler is of class CARp and not CAR1
    self.assertAlmostEqual(ploglikes[0], loglike0)
def simulate(self, p, ts, dys=None):
    """Draw a simulated light curve at times ``ts`` under parameters ``p``.

    If measurement errors ``dys`` are given, Gaussian noise scaled by the
    nu error-inflation factor is added.  The model mean mu is added last.
    """
    p = self.to_params(p)
    kfilter = self._make_kalman_filter(p)
    times = cm.vecD()
    times.extend(ts)
    ysim = np.asarray(kfilter.Simulate(times))
    if dys is not None:
        nu = par.bounded_values(p['logit_nu'],
                                low=self.nu_min, high=self.nu_max)
        noise = np.random.randn(ysim.shape[0]) * dys * nu
        ysim = ysim + noise
    mu = par.bounded_values(p['logit_mu'],
                            low=self.mu_min, high=self.mu_max)
    return ysim + mu