예제 #1
0
	def getReferencePriorPP(self):
		"""Posterior predictive distribution under the reference (non-informative) prior.

		Returns a frozen scipy t distribution with df = n-1, centred on the
		sample mean, with scale derived from the sample variance. Returns
		None when no observations have been recorded.
		"""
		# `is not None` instead of `!= None`: avoids invoking __ne__, which is
		# elementwise (and therefore ambiguous in a boolean context) when
		# observations is a numpy array, and is the PEP 8 idiom for None tests.
		if self.observations is not None:
			loc = self.sampleMean
			# Variance-like scale of the predictive t under the reference prior.
			scale = ((1 + self.n) * self.sampleVariance) / (self.n * (self.n - 1))
			df = self.n - 1
			# NOTE: Scipy's distribution object assumes sqrt of variance (std in
			# the Normal case), so pass np.sqrt of the variance-like quantity.
			return scipyStudent(df=df, loc=loc, scale=np.sqrt(scale))
		return None
예제 #2
0
	def getPPTom(self):
		"""Posterior predictive distribution in Griffiths & Sanborn's notation.

		Notation is the only difference from Murphy's definition. Returns a
		frozen scipy t distribution parametrised according to the computed
		posterior quantities (ui, lambdai, ai, sigmai).
		"""
		ui, lambdai, ai, sigmai = self.translatePosteriorQuantitiesTom()
		# Predictive t: centred at ui, df = ai, variance-like scale inflated
		# by the posterior pseudo-count lambdai.
		loc, scale, df = ui, sigmai * (1 + (1. / lambdai)), ai
		# PyMC equivalent: student(mu=ui, lam=1. / pow(scale, 2), nu=ai)
		# NOTE: Scipy's distribution object assumes sqrt of variance (e.g. std
		# in the Normal case), so pass np.sqrt of the variance-like scale.
		return scipyStudent(df=df, loc=loc, scale=np.sqrt(scale))
예제 #3
0
	def getPP(self):
		"""Posterior predictive distribution as defined in K. Murphy's conjugate analysis document.

		The PP in this model is a non-standard t distribution with df = vn,
		shifted loc = un, and variance-like scale ((1+kn)*sigman)/kn.
		Returns a frozen scipy t distribution parametrised according to the
		computed posterior quantities, or None when there are no observations.
		"""
		# `is not None` instead of `!= None`: avoids __ne__, which is
		# elementwise/ambiguous for numpy arrays and non-idiomatic for None.
		if self.observations is not None:
			loc, scale, df = self.un, ((1 + self.kn) * self.sigman) / self.kn, self.vn
			# PyMC equivalent: student(mu=loc, lam=1. / pow(scale, 2), nu=df)
			# NOTE: Scipy's distribution object assumes sqrt of variance (std in
			# the Normal case), so pass np.sqrt of the variance-like scale.
			return scipyStudent(df=df, loc=loc, scale=np.sqrt(scale))
		return None