Example #1
0
def main():
    """Run four test cases of a naive-Bayes posterior against a fixed model.

    Each case feeds a different boolean observation vector to ``posterior``
    and prints P(C=False|observation) and P(C=True|observation).
    The model (prior and per-feature likelihoods) is identical across cases.
    """
    # Shared model: P(C=True), and for each feature the pair
    # (P(X=true | C=False), P(X=true | C=True)).
    prior = 0.05
    likelihood = ((0.001, 0.3), (0.05, 0.9), (0.7, 0.99))

    # One observation vector per test case; previously each case was a
    # copy-pasted duplicate of the same print/compute sequence.
    observations = [
        (True, True, True),
        (True, False, True),
        (False, False, True),
        (False, False, False),
    ]

    for case_number, observation in enumerate(observations, start=1):
        print("\nTest case {}\n".format(case_number))
        class_posterior_true = posterior(prior, likelihood, observation)
        print("P(C=False|observation) is approximately {:.5f}".format(
            1 - class_posterior_true))
        print("P(C=True |observation) is approximately {:.5f}".format(
            class_posterior_true))
Example #2
0
    def _start_game(self):
        """Play self.T rounds of a Bayesian bandit game.

        Per round: draw an estimated mean for every arm from its Beta
        posterior (Thompson-style sampling), choose an arm via a UCB rule
        over those estimates, observe a binary reward, then update regret,
        best-arm progress, the per-arm success/failure counts, and the
        pull-count bookkeeping.  Plots diagnostics when all rounds finish.
        """
        for t in np.arange(self.T):
            hat_mu_list = list() # for all estimated mus
            var_list = list()
            print("round ------",t)
            for i in np.arange(self.k):
                # draw hat_mu according to Beta(S_i(t)+a, F_i(t)+b)
                # (+1 on both counts corresponds to a uniform Beta(1,1) prior)
                a,b = self.SF_counter[i][0]+1, self.SF_counter[i][1]+1
                hat_mu_list.append(posterior(a,b).sample())
                var_list.append(posterior(a,b).get_var())
            self.hat_mu_list = hat_mu_list
            self.var_list=var_list
            # get UCB values of each arm and get max arm index, 
            pulled_arm = int(UCB(t,hat_mu_list,self.N,self.var_list,self.alpha).pull_max_arm())
            print("selected arm---------:",pulled_arm)
            # get reward
            # NOTE(review): reward is assumed binary (0/1) by the S/F update
            # below — confirm against the bandit's draw_sample().
            reward = self.bandts[pulled_arm].draw_sample()
            self.rewards.append(reward)
            # get regret
            self.regrets.append(self.get_regret(t))
            # update progress on best arm
            self.get_best_arm_progress(t)
            # alarm when best arm progress N5,t/t is above 0.95
            # if self.progress_best_arm[-1]>0.95:
            #     print("first time above 0.95",t)
            #     break
            # update Success and Failure count
            success,fail = (0,0)
            if reward ==1:
                success = self.SF_counter[pulled_arm][0]+1
                fail = self.SF_counter[pulled_arm][1]
                self.SF_counter[pulled_arm]=(success,fail)
            else:
                fail = self.SF_counter[pulled_arm][1]+1
                success = self.SF_counter[pulled_arm][0]
                self.SF_counter[pulled_arm]=(success,fail)
            # self.SF_counter[pulled_arm]=(success,fail)
            print("self.SF_counter[pulled_arm]=(success,fail)",(success,fail))
            # update N_i,t,N_matrix [T,k]

            self.N[pulled_arm]+=1
            self.N_matrix[t,:]=self.N



        self.plot_regret()
        self.plot_cf()
        self.plot_arm_progress()
Example #3
0
def runToy2D():
	"""Run nested sampling on a 2D toy Gaussian likelihood.

	Sets up two uniform priors on [-5, 5], builds the initial particle
	set, runs the sampler to a file, then plots the marginal posteriors.
	Seeds both numpy and random for reproducibility.
	"""
	np.random.seed(0)
	random.seed(0)

	# Decorative banner: 20 lines, each with a '*' at a random column.
	for i in range(20):
		print ' '*random.randint(0,70) + '*'

	# priors:
	aP = prior.uniform(-5,5)			# will have 2D gaussian toy likelihood
	bP = prior.uniform(-5,5)			
	g.priors = [aP,bP]

    # parameters ('name', x_init, isFrozen):
	a = particle.param('a',aP.sample(),False)
	b = particle.param('b',bP.sample(),False)
	g.initParams = [a,b]

	# Link each parameter to its prior by index; order of g.priors and
	# g.initParams must match.
	for i,p in enumerate(g.initParams):		# important!
		p.setPrior(i)

	# which indexes are thawed:
	for i,p in enumerate(g.initParams):
		if(not p.isFrozen):
			g.thawedIdxs.append(i)

	# choose likelihood:
	g.likelihood = likelihood.toyGauss()

	# mass vector for parameters:
	g.masses = np.ones(len(g.initParams))

	# create the initial particle set:
	particles = []
	for i in range(g.nParticles):
		particles.append(particle.particle(g.initParams))
		particles[i].assignPriorSample()

	# set the inference running:
	fname = '/Users/jmrv/Documents/school/mit/research/software/nest/samples/toy2d.txt'
	nested.sample(particles,fname)

	# Read the sample file back and plot the marginal posteriors.
	pos = posterior.posterior(fname)
	plotname = '/Users/jmrv/Documents/school/mit/research/software/nest/plots/toy2d.pdf'
	pos.plotMarginals(plotname)
Example #4
0
File: ul.py Project: jmrv/scandium
def findUL(chaindir):
    """Compute and plot an upper limit on log flux from a posterior chain.

    Histograms the marginal samples of parameter index 3, fits a model
    curve to the histogram, and derives the upper limit both analytically
    (from the fitted parameters) and directly from the histogram (last bin
    whose density exceeds h/e^2).  Saves an annotated plot.

    Python 2 code (print statements); depends on module-level fitPost,
    modiLogi, and root.
    """
    pos = posterior.posterior(chaindir)
    # dataBack=True presumably returns the raw samples rather than a plot —
    # TODO confirm against posterior.marginalize.
    samples = pos.marginalize(3, dataBack=True)
    print "%d samples in the posterior" % len(samples)

    plt.figure()
    rng = (np.min(samples), np.max(samples) + 1)  # log flux
    prob, edges, blah = plt.hist(samples, bins=100, range=rng, normed=True, histtype="step", color="k")
    # Bin centers: midpoints of consecutive edges (drop the wrapped last pair).
    x = ((edges + np.roll(edges, -1)) / 2)[0:-1]
    # Fit the histogram; pars = (x0, b, h) per the unpacking below.
    pars = fitPost(x, prob)
    x0 = pars[0]
    b = pars[1]
    h = pars[2]
    plt.plot(x, modiLogi(x, x0, b, h), "-", color="0.5")

    # analytic upper limit:
    aUL = np.log(np.e ** 2 - 1) / b + x0

    # histogram upper limit:
    # right edge of the last bin whose density is above h/e^2.
    above = np.where(prob > h / np.e ** 2)[0]
    edge = above[-1] + 1
    UL = edges[edge]

    # Convert log flux to "f6" units (factor 10^6 — presumably micro-flux;
    # verify against project conventions).
    f6 = 10 ** (UL + 6.0)
    # bin width:
    width = 10 ** (edges[edge + 1] + 6.0) - f6
    print "bin width at edge: %.3f" % width
    print UL, f6

    # Step indicator marking the upper-limit threshold on the plot.
    xfine = np.linspace(rng[0], rng[1], 1000)
    ULindicator = np.ones(len(xfine)) * h / np.e ** 2
    ULindicator[xfine >= UL] = 0
    plt.plot(xfine, ULindicator, "k:")

    plt.xlabel("log flux")
    plt.ylabel("probability density")
    plt.title("posterior for Sc line: upper limit = %.2f = %.2f f6" % (UL, f6))

    # Plot filename derived from the chain directory's last path component.
    plotname = root + "plots/" + chaindir.split("/")[-2] + ".pdf"
    plt.savefig(plotname)
    print "plotted " + plotname
def generatePosteriorDistributionWithObsevation():
    """Flask handler: update a posterior distribution with one observation.

    Reads the observation, the current prior vector, and a summary of the
    graph space from query-string parameters, rebuilds the likelihood for
    the observation, and returns the likelihood and posterior distributions
    as JSON.
    """
    args = request.args

    # Assemble the observation from the request parameters.
    observation = {
        'pos_args': json.loads(args.get('pos_args')),
        'neg_args': json.loads(args.get('neg_args')),
        'rating': int(args.get('rating')),
        'observationNo': int(args.get('observationNo')),
        # attacks arrive as JSON lists; downstream code expects tuples.
        'attacks': [tuple(attack) for attack in json.loads(args.get('attacks'))],
    }

    currentPrior = np.array(json.loads(args.get('currentPrior')))

    # Rebuild the graph-space data required for the likelihood construction.
    graphSpaceSummary = json.loads(args.get('graphSpaceSummary'))
    p_G = prior(graphSpaceSummary['pos_args'], graphSpaceSummary['neg_args'])
    p_G.rating = graphSpaceSummary['rating']  # This is possibly reckless coding

    p_G_T = liklihood(p_G, observation)
    liklihood_distribution = p_G_T.buildLiklihoodDistribution()

    p_T_G = posterior(currentPrior, liklihood_distribution)
    posterior_distribution = p_T_G.buildPosteriorDistribution()

    # Response keys intentionally kept as-is (clients depend on them).
    return jsonify({
        'liklihood_distribution': list(liklihood_distribution),
        'posterior_distribution': list(posterior_distribution),
    })
Example #6
0
# Count misclassified test samples: for each feature value column, the class
# whose likelihood is strictly lower contributes its test count to the error.
# Equal likelihoods contribute nothing.  (Replaces a manual while/increment
# loop with the idiomatic for-range form; behavior is unchanged.)
err = 0
C = l.shape[1]
for i in range(C):
    if l[0][i] < l[1][i]:
        err += test_x[0, i]
    elif l[0][i] > l[1][i]:
        err += test_x[1, i]
print(err)



from posterior import posterior

# Side-by-side bars of P(omega_1|x) and P(omega_2|x) over the data range.
p = posterior(train_x)
width = 0.35
p1 = plt.bar(np.arange(data_range[0], data_range[1] + 1), p.T[:, 0], width)
p2 = plt.bar(np.arange(data_range[0], data_range[1] + 1) + width, p.T[:, 1], width)
plt.xlabel('x')
# Raw strings: '\o' in a plain literal is an invalid escape sequence
# (DeprecationWarning today, SyntaxError in future Python).  The string
# values are unchanged.
plt.ylabel(r'$P(\omega|x)$')
plt.legend((p1[0], p2[0]), (r'$\omega_1$', r'$\omega_2$'))
plt.axis([data_range[0] - 1, data_range[1] + 1, 0, 1.2])
plt.show()

err = 0
C = p.shape[1]
i = 0

while(i < C):
    if p[0][i] < p[1][i]:
Example #7
0
# jmrv 04.2013
# Plot how the upper limit (log flux) tightens with exposure time, at
# several confidence fractions, across five NuSTAR chain studies.

from posterior import posterior
import matplotlib.pyplot as plt

chainroot = '/nfs/tabla/data1/fgastro/sc/chains/nustar/'
studies = ['weE1/','weE2/','weE3/','weE4/','weE5/']
exposures = [164.5,313.5,448.5,592,755.5]  # one exposure (ks?) per study — confirm units
l100 = []
l99 = []
l95 = []
l90 = []

plt.figure()
for s in studies:
	p = posterior(chainroot+s)
	# Marginal samples of parameter index 3; percentile indexing below
	# assumes h is sorted ascending — TODO confirm marginalize's contract.
	h = p.marginalize(3,1000,dataBack=True)

	l100.append(h[-1])
	l99.append(h[int(0.99*len(h))])
	l95.append(h[int(0.95*len(h))])
	l90.append(h[int(0.90*len(h))])

plt.ylim(-6,-4)
plt.xlim(0,800)
# One curve per confidence fraction, distinguished by line style.
plt.plot(exposures, l100,'k-',marker='o')
plt.plot(exposures, l99,'k--',marker='o')
plt.plot(exposures, l95,'k-.',marker='o')
plt.plot(exposures, l90,'k:',marker='o')

plt.ylabel('upper limit (log flux)')
Example #8
0
File: headsup.py Project: jmrv/chmc
import sys
from posterior import posterior
import chmcGlobals as g

# Usage: python headsup.py [chaindir [plotroot]]
# e.g.   python headsup.py ../chains/spitzer ../plots/spitzer/headsup
if __name__ == '__main__':
	# Defaults come from the globals module.
	chaindir = g.chaindir
	plotroot = g.plotdir+'headsup'

	# Override defaults from the command line, checking each argument
	# separately.  The original read sys.argv[2] whenever *one* argument
	# was present, raising IndexError for single-argument invocations.
	if len(sys.argv) > 1:
		chaindir = sys.argv[1]
	if len(sys.argv) > 2:
		plotroot = sys.argv[2]

	p = posterior(chaindir)
	p.headsup(plotroot)
Example #9
0
def runSc():
	"""Run nested sampling for the Sc line analysis.

	Builds priors and initial parameters for a powerlaw continuum, an
	absorption column, the Sc line, and two nuisance lines (the first
	nuisance line is currently commented out of both lists), attaches the
	Sc likelihood built from response/spectrum files, runs the sampler,
	and plots the marginal posteriors.  Seeds numpy and random for
	reproducibility.  Python 2 code (print statement).
	"""
	np.random.seed(0)
	random.seed(0)

	# Decorative banner: 20 lines, each with a '*' at a random column.
	for i in range(20):
		print ' '*random.randint(0,70) + '*'
	
	# priors:
	normP = prior.uniform(1e-2,1,isLog=True)			# powerlaw norm			
	alphaP = prior.uniform(2,4)							# powerlaw power			
	
	nHP = prior.uniform(1.0,3.0)						# nH (absorption)	
	
	scAreaP = prior.uniform(1e-6, 1e-3, isLog=True)		# Sc line area
	
	area1P = prior.uniform(1e-6, 1e-3, isLog=True)		# nuisance line 1
	center1P = prior.uniform(3.5, 3.7)					# 						
	sigma1P = prior.uniform(0.001, 0.010)				# natural width

	area2P = prior.uniform(1e-5, 1e-4, isLog=True)		# nuisance line 2
	center2P = prior.uniform(3.75, 4.0)					#					
	sigma2P = prior.uniform(0.001, 0.010)				#	

	# NOTE(review): nuisance line 1 is excluded here and in g.initParams
	# below — the two lists must stay in the same order for setPrior(i).
	g.priors = [	normP, alphaP, \
					nHP, \
					scAreaP, \
					#area1P,center1P,sigma1P, \
					area2P,center2P,sigma2P			]

    # parameters ('name', x_init, isFrozen):
	norm = particle.param('norm',5e-2,False)
	alpha = particle.param('alpha',2.9,False)

	nH = particle.param('nH',2.0,True)

	scarea = particle.param('Sc area',1e-5,False)

	area1 = particle.param('area1',1e-5,False)
	center1 = particle.param('center1',3.6,False)
	sigma1 = particle.param('sigma1',0.005,False)

	area2 = particle.param('area2',1.8e-5,False)
	center2 = particle.param('center2',3.87,False)
	sigma2 = particle.param('sigma2',0.005,False)
	g.initParams = [    norm,alpha, \
						nH, \
						scarea, \
						#area1,center1,sigma1, \
						area2,center2,sigma2]

	# Link each parameter to its prior by matching index.
	for i,p in enumerate(g.initParams):
		p.setPrior(i)

	# which indexes are thawed:
	for i,p in enumerate(g.initParams):
		if(not p.isFrozen):
			g.thawedIdxs.append(i)
	
	# mass vector for parameters:
	g.masses = np.ones(len(g.initParams))*0.1

    # data and likelihood:
	g.likelihood = likelihood.ScLike(	g.datadir+'column.warf',
										g.datadir+'column.wrmf',
										g.datadir+'column.pi',
										g.modeldir+'phabs1e22.txt',
										[3.4,5])

	# create the initial particle set:
	particles = []
	for i in range(g.nParticles):
		particles.append(particle.particle(g.initParams))
		particles[i].assignPriorSample()

	# set the inference running:
	fname = g.sampleDir+'samples.txt'
	nested.sample(particles,fname)

	# Read samples back and plot marginal posteriors.
	pos = posterior.posterior(fname)
	plotname = g.plotDir+'sctest.pdf'
	pos.plotMarginals(plotname)
Example #10
0
def nb_classify(prior, likelihood, input_vector):
    """Classify *input_vector* with a naive-Bayes spam model.

    Returns ("Spam", p) when the posterior spam probability p exceeds 0.5,
    otherwise ("Not Spam", 1 - p) — the label together with the winning
    class's probability.
    """
    spam_prob = posterior(prior, likelihood, input_vector)
    if spam_prob > 0.5:
        return ("Spam", spam_prob)
    return ("Not Spam", 1 - spam_prob)
Example #11
0
    test_x[0][f - r[0]] for f in range(r[0], r[1] + 1)
    if pred_label_ml[f - r[0]] != 0
])
# Misclassification count for class 2: every feature value whose ML-predicted
# label is not 1 contributes that value's class-2 test count.  Uses a
# generator expression — the original built a throwaway list inside sum().
error_c2 = sum(
    test_x[1][f - r[0]] for f in range(r[0], r[1] + 1)
    if pred_label_ml[f - r[0]] != 1
)
error_num = error_c1 + error_c2
error_r = error_num / np.sum(test_x)
print("Error number: %d. Error rate: %f." % (error_num, error_r))

# ## Part2 Posterior:

# In[6]:

# Side-by-side bars of the class posteriors P(omega_1|x) and P(omega_2|x).
pos = posterior(train_x)
width = 0.3
plt.bar(np.arange(r[0], r[1] + 1) - width / 2,
        pos[0],
        width=width,
        # Raw strings: '\o' in a plain literal is an invalid escape
        # sequence (DeprecationWarning, future SyntaxError).  The string
        # values are unchanged.
        label=r'$\omega_1$',
        color='r')
plt.bar(np.arange(r[0], r[1] + 1) + width / 2,
        pos[1],
        width=width,
        label=r'$\omega_2$',
        color='g')
plt.axis([r[0], r[1], 0, 1.1])
plt.title('Posterior Distribution')
plt.xlabel('x')
plt.ylabel(r'$P(\omega|x)$')