def hypotheses_expected_final_entropy(action, data=None, normalized=False):
    """Expected entropy of the hypothesis posterior after taking `action`.

    For each possible outcome d of `action`, weights the entropy of
    p(hypotheses | [d] + data) by the unnormalized predictive probability
    of d, then divides by the total predictive mass (so p_data_action is
    normalized here; p_hypotheses_data is passed normalized=False).

    Args:
        action: action whose outcomes come from world.possible_data(action).
        data: optional list of previously observed datapoints; None means
            no prior data.
        normalized: currently unused -- kept for interface compatibility.
            (Per the original note: p_data_action is normalized explicitly,
            p_theory_data "not necessarily".)

    Returns:
        Expected final entropy (float).

    Raises:
        ZeroDivisionError: if all outcomes have zero predictive mass.
    """
    norm = 0.0
    expval = 0.0
    # Single pass: p_data_action marginalizes over theories and hypotheses,
    # so compute it once per outcome instead of once for the normalizer and
    # again for the expectation (the original looped twice).
    for d in world.possible_data(action):
        p_d = model.p_data_action(d, action, data)
        norm += p_d
        alldata = [d] if data is None else [d] + data
        expval += utils.H(lambda hs: model.p_hypotheses_data(hs, alldata),
                          model.fullh_space, False) * p_d
    return expval / norm
def hypotheses_expected_final_entropy(action, data=None, normalized=False):
    """Return the expected posterior entropy over hypotheses for `action`.

    Averages H(p(hypotheses | outcome + data)) over the possible outcomes
    of `action`, weighting each outcome by its (unnormalized) predictive
    probability and renormalizing at the end.

    Args:
        action: candidate action; outcomes via world.possible_data(action).
        data: previously observed datapoints (list), or None for no data.
        normalized: unused -- retained for interface compatibility; the
            predictive weights are normalized explicitly below.

    Returns:
        The normalized, outcome-weighted expected entropy (float).

    Raises:
        ZeroDivisionError: if the total predictive mass is zero.
    """
    total = 0.0
    weighted_entropy = 0.0
    # p_data_action is expensive (double marginalization), so evaluate it
    # exactly once per outcome and accumulate normalizer and expectation
    # in the same pass (original made two passes / two calls per outcome).
    for d in world.possible_data(action):
        weight = model.p_data_action(d, action, data)
        total += weight
        combined = [d] if data is None else [d] + data
        weighted_entropy += weight * utils.H(
            lambda hs: model.p_hypotheses_data(hs, combined),
            model.fullh_space, False)
    return weighted_entropy / total
def success_probability(self, action, prev_data=None):
    """Probability that `action` succeeds, given previously observed data.

    world.possible_data(action) is assumed to return exactly the pair
    (failure_datapoint, success_datapoint) -- TODO confirm ordering.

    Args:
        action: the candidate action.
        prev_data: list of previously observed datapoints; defaults to
            no data.

    Returns:
        p(success) renormalized over the two possible outcomes (float).
    """
    # Fix the shared-mutable-default pitfall (`prev_data=[]`); callers that
    # relied on the old default still get an empty list downstream.
    if prev_data is None:
        prev_data = []
    data_no, data_yes = world.possible_data(action)
    p_yes = model.p_data_action(data_yes, action, prev_data)
    p_no = model.p_data_action(data_no, action, prev_data)
    return p_yes / (p_yes + p_no)
def success_probability(self, action, prev_data=None):
    """Return the renormalized success probability of `action`.

    Assumes world.possible_data(action) yields the two-outcome pair
    (no-datapoint, yes-datapoint) -- TODO confirm ordering.

    Args:
        action: the candidate action.
        prev_data: previously observed datapoints, or None for none.

    Returns:
        p_yes / (p_yes + p_no) over the two outcomes.
    """
    # `prev_data=[]` was a mutable default argument; use the None sentinel
    # so the default cannot be shared/mutated across calls.
    prev = [] if prev_data is None else prev_data
    data_no, data_yes = world.possible_data(action)
    p_yes = model.p_data_action(data_yes, action, prev)
    p_no = model.p_data_action(data_no, action, prev)
    return p_yes / (p_yes + p_no)
def theory_expected_entropy_gain(action, data=None):
    """Expected change in theory-posterior entropy from taking `action`.

    For each possible outcome d, computes
        H(p(theory | [d] + data)) - H(p(theory | data))
    weighted by the unnormalized predictive probability of d, then divides
    by the total predictive mass. p_theory_data IS evaluated normalized
    (as the original required); p_data_action is normalized here.

    Args:
        action: action whose outcomes come from world.possible_data(action).
        data: previously observed datapoints (list), or None.

    Returns:
        Expected entropy gain (float; negative means entropy reduction).

    Raises:
        ZeroDivisionError: if all outcomes have zero predictive mass.
    """
    # The current entropy does not depend on the outcome d: the original
    # recomputed it inside the loop every iteration -- hoist it out.
    base_entropy = utils.H(
        lambda t: model.p_theory_data(t, data, normalized=True),
        model.t_space)
    norm = 0.0
    expval = 0.0
    # Single pass; p_data_action is expensive, so call it once per outcome.
    for d in world.possible_data(action):
        p_d = model.p_data_action(d, action, data)
        norm += p_d
        alldata = [d] if data is None else [d] + data
        post_entropy = utils.H(
            lambda t: model.p_theory_data(t, alldata, normalized=True),
            model.t_space)
        expval += (post_entropy - base_entropy) * p_d
    return expval / norm
def theory_expected_entropy_gain(action, data=None):
    """Return the predictive-weighted expected entropy gain over theories.

    Weighted average over outcomes d of
        H(theory posterior after seeing d) - H(current theory posterior),
    with weights p_data_action(d, action, data), normalized at the end.
    p_theory_data is called with normalized=True, as the original notes
    require.

    Args:
        action: candidate action.
        data: list of previous datapoints, or None for no data.

    Returns:
        Expected entropy gain (float).

    Raises:
        ZeroDivisionError: if the total predictive mass is zero.
    """
    def theory_entropy(given):
        # Entropy of the normalized theory posterior given a data list.
        return utils.H(
            lambda t: model.p_theory_data(t, given, normalized=True),
            model.t_space)

    # Loop-invariant: the pre-action entropy was recomputed per outcome in
    # the original; compute it exactly once.
    current_entropy = theory_entropy(data)
    total = 0.0
    gain = 0.0
    for d in world.possible_data(action):
        weight = model.p_data_action(d, action, data)  # once per outcome
        total += weight
        extended = [d] if data is None else [d] + data
        gain += (theory_entropy(extended) - current_entropy) * weight
    return gain / total
def p_data_action(datapoint, action, prev_data=None):
    """Unnormalized predictive probability of `datapoint` under `action`.

    Marginalizes over theories and over all hypotheses for the machine the
    action targets:
        sum_t sum_h p(datapoint | h) * p(h | t) * p(t)

    Args:
        datapoint: candidate observation.
        action: pair whose second element is the machine acted on.
        prev_data: accepted for interface compatibility but currently
            unused here (posterior conditioning is disabled -- see note).

    Returns:
        Unnormalized probability mass, or 0 if `datapoint` is not a
        possible outcome of `action`.
    """
    if datapoint not in world.possible_data(action):
        return 0
    machine = action[1]
    # The hypothesis set depends only on the machine, not the theory:
    # build it once instead of once per theory (materialized with list()
    # in case create_all_hypotheses returns a one-shot iterator).
    hypotheses = list(hf.create_all_hypotheses(machine))
    pda = 0
    for t in t_space:
        prior = t.prior()  # invariant over the hypothesis loop
        for h in hypotheses:
            pda += h.single_likelihood(datapoint) * \
                   t.hypothesis_likelihood(h) * prior
            # NOTE(review): conditioning via h.unnormalized_posterior(prev_data)
            # was commented out in the original; prev_data is ignored.
    return pda
def p_data_action(datapoint, action, prev_data=None):
    """UNNORMALIZED --CHECKED

    Predictive probability of `datapoint` under `action` given `prev_data`,
    up to a constant:
        sum_t p(t | prev_data) sum_h p(datapoint | h) p(h | t, prev_data)
    p_theory_data is deliberately NOT normalized -- normalizing would only
    contribute a constant factor (per the original note).

    Args:
        datapoint: candidate observation.
        action: pair whose second element is the machine acted on.
        prev_data: list of previous datapoints; defaults to no data.

    Returns:
        Unnormalized probability mass, or 0 if `datapoint` is not a
        possible outcome of `action`.
    """
    # Mutable-default fix: old signature was `prev_data=[]`; the None
    # sentinel preserves the empty-list behavior for default calls.
    # (Assumes p_theory_data treats [] and None equivalently -- the sibling
    # entropy functions pass data=None directly; TODO confirm.)
    if prev_data is None:
        prev_data = []
    if datapoint not in world.possible_data(action):
        return 0
    machine = action[1]
    pda = 0
    for t in t_space:
        # The theory weight is invariant over hypotheses: the original
        # recomputed it for every h -- hoist it out of the inner loop.
        p_t = p_theory_data(t, prev_data)
        for h in singleh_space:
            pda += p_singledata_hypothesis(datapoint, h, machine) * \
                   p_hypothesis_theorydata(h, machine, t, prev_data) * p_t
    return pda
def p_data_action(datapoint, action, prev_data=None):
    """UNNORMALIZED --CHECKED

    Returns, up to a constant, p(datapoint | action, prev_data):
        sum over theories t and single hypotheses h of
        p(datapoint | h, machine) * p(h | machine, t, prev_data) * p(t | prev_data)
    Normalizing p_theory_data is skipped on purpose: it would only add a
    constant factor (original note).

    Args:
        datapoint: candidate observation.
        action: pair whose second element is the machine acted on.
        prev_data: previously observed datapoints, or None for none.

    Returns:
        Unnormalized mass; 0 when `datapoint` cannot result from `action`.
    """
    # Replace the mutable default `prev_data=[]` with the None sentinel;
    # default callers still see an empty list. (Assumes downstream treats
    # [] and None-as-no-data the same -- TODO confirm.)
    observed = [] if prev_data is None else prev_data
    if datapoint not in world.possible_data(action):
        return 0
    machine = action[1]
    total = 0
    for theory in t_space:
        # Invariant over the hypothesis loop -- evaluate once per theory.
        theory_weight = p_theory_data(theory, observed)
        for hyp in singleh_space:
            likelihood = p_singledata_hypothesis(datapoint, hyp, machine)
            hyp_weight = p_hypothesis_theorydata(hyp, machine, theory, observed)
            total += likelihood * hyp_weight * theory_weight
    return total
def p_data_action(datapoint, action, prev_data=None):
    """UNNORMALIZED

    Marginal p(datapoint | action), up to a constant:
        sum_t p(t) sum_h p(datapoint | h) p(h | t)

    Args:
        datapoint: candidate observation.
        action: pair whose second element is the machine acted on.
        prev_data: accepted but currently UNUSED -- conditioning on
            previous data was commented out in the original version.
            (Old default was the mutable `[]`; None is equivalent here
            since the value is never read.)

    Returns:
        Unnormalized probability mass, or 0 if `datapoint` is not a
        possible outcome of `action`.
    """
    if datapoint not in world.possible_data(action):
        return 0
    machine = action[1]
    pda = 0
    for t in t_space:
        p_t = p_theory(t)  # invariant over the hypothesis loop -- hoisted
        for h in singleh_space:
            pda += p_singledata_hypothesis(datapoint, h, machine) * \
                   p_hypothesis_theory(h, machine, t) * p_t
    return pda
def p_data_action(datapoint, action, prev_data=None):
    """UNNORMALIZED

    Prior-predictive mass of `datapoint` under `action`, up to a constant:
    sums p(datapoint | h, machine) * p(h | machine, t) * p(t) over every
    theory t and single hypothesis h.

    Args:
        datapoint: candidate observation.
        action: pair whose second element is the machine acted on.
        prev_data: kept for interface compatibility; UNUSED in this
            version (data conditioning is commented out). The old `[]`
            default was a mutable default argument; None is safe and
            behaviorally identical because the value is never read.

    Returns:
        Unnormalized mass; 0 when `datapoint` cannot result from `action`.
    """
    if datapoint not in world.possible_data(action):
        return 0
    machine = action[1]
    total = 0
    for theory in t_space:
        # Theory prior is invariant over hypotheses -- compute once here.
        prior = p_theory(theory)
        for hyp in singleh_space:
            total += (p_singledata_hypothesis(datapoint, hyp, machine)
                      * p_hypothesis_theory(hyp, machine, theory)
                      * prior)
    return total
# print 't: {0}, p: {1}, ppost: {2}'.format(t, model.p_theory(t),\ # model.p_theory_data(t,d0)\ # ) d0p=Datapoint.Datapoint((t1,m0), True) d1=Datapoint.Datapoint((t1,m1), True) #for h in model.singleh_space: #print model.p_data_action(d0p, (t1,m0), []), model.p_data_action(d0p, (t1,m0), d0) action=(t2,m1) n1=0 n2=0 p1s=[] p2s=[] d0[0].display() print action for dat in world.possible_data(action): p1=model.p_data_action(dat, (t2,m1), []) p2=model.p_data_action(dat, (t2,m1), d0) n1+=p1 n2+=p2 p1s.append(p1) p2s.append(p2) print [p/n1 for p in p1s] print [p/n2 for p in p2s]
# for t in model.t_space: # print 't: {0}, p: {1}, ppost: {2}'.format(t, model.p_theory(t),\ # model.p_theory_data(t,d0)\ # ) d0p = Datapoint.Datapoint((t1, m0), True) d1 = Datapoint.Datapoint((t1, m1), True) #for h in model.singleh_space: #print model.p_data_action(d0p, (t1,m0), []), model.p_data_action(d0p, (t1,m0), d0) action = (t2, m1) n1 = 0 n2 = 0 p1s = [] p2s = [] d0[0].display() print action for dat in world.possible_data(action): p1 = model.p_data_action(dat, (t2, m1), []) p2 = model.p_data_action(dat, (t2, m1), d0) n1 += p1 n2 += p2 p1s.append(p1) p2s.append(p2) print[p / n1 for p in p1s] print[p / n2 for p in p2s]