def theory_expected_entropy_gain(action, data=None):
    # Normalize p_data_action here; p_theory_data is expected to be
    # normalized already (hence normalized=True below).
    norm = 0
    for d in world.possible_data(action):
        norm += model.p_data_action(d, action, data)
    expval = 0
    for d in world.possible_data(action):
        alldata = [d] if data is None else [d] + data
        # Entropy change over theory space when d is added to the data.
        gain = (utils.H(lambda t: model.p_theory_data(t, alldata, normalized=True),
                        model.t_space)
                - utils.H(lambda t: model.p_theory_data(t, data, normalized=True),
                          model.t_space))
        expval += gain * model.p_data_action(d, action, data)
    return expval / norm

def hypotheses_expected_final_entropy(action, data=None, normalized=False):
    # Normalize p_data_action here; p_hypotheses_data is not necessarily
    # normalized, so forward the caller's `normalized` flag to utils.H.
    norm = 0
    for d in world.possible_data(action):
        norm += model.p_data_action(d, action, data)
    expval = 0
    for d in world.possible_data(action):
        alldata = [d] if data is None else [d] + data
        expval += (utils.H(lambda hs: model.p_hypotheses_data(hs, alldata),
                           model.fullh_space, normalized)
                   * model.p_data_action(d, action, data))
    return expval / norm

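# Both expectations above instantiate the same template. With D the data
# observed so far, a an action, and d ranging over world.possible_data(a):
#
#     Z = sum_d p(d | a, D)
#     E[H | a] = (1/Z) * sum_d p(d | a, D) * H(p(. | D + [d]))
#
# theory_expected_entropy_gain reports the *change* in entropy over t_space
# (it subtracts H(p(. | D)) inside the sum), while
# hypotheses_expected_final_entropy reports the final entropy over
# fullh_space directly.
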
def main():
    # Optional: plot the posterior over the full hypothesis space.
    # posthist = np.empty(2 ** N)
    # for i in hspace:
    #     posthist[i] = posterior(i, testdata)
    # plt.plot(hspace, posthist)
    # plt.show()

    # Sanity checks: outcome probabilities with and without prior data.
    print(p_data_action((0, 0), 0), p_data_action((0, 1), 0))
    print(p_data_action((0, 0), 0, (0, 0)), p_data_action((0, 1), 0, (0, 0)))
    print(p_data_action((1, 0), 1, (0, 0)), p_data_action((1, 1), 1, (0, 0)))

    # Entropy of the prior and of posteriors after various observations.
    print("H prior: ", utils.H(lambda h: prior(h), hspace))
    print("H posterior: ", utils.H(lambda h: posterior(h, [(0, 0)]), hspace))
    print("H posterior: ", utils.H(lambda h: posterior(h, [(0, 0), (0, 0)]), hspace))
    print("H posterior: ", utils.H(lambda h: posterior(h, [(0, 0), (0, 1)]), hspace))
    print("H posterior: ", utils.H(lambda h: posterior(h, [(0, 0), (1, 0)]), hspace))
    print("H posterior: ", utils.H(lambda h: posterior(h, [(0, 0), (1, 1)]), hspace), "\n")

    # Expected posterior entropy per action, before and after observing (0, 0).
    print(entropy_gain(0), entropy_gain(1), entropy_gain(2))
    print(entropy_gain(0, [(0, 0)]), entropy_gain(1, [(0, 0)]), entropy_gain(2, [(0, 0)]))

    print("action: ", choose_action([(1, 0), (0, 1), (3, 0)]))

def entropy_gain(a, prev_data=None):
    # Expected posterior entropy after taking action `a`, averaged over the
    # possible outcomes d, weighted by p(d | a, prev_data).
    expval = 0
    for d in possible_data(a):
        alldata = [d] if prev_data is None else [d] + prev_data
        expval += (utils.H(lambda h: posterior(h, alldata), hspace)
                   * p_data_action(d, a, prev_data))
    return expval

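# A minimal, self-contained sketch of the same expectation on a toy model:
# two hypotheses (a fair coin vs. a 0.9-biased coin) and a single action,
# "flip once". toy_H, toy_posterior, and toy_entropy_gain are hypothetical
# stand-ins for utils.H, posterior, and entropy_gain, not part of this project.
import math

TOY_BIAS = {0: 0.5, 1: 0.9}  # P(heads) under each hypothesis

def toy_posterior(h, data):
    # p(h | data) under a uniform prior over the two hypotheses.
    def unnorm(hh):
        p = 0.5
        for flip in data:
            p *= TOY_BIAS[hh] if flip == 1 else 1 - TOY_BIAS[hh]
        return p
    return unnorm(h) / (unnorm(0) + unnorm(1))

def toy_H(p, space):
    # Shannon entropy of the distribution p over `space`.
    return -sum(p(x) * math.log(p(x)) for x in space if p(x) > 0)

def toy_entropy_gain(prev_data=None):
    # Mirrors entropy_gain: expected posterior entropy after one more flip.
    prev_data = prev_data or []
    def p_flip(d):
        return sum(toy_posterior(h, prev_data)
                   * (TOY_BIAS[h] if d == 1 else 1 - TOY_BIAS[h])
                   for h in (0, 1))
    return sum(toy_H(lambda h: toy_posterior(h, [d] + prev_data), (0, 1))
               * p_flip(d)
               for d in (0, 1))

# Sanity check: toy_entropy_gain() < toy_H(lambda h: 0.5, (0, 1)) == log 2,
# i.e. one flip is expected to reduce uncertainty about which coin it is.
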
import torch
import torch.nn as nn

import utils

filename = 'audios/mix.wav'
nb_seconds = 3
nb_epoch = 200

# Load the mixture signal and keep the first nb_seconds of audio.
audio, sample_rate = utils.load(filename)
audio = audio[:, :sample_rate * nb_seconds]
nb_mixtures, T = audio.size()
nb_sources = nb_mixtures

# Unmixing matrix B, learned as a bias-free linear layer, and the entropy
# estimate built from utils.edgeworth, used as the separation criterion.
B = nn.Linear(nb_mixtures, nb_sources, bias=False)
estim_function = utils.H(utils.edgeworth, nb_sources)

#%%
def train(model, mixtures, optimizer, criterion, epoch):
    model.train()
    # Estimated sources y = B x: Linear acts on the last dimension, so
    # transpose (nb_mixtures, T) -> (T, nb_mixtures) and back.
    y = torch.t(model(torch.t(mixtures)))
    # Compute the loss and take one optimizer step.
    loss = criterion(y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
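
# A minimal sketch of how train() might be driven. The optimizer choice,
# learning rate, and logging interval are assumptions, not taken from this
# file; it only assumes estim_function is usable as the criterion y -> loss.
import torch.optim as optim

def run():
    mixtures = audio  # shape (nb_mixtures, T)
    optimizer = optim.SGD(B.parameters(), lr=1e-3)
    for epoch in range(nb_epoch):
        loss = train(B, mixtures, optimizer, estim_function, epoch)
        if epoch % 20 == 0:
            print('epoch %d, loss %.4f' % (epoch, float(loss)))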