Example No. 1
 def expected_final_entropy(self, action, data=None):
     return entropy_gains.hypotheses_expected_final_entropy(action, data)
Example No. 2
	def expected_final_entropy(self, action, data=None):
		return entropy_gains.hypotheses_expected_final_entropy(action, data)
Example No. 3
def main(player, n):
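    """Score action sequences by expected entropy gain / expected final entropy.

    Reads the adults input file, truncates each kid's sequence to n actions,
    evaluates the requested player ('kids'/'adults', 'random', 'theory',
    'theoryfull', 'jointfull', or 'hypfull'), and writes the resulting arrays
    to text files under parameters.output_directory.
    """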
    #random.seed(0)

    starttime = time.clock()
    # if player=='adults':
    # 	data=Data.Data(parameters.inputfile_adults)
    # 	data.read(astext=False)
    # elif player=='kids':
    # 	data=Data.Data(parameters.inputfile_kids)
    # 	data.read(astext=False)

    #data=Data.Data(parameters.inputfile_kids)
    data = Data.Data(parameters.inputfile_adults)
    data.read(astext=False)

    n_kids = parameters.n_kids
    truncate = int(n)
    n_r_theo = parameters.n_r_theo  # needed by the 'theory' branch below
    n_r_random = parameters.n_r_random

    eg = np.zeros(len(data.get_kids()[:n_kids]))

    if player in ('kids', 'adults'):
        n_r = 1
        for k, kid in enumerate(data.get_kids()[:n_kids]):
            kidseq = data.data[kid][:truncate]
            keg = entropy_gains.ave_theory_expected_entropy_gain(kidseq)[0]
            #print kid, keg
            eg[k] = keg
            #nkegs

    elif player == 'random':
        n_r = n_r_random
        egall = np.zeros((len(data.get_kids()[:n_kids]), n_r))
        for k, kid in enumerate(data.get_kids()[:n_kids]):
            for r in range(n_r_random):
                rl = learners.RandomLearner()
                rlseq = rl.play(min(data.get_kid_nactions(kid), truncate))
                reg = entropy_gains.ave_theory_expected_entropy_gain(rlseq)[0]
                #print kid, reg
                eg[k] += reg
                egall[k, r] = reg
            eg[k] /= n_r
        #[d.display() for d in rlseq]
        #print reg

    elif player == 'theory':
        n_r = n_r_theo
        egall = np.zeros((len(data.get_kids()[:n_kids]), n_r))
        for k, kid in enumerate(data.get_kids()[:n_kids]):
            for r in range(n_r_theo):
                tl = learners.TheoryLearner()
                tlseq = tl.play(min(data.get_kid_nactions(kid), truncate))
                teg = entropy_gains.ave_theory_expected_entropy_gain(tlseq)[0]
                #print kid, teg
                eg[k] += teg
                egall[k, r] = teg
            eg[k] /= n_r

    elif player == 'theoryfull':

        n_long_kids = sum(data.get_kid_nactions(kid) >= truncate
                          for kid in data.get_kids())
        eig = np.zeros((n_long_kids, 3))
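        # eig columns: 0 = yoked theory-learner score, 1 = random-learner average, 2 = kid's own last action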
        tlactions = []
        rlactions = []

        k = 0
        for ki, kid in enumerate(data.get_kids()[:n_kids]):
            print 'Run for {0} actions, processing kid {1} out of {2}'.format(
                truncate, ki + 1, n_kids)
            if data.get_kid_nactions(kid) < truncate:
                continue

            #get kid's action sequence
            kidseq = data.data[kid][:truncate]
            #keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
            #keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
            keg = entropy_gains.theory_expected_final_entropy(
                kidseq[-1].action, kidseq[:-1])

            #print 'kid {0} entropies: {1}'.format(k,kents)

            #compute optimal choice entropy gain with kid's action sequence
            tl = learners.TheoryLearner()
            tlaction = tl.choose_action(kidseq[:truncate - 1])
            tlactions.append(tlaction)
            yokedseq = kidseq[:-1] + [
                Datapoint.Datapoint(tlaction, False)
            ]  #this False is generic, shouldn't be taken into account
            #tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
            #tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
            tleg = entropy_gains.theory_expected_final_entropy(
                tlaction, kidseq[:-1])

            #print tlents

            reg = 0
            rlactions.append([])
            for r in range(n_r_random):
                rl = learners.RandomLearner()
                #rseq=rl.play(truncate)
                rlaction = rl.choose_action(kidseq[:truncate - 1])
                rlactions[k].append(rlaction)
                yokedseqr = kidseq[:-1] + [
                    Datapoint.Datapoint(rlaction, False)
                ]  #this False is generic, shouldn't be taken into account
                #reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
                #reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
                reg += entropy_gains.theory_expected_final_entropy(
                    rlaction, kidseq[:-1])

            reg /= n_r_random

            eig[k, 0] = tleg
            eig[k, 1] = reg
            eig[k, 2] = keg
            #print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
            k += 1

    elif player == 'jointfull':

        n_long_kids = sum(data.get_kid_nactions(kid) >= truncate
                          for kid in data.get_kids())
        eig = np.zeros((n_long_kids, 3))
        tlactions = []
        rlactions = []

        k = 0
        for ki, kid in enumerate(data.get_kids()[:n_kids]):
            if data.get_kid_nactions(kid) < truncate:
                continue

            #get kid's action sequence
            kidseq = data.data[kid][:truncate]
            #keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
            #keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
            keg = entropy_gains.joint_expected_final_entropy(
                kidseq[-1].action, kidseq[:-1])

            #print 'kid {0} entropies: {1}'.format(k,kents)

            #compute optimal choice entropy gain with kid's action sequence
            tl = learners.JointLearner()
            tlaction = tl.choose_action(kidseq[:truncate - 1])
            tlactions.append(tlaction)
            yokedseq = kidseq[:-1] + [
                Datapoint.Datapoint(tlaction, False)
            ]  #this False is generic, shouldn't be taken into account
            #tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
            #tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
            tleg = entropy_gains.joint_expected_final_entropy(
                tlaction, kidseq[:-1])

            #print tlents

            reg = 0
            rlactions.append([])
            for r in range(n_r_random):
                rl = learners.RandomLearner()
                #rseq=rl.play(truncate)
                rlaction = rl.choose_action(kidseq[:truncate - 1])
                rlactions[k].append(rlaction)
                yokedseqr = kidseq[:-1] + [
                    Datapoint.Datapoint(rlaction, False)
                ]  #this False is generic, shouldn't be taken into account
                #reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
                #reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
                reg += entropy_gains.joint_expected_final_entropy(
                    rlaction, kidseq[:-1])

            reg /= n_r_random

            eig[k, 0] = tleg
            eig[k, 1] = reg
            eig[k, 2] = keg
            #print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
            k += 1

    elif player == 'hypfull':

        n_long_kids = sum(data.get_kid_nactions(kid) >= truncate
                          for kid in data.get_kids())
        eig = np.zeros((n_long_kids, 3))
        tlactions = []
        rlactions = []

        k = 0
        for ki, kid in enumerate(data.get_kids()[:n_kids]):
            if data.get_kid_nactions(kid) < truncate:
                continue

            #get kid's action sequence
            kidseq = data.data[kid][:truncate]
            #keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
            #keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
            keg = entropy_gains.hypotheses_expected_final_entropy(
                kidseq[-1].action, kidseq[:-1])

            #print 'kid {0} entropies: {1}'.format(k,kents)

            #compute optimal choice entropy gain with kid's action sequence
            tl = learners.HypothesesLearner()
            tlaction = tl.choose_action(kidseq[:truncate - 1])
            tlactions.append(tlaction)
            yokedseq = kidseq[:-1] + [
                Datapoint.Datapoint(tlaction, False)
            ]  #this False is generic, shouldn't be taken into account
            #tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
            #tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
            tleg = entropy_gains.hypotheses_expected_final_entropy(
                tlaction, kidseq[:-1])

            #print tlents

            reg = 0
            rlactions.append([])
            for r in range(n_r_random):
                rl = learners.RandomLearner()
                #rseq=rl.play(truncate)
                rlaction = rl.choose_action(kidseq[:truncate - 1])
                rlactions[k].append(rlaction)
                yokedseqr = kidseq[:-1] + [
                    Datapoint.Datapoint(rlaction, False)
                ]  #this False is generic, shouldn't be taken into account
                #reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
                #reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
                reg += entropy_gains.hypotheses_expected_final_entropy(
                    rlaction, kidseq[:-1])

            reg /= n_r_random

            eig[k, 0] = tleg
            eig[k, 1] = reg
            eig[k, 2] = keg
            #print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
            k += 1

    if player in ['random', 'theory', 'kids', 'adults']:
        filename = parameters.output_directory + 'out-' + player + '-' + str(
            truncate) + '_tru-' + str(n_r) + '_real.txt'
        np.savetxt(filename, eg)

    if player in ['random', 'theory']:
        filenameall = parameters.output_directory + 'all-' + player + '-' + str(
            truncate) + '_tru-' + str(n_r) + '_real.txt'
        np.savetxt(filenameall, egall)

    if player in ['theoryfull', 'jointfull', 'hypfull']:
        filename=parameters.output_directory+player+'-'+str(truncate)+'_tru-'\
          +str(n_r_random)+'_rreal.txt'
        np.savetxt(filename, eig)

        with open(parameters.output_directory+player+'-modelactions-'+str(truncate)+'_tru-'+\
         str(n_r_random)+'_rreal.txt','w') as f:
            for kact in tlactions:
                f.write(str(kact) + '\n')

        with open(parameters.output_directory+player+'-randomactions-'+str(truncate)+'_tru-'+\
         str(n_r_random)+'_rreal.txt','w') as f:
            for kact in rlactions:
                f.write(str(kact) + '\n')

    print 'time elapsed for run {0}: {1:.0f} s'.format(
        filename,
        time.clock() - starttime)
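A minimal invocation sketch for the function above; the command-line wiring is an assumption, since the listing only defines main():

if __name__ == '__main__':
    import sys
    # e.g. `python run.py theoryfull 5` -> player='theoryfull', n='5'
    main(sys.argv[1], sys.argv[2])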
Example No. 4
def main(player, n):
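	"""Variant of the run above: reads the default Data.Data() input and scores
	'kids', 'random', 'theory', 'theoryfull', 'jointfull', or 'hypfull'."""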
	#random.seed(0)

	starttime=time.clock()
	data=Data.Data()
	data.read(astext=False)
	n_kids=parameters.n_kids
	truncate=int(n)
	n_r_theo=parameters.n_r_theo  # needed by the 'theory' branch below
	n_r_random=parameters.n_r_random

	eg=np.zeros(len(data.get_kids()[:n_kids]))
	
	if player=='kids':
		n_r=1
		for k,kid in enumerate(data.get_kids()[:n_kids]):
			kidseq=data.data[kid][:truncate]
			keg=entropy_gains.ave_theory_expected_entropy_gain(kidseq)[0]
			#print kid, keg
			eg[k]=keg
			#nkegs

	elif player=='random':
		n_r=n_r_random
		egall=np.zeros((len(data.get_kids()[:n_kids]), n_r))
		for k,kid in enumerate(data.get_kids()[:n_kids]):
			for r in range(n_r_random):
				rl=learners.RandomLearner()
				rlseq=rl.play(min(data.get_kid_nactions(kid),truncate))
				reg=entropy_gains.ave_theory_expected_entropy_gain(rlseq)[0]
				#print kid, reg
				eg[k]+=reg
				egall[k,r]=reg
			eg[k]/=n_r
		#[d.display() for d in rlseq]
		#print reg

	elif player=='theory':
		n_r=n_r_theo
		egall=np.zeros((len(data.get_kids()[:n_kids]), n_r))
		for k,kid in enumerate(data.get_kids()[:n_kids]):
			for r in range(n_r_theo):
				tl=learners.TheoryLearner()
				tlseq=tl.play(min(data.get_kid_nactions(kid),truncate))
				teg=entropy_gains.ave_theory_expected_entropy_gain(tlseq)[0]
				#print kid, teg
				eg[k]+=teg
				egall[k,r]=teg
			eg[k]/=n_r




	elif player=='theoryfull':

		n_long_kids=sum([data.get_kid_nactions(kid)>=truncate \
						 for kid in data.get_kids()])
		eig=np.zeros((n_long_kids,3))
		tlactions=[]
		rlactions=[]

		
		k=0
		for ki,kid in enumerate(data.get_kids()[:n_kids]):
			if data.get_kid_nactions(kid)<truncate:
				continue
			
			#get kid's action sequence
			kidseq=data.data[kid][:truncate]
			#keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
			#keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
			keg=entropy_gains.theory_expected_final_entropy(kidseq[-1].action,kidseq[:-1])
			
			#print 'kid {0} entropies: {1}'.format(k,kents)
			
			#compute optimal choice entropy gain with kid's action sequence
			tl=learners.TheoryLearner()
			tlaction=tl.choose_action(kidseq[:truncate-1])
			tlactions.append(tlaction)
			yokedseq=kidseq[:-1]+[Datapoint.Datapoint(tlaction, False)]#this False is generic, shouldn't be taken into account
			#tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
			#tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
			tleg=entropy_gains.theory_expected_final_entropy(tlaction, kidseq[:-1])
			
			#print tlents

			reg=0
			rlactions.append([])
			for r in range(n_r_random):
				rl=learners.RandomLearner()
				#rseq=rl.play(truncate)
				rlaction=rl.choose_action(kidseq[:truncate-1])
				rlactions[k].append(rlaction)
				yokedseqr=kidseq[:-1]+[Datapoint.Datapoint(rlaction, False)]#this False is generic, shouldn't be taken into account
				#reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
				#reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
				reg+=entropy_gains.theory_expected_final_entropy(rlaction, kidseq[:-1])
				
			reg/=n_r_random
			

			eig[k,0]=tleg
			eig[k,1]=reg
			eig[k,2]=keg
			#print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
			k+=1


	elif player=='jointfull':

		n_long_kids=sum([data.get_kid_nactions(kid)>=truncate \
						 for kid in data.get_kids()])
		eig=np.zeros((n_long_kids,3))
		tlactions=[]
		rlactions=[]

		
		k=0
		for ki,kid in enumerate(data.get_kids()[:n_kids]):
			if data.get_kid_nactions(kid)<truncate:
				continue
			
			#get kid's action sequence
			kidseq=data.data[kid][:truncate]
			#keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
			#keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
			keg=entropy_gains.joint_expected_final_entropy(kidseq[-1].action,kidseq[:-1])
			
			#print 'kid {0} entropies: {1}'.format(k,kents)
			
			#compute optimal choice entropy gain with kid's action sequence
			tl=learners.JointLearner()
			tlaction=tl.choose_action(kidseq[:truncate-1])
			tlactions.append(tlaction)
			yokedseq=kidseq[:-1]+[Datapoint.Datapoint(tlaction, False)]#this False is generic, shouldn't be taken into account
			#tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
			#tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
			tleg=entropy_gains.joint_expected_final_entropy(tlaction, kidseq[:-1])
			
			#print tlents

			reg=0
			rlactions.append([])
			for r in range(n_r_random):
				rl=learners.RandomLearner()
				#rseq=rl.play(truncate)
				rlaction=rl.choose_action(kidseq[:truncate-1])
				rlactions[k].append(rlaction)
				yokedseqr=kidseq[:-1]+[Datapoint.Datapoint(rlaction, False)]#this False is generic, shouldn't be taken into account
				#reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
				#reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
				reg+=entropy_gains.joint_expected_final_entropy(rlaction, kidseq[:-1])
				
			reg/=n_r_random
			

			eig[k,0]=tleg
			eig[k,1]=reg
			eig[k,2]=keg
			#print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
			k+=1

	elif player=='hypfull':

		n_long_kids=sum([data.get_kid_nactions(kid)>=truncate \
						 for kid in data.get_kids()])
		eig=np.zeros((n_long_kids,3))
		tlactions=[]
		rlactions=[]

		
		k=0
		for ki,kid in enumerate(data.get_kids()[:n_kids]):
			if data.get_kid_nactions(kid)<truncate:
				continue
			
			#get kid's action sequence
			kidseq=data.data[kid][:truncate]
			#keg, kents=entropy_gains.ave_theory_expected_entropy_gain(kidseq)
			#keg=entropy_gains.theory_expected_entropy_gain(kidseq[-1].action,kidseq[:-1])
			keg=entropy_gains.hypotheses_expected_final_entropy(kidseq[-1].action,kidseq[:-1])
			
			#print 'kid {0} entropies: {1}'.format(k,kents)
			
			#compute optimal choice entropy gain with kid's action sequence
			tl=learners.HypothesesLearner()
			tlaction=tl.choose_action(kidseq[:truncate-1])
			tlactions.append(tlaction)
			yokedseq=kidseq[:-1]+[Datapoint.Datapoint(tlaction, False)]#this False is generic, shouldn't be taken into account
			#tleg, tlents=entropy_gains.ave_theory_expected_entropy_gain(yokedseq)
			#tleg=entropy_gains.theory_expected_entropy_gain(tlaction, kidseq[:-1])
			tleg=entropy_gains.hypotheses_expected_final_entropy(tlaction, kidseq[:-1])
			
			#print tlents

			reg=0
			rlactions.append([])
			for r in range(n_r_random):
				rl=learners.RandomLearner()
				#rseq=rl.play(truncate)
				rlaction=rl.choose_action(kidseq[:truncate-1])
				rlactions[k].append(rlaction)
				yokedseqr=kidseq[:-1]+[Datapoint.Datapoint(rlaction, False)]#this False is generic, shouldn't be taken into account
				#reg+=entropy_gains.ave_theory_expected_entropy_gain(yokedseqr)[0]
				#reg+=entropy_gains.theory_expected_entropy_gain(rlaction, kidseq[:-1])
				reg+=entropy_gains.hypotheses_expected_final_entropy(rlaction, kidseq[:-1])
				
			reg/=n_r_random
			

			eig[k,0]=tleg
			eig[k,1]=reg
			eig[k,2]=keg
			#print 'k: {0}, r:{1}, t:{2}'.format(keg, reg, tleg)
			k+=1



	if player in ['random', 'theory', 'kids']:
		filename=parameters.output_directory+'out-'+player+'-'+str(truncate)+'_tru-'+str(n_r)+'_real.txt'
		np.savetxt(filename, eg)

	if player in ['random', 'theory']:
		filenameall=parameters.output_directory+'all-'+player+'-'+str(truncate)+'_tru-'+str(n_r)+'_real.txt'
		np.savetxt(filenameall, egall)
	
	if player in ['theoryfull', 'jointfull', 'hypfull']:
		filename=parameters.output_directory+player+'-'+str(truncate)+'_tru-'\
				+str(n_r_random)+'_rreal.txt'
		np.savetxt(filename, eig)

		with open(parameters.output_directory+player+'-modelactions-'+str(truncate)+'_tru-'+\
			str(n_r_random)+'_rreal.txt','w') as f:
			for kact in tlactions:
				f.write(str(kact)+'\n')

		with open(parameters.output_directory+player+'-randomactions-'+str(truncate)+'_tru-'+\
			str(n_r_random)+'_rreal.txt','w') as f:
			for kact in rlactions:
				f.write(str(kact)+'\n')


	print 'time elapsed for run {0}: {1:.0f} s'.format(filename, time.clock()-starttime)
Example No. 5
# Compare per-kid entropy scores from the 'jointfull' and 'hypfull' output files
# produced by the script above.
# Assumed imports: numpy (needed for np.loadtxt) and output_directory, which is
# presumed to live in the project's parameters module.
import numpy as np
from parameters import output_directory

today='141119/'
#batch='ep-0.05/'
batch='varyep/'
data_directory=output_directory+today#+batch

n_act=2

model='jointfull'
filename=model+'-'+str(n_act)+'_tru-20'+'_rreal.txt'
datajoi=np.loadtxt(data_directory+filename)
print data_directory+filename

model='hypfull'
filename=model+'-'+str(n_act)+'_tru-20'+'_rreal.txt'
datahyp=np.loadtxt(data_directory+filename)
print data_directory+filename

for i in range(len(datahyp)):
	print datahyp[i,2]-datahyp[i,0], datajoi[i,2]-datajoi[i,0] 


import entropy_gains as eg

import world
pa=world.possible_actions()
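# Entropy difference between the first two possible actions: within the
# hypotheses model, then between the joint and hypotheses models.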
print eg.hypotheses_expected_final_entropy(pa[0],[])-eg.hypotheses_expected_final_entropy(pa[1],[])
print eg.joint_expected_final_entropy(pa[0],[])-eg.hypotheses_expected_final_entropy(pa[1],[])



Example No. 6

import low_model as model
import world
import entropy_gains as eg

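# Print the expected final entropy over hypotheses for every possible action,
# starting from an empty observation history.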
for action in world.possible_actions():
	print action, eg.hypotheses_expected_final_entropy(action,[])