# --- Consummatory-value setup and first simulation run ----------------------
# Reconstructed from a whitespace-mangled paste: the original newlines were
# lost, which left every statement below trapped inside a comment line.

# sensible consummatory values.
# "approach potential partner" will exhaust the opportunity after one turn.
bb.action_elicitor[i_friends, i_friends] = 0
# "meet friends" doesn't really get 'consumed' at all..
bb.action_elicitor[i_partner, i_partner] = 1
# NOTE(review): the two comments above look swapped relative to the
# assignments they precede (friends gets 0, partner gets 1) — confirm intent.
# studying also doesn't get 'consumed'.
bb.action_elicitor[i_study, i_study] = 0
# neither is the need satiated very quickly.
bb.action_state[i_study, i_study] = bb.action_state[i_study, i_study] / 4

bb.display_current_state()

# Step the model until the first action fires (its value becomes non-zero).
while bb.actions[0].value == 0:
    bb.step_and_display(1, 2)

# OK. In order to do copy we're going to have to copy the individual
# attributes. Otherwise we're currently capturing graph objects and things
# and that's causing problems.
print("tendency increasing over time:")
# loop variable renamed from 'iter', which shadowed the builtin
print([rec["actions"][0].tendency for rec in bb.record])
bb.step_and_display()

# OK. Now, we introduce a potential partner, friends, and also a study
# environment
bb.elicitors[i_friends].value = 1
bb.elicitors[i_partner].value = 1
bb.elicitors[i_study].value = 1
bb.step_and_display()
# next task:
# --- State/action relationship wiring and a first feeding episode -----------
# Reconstructed from a whitespace-mangled paste: the original newlines were
# lost, which left every statement below trapped inside a comment line.

# need to set up the relationships.
bb.state_action[range(0, 4), :] = np.diag([1.0] * 4)
# should modify this study mood to be less.
bb.state_action[4, :] = [0.25, 0.25, 0.25, 0.25]

# and the action_elicitor links.
# NOTE(review): the comment says action_elicitor, but the two lines below
# repeat the state_action assignments verbatim — looks like a copy-paste
# slip; confirm whether bb.action_elicitor was intended here.
bb.state_action[range(0, 4), :] = np.diag([1.0] * 4)
bb.state_action[4, :] = [0.25, 0.25, 0.25, 0.25]

# how much does each action satiate the "mood" item?
# this really ought to depend on the amount of reward signal and we don't
# have a way to do that in the existing model
# am I going down the wrong track?
# not necessarily...but it's not going to entirely make sense until we make
# the reward signal into an associationist model
# can we do that now or are there other more important things to sort out
# first?
########################################################################

bb.step_and_display()

# model doesn't do anything: there's no environment elicitation. let's bring
# some food in.
bb.elicitors[0].value = 1.0
bb.step()
bb.display_current_state()

# still nothing...because there's negative association with the action, it's
# not done.
# I'm not sure about including this reinforcement learning...
# anyhow, let's show the model that eating has a smaller negative than
# positive value.
bb.actions[0].neg_expectancy = 0.5
bb.actions[0].neg_val = 0.5
# bb.actions[1].pos_expectancy=2
# approaching a potential partner carries a high risk of rejection
# while meeting friends carries very little negative but also lesser
# positive expected gain
# partner