Example #1
File: Run.py Project: carlsonrw/niceness
from Observer import *
from Agent import *
from Event import *

MyEvent = Event(options=[0, 1],
                agentvalue=[2.5, 2],
                agentbeliefs=[9, 3],
                recipientrewards=[4, 8])
MyAgent = Agent()
# Create an observer with an event and a mental model of the agent
MyObserver = Observer(MyEvent, MyAgent)
MyObserver.ToM(1)  # Infers that the agent cares mainly about its own utilities, not the recipient's
MyObserver.ToM(0)  # Uncertain.

# Same as above, but now the agent values option 0 much more highly
MyEvent = Event(options=[0, 1],
                agentvalue=[10, 2],
                agentbeliefs=[9, 3],
                recipientrewards=[4, 8])
MyAgent = Agent()
MyObserver = Observer(MyEvent, MyAgent)
MyObserver.ToM(1)  # In contrast to the above, less certain that the agent is selfish (option 0 is very valuable to it)
MyObserver.ToM(0)  # Should infer low selfishness and high altruism
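
For intuition, here is a minimal, hypothetical sketch of the kind of inference the ToM comments above describe: a uniform prior over a selfishness weight, updated by a softmax choice likelihood. The function name, the utility form, and the weight grid are illustrative assumptions, not the project's internals, and agentbeliefs is omitted, so this will not reproduce the library's numbers.

import numpy as np

def tom_sketch(chosen, agentvalue, recipientrewards, temperature=1.0):
    # Hypothetical utility: w * own value + (1 - w) * recipient reward,
    # with w a selfishness weight in [0, 1].
    agentvalue = np.asarray(agentvalue, dtype=float)
    recipientrewards = np.asarray(recipientrewards, dtype=float)
    weights = np.linspace(0.0, 1.0, 101)  # uniform prior grid over w
    utilities = np.outer(weights, agentvalue) + np.outer(1.0 - weights, recipientrewards)
    exp_u = np.exp(utilities / temperature)            # softmax choice model
    likelihood = exp_u[:, chosen] / exp_u.sum(axis=1)  # P(choice | w)
    posterior = likelihood / likelihood.sum()          # uniform prior cancels
    return weights, posterior

weights, posterior = tom_sketch(chosen=0, agentvalue=[2.5, 2], recipientrewards=[4, 8])
print("Expected selfishness:", weights @ posterior)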
Example #2
File: Run.py Project: carlsonrw/niceness
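This snippet presupposes imports and input lists built earlier in Run.py. A hypothetical preamble, with the module layout assumed by analogy with Example #1 and the values borrowed from it purely for illustration:

import numpy as np
from Observer import *
from Actor import *  # assumed module name, mirroring Example #1's layout
from State import *  # assumed module name, mirroring Example #1's layout

# Illustrative inputs only; the real values are computed earlier in Run.py.
actions = [0, 1]
actor_rewards = [2.5, 2]
actor_beliefs = [9, 3]
receiver_rewards = [4, 8]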
# Convert to numpy arrays.
actions = np.array(actions)
actor_rewards = np.array(actor_rewards)
actor_beliefs = np.array(actor_beliefs)
receiver_rewards = np.array(receiver_rewards)

# Instantiate a state, actor, and observer.
state = State(actions=actions,
              actor_rewards=actor_rewards,
              actor_beliefs=actor_beliefs,
              receiver_rewards=receiver_rewards)
actor = Actor()
observer = Observer(state, actor)

print("Actions:")
print(actions)
# Pick the first action to evaluate.
index = 0
action = actions[index]
print("Action:")
print(action)

# Run the sacrifice model.
print(observer.sacrifice(action))

# Run the utilitarian model.
print(observer.utilitarian(action, receiver_rewards))

# Run the ToM model.
print(observer.ToM(index))
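
For intuition only: under the (unverified) reading that sacrifice scores how much own reward the actor gave up and utilitarian scores the combined reward of actor and receiver, minimal stand-ins might look like this. These are guesses at the semantics, not the project's implementations.

import numpy as np

def sacrifice_sketch(action, actions, actor_rewards):
    # Assumed semantics: own reward forgone relative to the actor's best option.
    i = list(actions).index(action)
    return float(np.max(actor_rewards) - actor_rewards[i])

def utilitarian_sketch(action, actions, actor_rewards, receiver_rewards):
    # Assumed semantics: total reward (actor + receiver) of the chosen option.
    i = list(actions).index(action)
    return float(actor_rewards[i] + receiver_rewards[i])

print(sacrifice_sketch(0, [0, 1], [2.5, 2]))            # -> 0.0
print(utilitarian_sketch(0, [0, 1], [2.5, 2], [4, 8]))  # -> 6.5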