Example #1
import numpy as np
from collections import defaultdict
# `ql`, `QLearnedPolicy`, and `SamplePolicy` come from the surrounding
# project and are assumed to already be in scope.

# Merge the train and test splits back into single arrays / lists.
next_states_377 = np.concatenate(
    (next_states_377_train, next_states_377_test),
    axis=0)
actions_377 = np.concatenate(
    (actions_377_train, actions_377_test),
    axis=0)
rewards_377 = np.concatenate(
    (rewards_377_train, rewards_377_test),
    axis=0)

trajectories_377 = trajectories_377_train + trajectories_377_test

# Evaluation for Q377: estimate per-state rewards, discount them,
# and pick the states with the highest (discounted) reward.
print '-------------------Evaluation for Q377--------------------------------'
state_rewards_377 = ql.estimate_rewards(next_states_377_train, actions_377_train, rewards_377_train, action_q377)
discounted_rewards_377 = ql.discount_rewards(state_rewards_377, discount)
discounted_max_states_377 = ql.get_max_reward_states(discounted_rewards_377)
max_states_377 = ql.get_max_reward_states(state_rewards_377)
q377_policy = QLearnedPolicy(discounted_max_states_377[0], q377_labels)

print 'Reward of max state = {a}, discounted max state = {b}'.format(
    a = state_rewards_377[max_states_377[0]], b = state_rewards_377[discounted_max_states_377[0]])
print 'Discounted reward of max state = {a}, discounted max state = {b}'.format(
    a = discounted_rewards_377[max_states_377[0]], b = discounted_rewards_377[discounted_max_states_377[0]])
print 'Max state actions: {a} \nDiscounted max state actions: {b}'.format(
    a = q377_labels[np.array(max_states_377[0]).astype(int) == 1], b = q377_labels[np.array(discounted_max_states_377[0]).astype(int) == 1])
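
The masking idiom in the last print, q377_labels[np.array(max_states_377[0]).astype(int) == 1], treats a max-reward state as a 0/1 vector over the available actions and uses it as a boolean mask into the label array. A small self-contained illustration (the label names below are made up):

import numpy as np

labels = np.array(['action_a', 'action_b', 'action_c', 'action_d'])
state = (1, 0, 1, 0)   # binary flags marking which actions are active in this state

# Cast the state to ints, compare against 1 to build a boolean mask,
# then select the matching labels.
active_labels = labels[np.array(state).astype(int) == 1]
# active_labels -> array(['action_a', 'action_c'], dtype='<U8')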

# Count how often each action was taken in each observed state.
action_counts_377 = defaultdict(lambda: defaultdict(int))
for s, a in zip(states_377, actions_377):
    action_counts_377[tuple(s)][a] += 1
sample_policy_377 = SamplePolicy(action_counts_377)
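
SamplePolicy itself is not defined in this snippet. Purely as an illustration of how the nested count table could back a policy, here is a hypothetical stand-in that returns the most frequently observed action for a state; the real class may instead sample actions in proportion to their counts or expose a different interface:

class MostFrequentActionPolicy(object):
    """Hypothetical stand-in for SamplePolicy: picks the action seen most
    often in a given state, or None for states never observed."""
    def __init__(self, action_counts):
        # action_counts: {state tuple: {action: observed count}}
        self.action_counts = action_counts

    def action(self, state):
        counts = self.action_counts.get(tuple(state), {})
        if not counts:
            return None
        return max(counts, key=counts.get)

# Hypothetical usage with the counts built above:
# policy = MostFrequentActionPolicy(action_counts_377)
# policy.action(some_state_tuple)
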
Example #2
# Merge the train and test splits for the Q315 data.
next_states_315 = np.concatenate(
    (next_states_315_train, next_states_315_test),
    axis=0)
actions_315 = np.concatenate(
    (actions_315_train, actions_315_test),
    axis=0)
rewards_315 = np.concatenate(
    (rewards_315_train, rewards_315_test),
    axis=0)

trajectories_315 = trajectories_315_train + trajectories_315_test

# Evaluation for Q315: same estimate -> discount -> max-state pipeline as for Q377.
print '-------------------Evaluation for Q315--------------------------------'
state_rewards_315 = ql.estimate_rewards(next_states_315_train, actions_315_train, rewards_315_train, action_q315)
discounted_rewards_315 = ql.discount_rewards(state_rewards_315, discount)
discounted_max_states_315 = ql.get_max_reward_states(discounted_rewards_315)
max_states_315 = ql.get_max_reward_states(state_rewards_315)
q315_policy = QLearnedPolicy(discounted_max_states_315[0], q315_labels)

print 'Reward of max state = {a}, discounted max state = {b}'.format(
    a = state_rewards_315[max_states_315[0]], b = state_rewards_315[discounted_max_states_315[0]])
print 'Discounted reward of max state = {a}, discounted max state = {b}'.format(
    a = discounted_rewards_315[max_states_315[0]], b = discounted_rewards_315[discounted_max_states_315[0]])
print 'Max state actions: {a} \nDiscounted max state actions: {b}'.format(
    a = q315_labels[np.array(max_states_315[0]).astype(int) == 1], b = q315_labels[np.array(discounted_max_states_315[0]).astype(int) == 1])

# Empirical action counts per observed state for the Q315 data.
action_counts_315 = defaultdict(lambda: defaultdict(int))
for s, a in zip(states_315, actions_315):
    action_counts_315[tuple(s)][a] += 1
sample_policy_315 = SamplePolicy(action_counts_315)
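
Both examples run the identical estimate -> discount -> select-max-state pipeline, so the repeated block could be folded into one helper. A sketch, assuming the ql helpers and QLearnedPolicy keep exactly the signatures used above:

def evaluate_q(next_states_train, actions_train, rewards_train,
               action_q, labels, discount):
    """Estimate per-state rewards, discount them, find the max-reward states,
    and build a policy from the top discounted state."""
    state_rewards = ql.estimate_rewards(next_states_train, actions_train,
                                        rewards_train, action_q)
    discounted_rewards = ql.discount_rewards(state_rewards, discount)
    max_states = ql.get_max_reward_states(state_rewards)
    discounted_max_states = ql.get_max_reward_states(discounted_rewards)
    policy = QLearnedPolicy(discounted_max_states[0], labels)
    return state_rewards, discounted_rewards, max_states, discounted_max_states, policy

# e.g. for the Q377 data:
# (state_rewards_377, discounted_rewards_377, max_states_377,
#  discounted_max_states_377, q377_policy) = evaluate_q(
#      next_states_377_train, actions_377_train, rewards_377_train,
#      action_q377, q377_labels, discount)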