actions_377 = np.concatenate(
    (actions_377_train, actions_377_test),
    axis = 0)
rewards_377 = np.concatenate(
    (rewards_377_train, rewards_377_test),
    axis = 0)

trajectories_377 = trajectories_377_train + trajectories_377_test
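# The concatenated arrays above cover train + test; the empirical action counts
# below are built from the full data, while estimate_utility / hcope are run
# only on the held-out test trajectories.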

print '-------------------Evaluation for Q377--------------------------------'
# Evaluation for Q377: estimate per-state rewards from the training data and
# action_q377, discount them, find the max-reward states, and build a
# QLearnedPolicy from the top discounted state.
state_rewards_377 = ql.estimate_rewards(next_states_377_train, actions_377_train, rewards_377_train, action_q377)
discounted_rewards_377 = ql.discount_rewards(state_rewards_377, discount)
discounted_max_states_377 = ql.get_max_reward_states(discounted_rewards_377)
max_states_377 = ql.get_max_reward_states(state_rewards_377)
q377_policy = QLearnedPolicy(discounted_max_states_377[0], q377_labels)

print 'Reward of max state = {a}, discounted max state = {b}'.format(
    a = state_rewards_377[max_states_377[0]], b = state_rewards_377[discounted_max_states_377[0]])
print 'Discounted reward of max state = {a}, discounted max state = {b}'.format(
    a = discounted_rewards_377[max_states_377[0]], b = discounted_rewards_377[discounted_max_states_377[0]])
print 'Max state actions: {a} \nDiscounted max state actions: {b}'.format(
    a = q377_labels[np.array(max_states_377[0]).astype(int) == 1], b = q377_labels[np.array(discounted_max_states_377[0]).astype(int) == 1])

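# Empirical (behaviour) policy: count how often each action was taken in each
# observed state, then wrap the counts in a SamplePolicy.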
action_counts_377 = defaultdict(lambda: defaultdict(int))
for s,a in zip(states_377,actions_377):
    action_counts_377[tuple(s)][a] += 1
sample_policy_377 = SamplePolicy(action_counts_377)
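# Off-policy evaluation of the learned Q377 policy on the held-out test
# trajectories: an expected-reward estimate (presumably importance-sampling
# based) plus a high-confidence lower bound (hcope presumably implements
# high-confidence off-policy evaluation at confidence level 1 - delta).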
expected_reward_377 = estimate_utility(sample_policy_377, q377_policy, trajectories_377_test, discount)
lower_bound_377 = hcope(sample_policy_377, q377_policy, trajectories_377_test, discount, delta)
print 'Expected Reward = {r}, Lower bound = {l}'.format(r = expected_reward_377, l = lower_bound_377)
print 'Expected Reward sample = {r}'.format(r = estimate_utility(sample_policy_377, sample_policy_377, trajectories_377_test, discount))
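
# For reference, a minimal sketch of the kind of estimate estimate_utility is
# assumed to compute above: ordinary importance sampling, where each test
# trajectory's discounted return is weighted by the cumulative ratio of target
# to behaviour action probabilities. The prob_fn callables and the
# (state, action, reward) trajectory format are illustrative assumptions, not
# the actual API of SamplePolicy / QLearnedPolicy.
def importance_sampling_utility(behaviour_prob_fn, target_prob_fn, trajectories, discount):
    estimates = []
    for trajectory in trajectories:
        weight = 1.0
        discounted_return = 0.0
        for t, (state, action, reward) in enumerate(trajectory):
            # likelihood ratio of the target policy vs. the behaviour policy
            weight *= target_prob_fn(state, action) / behaviour_prob_fn(state, action)
            discounted_return += (discount ** t) * reward
        # weight the full discounted return by the full-trajectory importance weight
        estimates.append(weight * discounted_return)
    return np.mean(estimates)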