Example #1

import numpy as np
from collections import defaultdict

# Names not defined in this excerpt (the *_train/*_test arrays, states_377,
# the ql module, QLearnedPolicy, SamplePolicy, estimate_utility, hcope,
# discount, and delta) are defined earlier in the source file.
actions_377 = np.concatenate(
    (actions_377_train, actions_377_test),
    axis = 0)
rewards_377 = np.concatenate(
    (rewards_377_train, rewards_377_test),
    axis = 0)

trajectories_377 = trajectories_377_train + trajectories_377_test

print '-------------------Evaluation for Q377--------------------------------'
# Evaluation for Q377: score states by estimated reward, apply discounting,
# and build a policy from the highest-scoring discounted state.
state_rewards_377 = ql.estimate_rewards(next_states_377_train, actions_377_train, rewards_377_train, action_q377)
discounted_rewards_377 = ql.discount_rewards(state_rewards_377, discount)
discounted_max_states_377 = ql.get_max_reward_states(discounted_rewards_377)
max_states_377 = ql.get_max_reward_states(state_rewards_377)
q377_policy = QLearnedPolicy(discounted_max_states_377[0], q377_labels)

print 'Reward of max state = {a}, discounted max state = {b}'.format(
    a = state_rewards_377[max_states_377[0]], b = state_rewards_377[discounted_max_states_377[0]])
print 'Discounted reward of max state = {a}, discounted max state = {b}'.format(
    a = discounted_rewards_377[max_states_377[0]], b = discounted_rewards_377[discounted_max_states_377[0]])
print 'Max state actions: {a} \nDiscounted max state actions: {b}'.format(
    a = q377_labels[np.array(max_states_377[0]).astype(int) == 1], b = q377_labels[np.array(discounted_max_states_377[0]).astype(int) == 1])

# Build an empirical behavior policy from the observed state-action counts.
action_counts_377 = defaultdict(lambda: defaultdict(int))
for s, a in zip(states_377, actions_377):
    action_counts_377[tuple(s)][a] += 1
sample_policy_377 = SamplePolicy(action_counts_377)
# Off-policy evaluation of the learned policy against the empirical behavior
# policy on the held-out trajectories, plus a high-confidence lower bound.
expected_reward_377 = estimate_utility(sample_policy_377, q377_policy, trajectories_377_test, discount)
lower_bound_377 = hcope(sample_policy_377, q377_policy, trajectories_377_test, discount, delta)
print 'Expected Reward = {r}, Lower bound = {l}'.format(r = expected_reward_377, l = lower_bound_377)
print 'Expected Reward sample = {r}'.format(r = estimate_utility(sample_policy_377, sample_policy_377, trajectories_377_test, discount))
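Both examples feed a SamplePolicy built from raw state-action counts, together
with the Q-learned policy, into estimate_utility and hcope on the held-out
trajectories. A minimal sketch of the kind of computation estimate_utility is
assumed to perform (per-trajectory importance-sampling off-policy evaluation)
is given below; the .probability(state, action) interface, the
(state, action, reward) trajectory format, and the function name are
illustrative assumptions, not the actual implementation.

import numpy as np

def estimate_utility_sketch(behavior_policy, eval_policy, trajectories, discount):
    # Per-trajectory importance sampling: reweight each observed discounted
    # return by how much more (or less) likely the evaluation policy is to
    # have taken the logged actions than the behavior policy.
    estimates = []
    for trajectory in trajectories:
        weight, ret = 1.0, 0.0
        for t, (state, action, reward) in enumerate(trajectory):
            p_b = behavior_policy.probability(state, action)  # assumed interface
            p_e = eval_policy.probability(state, action)      # assumed interface
            weight *= (p_e / p_b) if p_b > 0 else 0.0
            ret += (discount ** t) * reward
        estimates.append(weight * ret)
    return np.mean(estimates)
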
Example #2

actions_315 = np.concatenate(
    (actions_315_train, actions_315_test),
    axis = 0)
rewards_315 = np.concatenate(
    (rewards_315_train, rewards_315_test),
    axis = 0)

trajectories_315 = trajectories_315_train + trajectories_315_test

print '-------------------Evaluation for Q315--------------------------------'
# Evaluation for Q315: score states by estimated reward, apply discounting,
# and build a policy from the highest-scoring discounted state.
state_rewards_315 = ql.estimate_rewards(next_states_315_train, actions_315_train, rewards_315_train, action_q315)
discounted_rewards_315 = ql.discount_rewards(state_rewards_315, discount)
discounted_max_states_315 = ql.get_max_reward_states(discounted_rewards_315)
max_states_315 = ql.get_max_reward_states(state_rewards_315)
q315_policy = QLearnedPolicy(discounted_max_states_315[0], q315_labels)

print 'Reward of max state = {a}, discounted max state = {b}'.format(
    a = state_rewards_315[max_states_315[0]], b = state_rewards_315[discounted_max_states_315[0]])
print 'Discounted reward of max state = {a}, discounted max state = {b}'.format(
    a = discounted_rewards_315[max_states_315[0]], b = discounted_rewards_315[discounted_max_states_315[0]])
print 'Max state actions: {a} \nDiscounted max state actions: {b}'.format(
    a = q315_labels[np.array(max_states_315[0]).astype(int) == 1], b = q315_labels[np.array(discounted_max_states_315[0]).astype(int) == 1])

# Build an empirical behavior policy from the observed state-action counts.
action_counts_315 = defaultdict(lambda: defaultdict(int))
for s, a in zip(states_315, actions_315):
    action_counts_315[tuple(s)][a] += 1
sample_policy_315 = SamplePolicy(action_counts_315)
# Off-policy evaluation of the learned policy against the empirical behavior
# policy on the held-out trajectories, plus a high-confidence lower bound.
expected_reward_315 = estimate_utility(sample_policy_315, q315_policy, trajectories_315_test, discount)
lower_bound_315 = hcope(sample_policy_315, q315_policy, trajectories_315_test, discount, delta)
print 'Expected Reward = {r}, Lower bound = {l}'.format(r = expected_reward_315, l = lower_bound_315)
print 'Expected Reward sample = {r}'.format(r = estimate_utility(sample_policy_315, sample_policy_315, trajectories_315_test, discount))
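The hcope call is expected to return a (1 - delta)-confidence lower bound on
the learned policy's expected return. One simple way such a bound can be formed
(a sketch under stated assumptions, not necessarily what hcope does) is to clip
the per-trajectory importance-weighted returns, as built in
estimate_utility_sketch above, and apply a one-sided Hoeffding bound. Clipping
keeps the estimates bounded at the cost of a conservative downward bias, and
the bound below additionally assumes per-trajectory returns lie in [0, 1].

import numpy as np

def hcope_lower_bound_sketch(weighted_returns, delta, clip = 10.0):
    # weighted_returns: per-trajectory importance-weighted returns, e.g. the
    # `estimates` list accumulated inside estimate_utility_sketch above.
    values = np.minimum(np.asarray(weighted_returns, dtype = float), clip)
    n = len(values)
    # One-sided Hoeffding bound for i.i.d. values in [0, clip]: with
    # probability at least 1 - delta their true mean, and therefore the
    # (larger) unclipped expected return, is at least this value.
    return values.mean() - clip * np.sqrt(np.log(1.0 / delta) / (2.0 * n))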