Example #1
def run_dualdice_test(model_path: str, alpha: float):
    device = torch.device("cuda") if torch.cuda.is_available() else None
    logger.info(f"Device - {device}")
    model = torch.jit.load(model_path)
    model = model.dqn_with_preprocessor.model

    random_policy = RandomRLPolicy(ActionSpace(2))
    model_policy = PyTorchPolicy(ActionSpace(2), model)
    # Target policy: a fixed 70/30 mixture of the trained model and a random policy.
    target_policy = ComboPolicy(
        ActionSpace(2), [0.7, 0.3], [model_policy, random_policy]
    )
    # Behavior policy: alpha interpolates from a 55/45 mixture (alpha=0) toward
    # the same 70/30 mixture as the target policy (alpha=1).
    behavior_policy = ComboPolicy(
        ActionSpace(2),
        [0.55 + 0.15 * alpha, 0.45 - 0.15 * alpha],
        [model_policy, random_policy],
    )

    ground_truth = estimate_value(NUM_EPISODES, MAX_HORIZON, target_policy, GAMMA)
    log_policy_value = estimate_value(NUM_EPISODES, MAX_HORIZON, behavior_policy, GAMMA)
    trained_policy_value = estimate_value(
        NUM_EPISODES, MAX_HORIZON, model_policy, GAMMA
    )

    logger.info(f"Target Policy Ground Truth value: {ground_truth}")
    logger.info(f"Behavior Policy Ground Truth value: {log_policy_value}")
    logger.info(f"Model Policy Ground Truth value: {trained_policy_value}")

    log = generate_logs(NUM_EPISODES, MAX_HORIZON, behavior_policy)

    inp = RLEstimatorInput(
        gamma=GAMMA, log=log, target_policy=target_policy, discrete_states=False
    )
    ips = IPSEstimator()
    # DualDICE training losses and per-epoch value estimates, populated by the
    # loss callback during training.
    dualdice_losses = []
    dualdice_values = []
    dualdice = NeuralDualDICE(
        state_dim=4,
        action_dim=2,
        deterministic_env=True,
        average_next_v=False,
        value_lr=0.003,
        zeta_lr=0.003,
        batch_size=2048,
        reporting_frequency=1000,
        training_samples=100000,
        loss_callback_fn=zeta_nu_loss_callback(dualdice_losses, dualdice_values, inp),
        device=device,
    )

    ips_result = ips.evaluate(inp)
    dd_result = dualdice.evaluate(inp)

    return {
        "ips_estimate": ips_result,
        "dualdice_estimate": dd_result,
        "ground_truth": ground_truth,
        "dualdice_losses": dualdice_losses,
        "dualdice_estimates_per_epoch": dualdice_values,
    }
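A minimal driver for the example above might look like the sketch below; the checkpoint path and the alpha sweep are illustrative placeholders rather than values from the original code.

if __name__ == "__main__":
    # Hypothetical invocation: the model path and alpha values are placeholders.
    for alpha in (0.0, 0.5, 1.0):
        results = run_dualdice_test("outputs/dqn_model.pt", alpha)
        logger.info(
            f"alpha={alpha}: IPS={results['ips_estimate']}, "
            f"DualDICE={results['dualdice_estimate']}, "
            f"ground truth={results['ground_truth']}"
        )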
Example #2
def rlestimator_input_to_edp(input: RLEstimatorInput,
                             num_actions: int) -> EvaluationDataPage:
    mdp_ids = []
    logged_propensities = []
    logged_rewards = []
    action_mask = []
    model_propensities = []
    model_values = []

    # Flatten the log into one row per transition; transitions from the same MDP
    # share an mdp_id taken from the running row count when that MDP starts.
    for _, mdps in input.log.items():
        for mdp in mdps:
            mdp_id = len(mdp_ids)
            for t in mdp:
                mdp_ids.append(mdp_id)
                logged_propensities.append(t.action_prob)
                logged_rewards.append(t.reward)
                assert t.action is not None
                action_mask.append([
                    1 if x == t.action.value else 0 for x in range(num_actions)
                ])
                assert t.last_state is not None
                model_propensities.append([
                    input.target_policy(t.last_state)[Action(x)]
                    for x in range(num_actions)
                ])
                assert input.value_function is not None
                model_values.append([
                    input.value_function(t.last_state, Action(x))
                    for x in range(num_actions)
                ])

    return EvaluationDataPage(
        mdp_id=torch.tensor(mdp_ids).reshape(len(mdp_ids), 1),
        logged_propensities=torch.tensor(logged_propensities).reshape(
            (len(logged_propensities), 1)),
        logged_rewards=torch.tensor(logged_rewards).reshape(
            (len(logged_rewards), 1)),
        action_mask=torch.tensor(action_mask),
        model_propensities=torch.tensor(model_propensities),
        model_values=torch.tensor(model_values),
        sequence_number=torch.tensor([]),
        model_rewards=torch.tensor([]),
        model_rewards_for_logged_action=torch.tensor([]),
    )
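As a rough usage sketch (assuming a logged dataset, target policy, and value function constructed as in the surrounding examples), the converter can be applied directly; the num_actions=2 value is illustrative.

# Sketch only: `log`, `target_policy`, and `value_func` are assumed to exist,
# built as in the other examples here. A value function is required because of
# the assert in the converter above.
inp = RLEstimatorInput(
    gamma=GAMMA,
    log=log,
    target_policy=target_policy,
    value_function=value_func,
)
edp = rlestimator_input_to_edp(inp, num_actions=2)
# One row per logged transition; actions are one-hot encoded.
assert edp.action_mask.shape[1] == 2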
Example #3
    def edp_to_rl_input(edp: EvaluationDataPage,
                        gamma,
                        device=None) -> RLEstimatorInput:
        assert edp.model_values is not None
        eq_len = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
            edp.mdp_id,
            edp.action_mask.cpu().numpy(),
            edp.logged_rewards.cpu().numpy().flatten(),
            edp.logged_propensities.cpu().numpy().flatten(),
            edp.model_propensities.cpu().numpy(),
            edp.model_values.cpu().numpy(),
        )

        (
            actions,
            rewards,
            logged_propensities,
            target_propensities,
            estimated_q_values,
        ) = (torch.tensor(x,
                          dtype=torch.double,
                          device=device,
                          requires_grad=True) for x in eq_len)

        num_examples = logged_propensities.shape[0]
        horizon = logged_propensities.shape[1]

        # The EDP does not record the true start states, so every reconstructed
        # trajectory is keyed under a single placeholder State(0), and timestep i
        # of trajectory traj is encoded as State((traj, i)).
        log = {}
        for traj in range(num_examples):
            if State(0) not in log:
                log[State(0)] = []
            log[State(0)].append([
                Transition(
                    last_state=State((traj, i)),
                    action=torch.argmax(actions[traj, i]).item(),
                    action_prob=logged_propensities[traj, i].item(),
                    state=State((traj, i + 1)),
                    reward=rewards[traj, i].item(),
                ) for i in range(horizon - 1)
                if actions[traj, i][torch.argmax(actions[traj,
                                                         i]).item()] != 0.0
            ])

        return RLEstimatorInput(
            gamma=gamma,
            log=log,
            target_policy=SequentialOPEstimatorAdapter.EDPSeqPolicy(
                actions.shape[2], target_propensities),
            value_function=SequentialOPEstimatorAdapter.EDPValueFunc(
                estimated_q_values, target_propensities),
            ground_truth=None,
            horizon=horizon,
        )
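A hedged round-trip sketch, assuming edp_to_rl_input is exposed as a helper on SequentialOPEstimatorAdapter (as its use of the adapter's inner classes suggests) and reusing the estimator calls from Example #4 below; the gamma value here is illustrative.

# Illustrative only: convert an EvaluationDataPage back into an RLEstimatorInput
# and evaluate it with a sequential doubly robust estimator.
rl_input = SequentialOPEstimatorAdapter.edp_to_rl_input(edp, gamma=0.99, device=device)
dr_results = SeqDREstimator(weight_clamper=None, weighted=False,
                            device=device).evaluate(rl_input)
cpe_estimate = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(dr_results)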
Example #4
    def test_gridworld_sequential_adapter(self):
        """
        Create a gridworld environment, logging policy, and target policy
        Evaluates target policy using the direct OPE sequential doubly robust estimator,
        then transforms the log into an evaluation data page which is passed to the ope adapter.

        This test is meant to verify the adaptation of EDPs into RLEstimatorInputs as employed
        by ReAgent since ReAgent provides EDPs to Evaluators. Going from EDP -> RLEstimatorInput
        is more involved than RLEstimatorInput -> EDP since the EDP does not store the state
        at each timestep in each MDP, only the corresponding logged outputs & model outputs.
        Thus, the adapter must do some tricks to represent these timesteps as states so the
        ope module can extract the correct outputs.

        Note that there is some randomness in the model outputs since the model is purposefully
        noisy. However, the same target policy is being evaluated on the same logged walks through
        the gridworld, so the two results should be close in value (within 1).

        """
        random.seed(0)
        np.random.seed(0)
        torch.random.manual_seed(0)

        device = torch.device("cuda") if torch.cuda.is_available() else None

        gridworld = GridWorld.from_grid(
            [
                ["s", "0", "0", "0", "0"],
                ["0", "0", "0", "W", "0"],
                ["0", "0", "0", "0", "0"],
                ["0", "W", "0", "0", "0"],
                ["0", "0", "0", "0", "g"],
            ],
            max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
        )

        action_space = ActionSpace(4)
        opt_policy = TabularPolicy(action_space)
        trainer = DPTrainer(gridworld, opt_policy)
        value_func = trainer.train(gamma=TestOPEModuleAlgs.GAMMA)

        behavivor_policy = RandomRLPolicy(action_space)
        target_policy = EpsilonGreedyRLPolicy(opt_policy,
                                              TestOPEModuleAlgs.NOISE_EPSILON)
        model = NoiseGridWorldModel(
            gridworld,
            action_space,
            epsilon=TestOPEModuleAlgs.NOISE_EPSILON,
            max_horizon=TestOPEModuleAlgs.MAX_HORIZON,
        )
        value_func = DPValueFunction(target_policy, model,
                                     TestOPEModuleAlgs.GAMMA)
        ground_truth = DPValueFunction(target_policy, gridworld,
                                       TestOPEModuleAlgs.GAMMA)

        log = []
        log_generator = PolicyLogGenerator(gridworld, behavivor_policy)
        num_episodes = TestOPEModuleAlgs.EPISODES
        for state in gridworld.states:
            for _ in range(num_episodes):
                log.append(log_generator.generate_log(state))

        estimator_input = RLEstimatorInput(
            gamma=TestOPEModuleAlgs.GAMMA,
            log=log,
            target_policy=target_policy,
            value_function=value_func,
            ground_truth=ground_truth,
        )

        edp = rlestimator_input_to_edp(estimator_input,
                                       len(model.action_space))

        dr_estimator = SeqDREstimator(weight_clamper=None,
                                      weighted=False,
                                      device=device)

        module_results = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate(
            dr_estimator.evaluate(estimator_input))
        adapter_results = SequentialOPEstimatorAdapter(
            dr_estimator, TestOPEModuleAlgs.GAMMA, device=device).estimate(edp)

        self.assertAlmostEqual(
            adapter_results.raw,
            module_results.raw,
            delta=TestOPEModuleAlgs.CPE_PASS_BAR,
            msg=f"OPE adapter results differed too much from underlying module "
            f"(Diff: {abs(adapter_results.raw - module_results.raw)} > "
            f"{TestOPEModuleAlgs.CPE_PASS_BAR})",
        )
        self.assertLess(
            adapter_results.raw,
            TestOPEModuleAlgs.CPE_MAX_VALUE,
            msg=f"OPE adapter results are too large "
            f"({adapter_results.raw} > {TestOPEModuleAlgs.CPE_MAX_VALUE})",
        )
Example #5
                 f"{gridworld.dump_value_func(ground_truth)}")

    log = {}
    log_generator = PolicyLogGenerator(gridworld, behavivor_policy)
    num_episodes = 200
    for state in gridworld.states:
        mdps = []
        for _ in range(num_episodes):
            mdps.append(log_generator.generate_log(state))
        log[state] = mdps
        logging.info(f"Generated {len(mdps)} logs for {state}")

    estimator_input = RLEstimatorInput(
        gamma=GAMMA,
        log=log,
        target_policy=target_policy,
        value_function=value_func,
        ground_truth=ground_truth,
    )

    # Direct Method estimate.
    DMEstimator(device=device).evaluate(estimator_input)

    # Unweighted and weighted IPS estimates.
    IPSEstimator(weight_clamper=None, weighted=False,
                 device=device).evaluate(estimator_input)
    IPSEstimator(weight_clamper=None, weighted=True,
                 device=device).evaluate(estimator_input)

    # Unweighted and weighted doubly robust estimates.
    DoublyRobustEstimator(weight_clamper=None, weighted=False,
                          device=device).evaluate(estimator_input)
    DoublyRobustEstimator(weight_clamper=None, weighted=True,
                          device=device).evaluate(estimator_input)
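If the individual estimates are needed programmatically (as in Example #1, where the evaluate() results are captured), the same calls can be gathered for comparison; this is an illustrative variation rather than part of the original snippet.

    # Sketch only: collect all five estimators and their results in one place.
    estimators = {
        "DM": DMEstimator(device=device),
        "IPS": IPSEstimator(weight_clamper=None, weighted=False, device=device),
        "weighted IPS": IPSEstimator(weight_clamper=None, weighted=True, device=device),
        "DR": DoublyRobustEstimator(weight_clamper=None, weighted=False, device=device),
        "weighted DR": DoublyRobustEstimator(weight_clamper=None, weighted=True, device=device),
    }
    results = {name: est.evaluate(estimator_input) for name, est in estimators.items()}
    for name, result in results.items():
        logging.info(f"{name}: {result}")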