Example #1
    def __init__(self,
                 action_names,
                 gamma,
                 model,
                 metrics_to_score=None,
                 device=None) -> None:
        super().__init__(action_names, gamma, model, metrics_to_score)

        self._device = device
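        # One-step (contextual bandit) OPE estimators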
        self.ope_dm_estimator = OPEstimatorAdapter(
            DMEstimator(device=self._device))
        self.ope_ips_estimator = OPEstimatorAdapter(
            IPSEstimator(device=self._device))
        self.ope_dr_estimator = OPEstimatorAdapter(
            DoublyRobustEstimator(device=self._device))

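        # Sequential (multi-step) OPE estimators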
        self.ope_seq_dr_estimator = SequentialOPEstimatorAdapter(
            SeqDREstimator(device=self._device), gamma, device=self._device)
        self.ope_seq_weighted_dr_estimator = SequentialOPEstimatorAdapter(
            SeqDREstimator(weighted=True, device=self._device),
            gamma,
            device=self._device,
        )
        self.ope_seq_magic_estimator = SequentialOPEstimatorAdapter(
            MAGICEstimator(device=self._device), gamma)
Example #2
    def test_switch_dr_equal_to_dm(self):
        """
        Switch-DR with tau set at the minimum value should be equal to DM.
        """
        # Setting CANDIDATES to 0 defaults tau to the minimum threshold
        SwitchEstimator.CANDIDATES = 0
        switch = SwitchDREstimator(rmax=1.0).evaluate(self.bandit_input)
        dm = DMEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(dm.estimated_reward, switch.estimated_reward)
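
The complementary equivalence (Switch collapsing to plain IPS when the
direct-method fallback never triggers) is exercised in Example #3 below
through the adapter's exp_base=1 call; a standalone sketch in the style of
this test, assuming SwitchEstimator.evaluate forwards the same exp_base
keyword that the adapter passes through:

    def test_switch_equal_to_ips(self):
        """
        Switch with exp_base=1 should be equal to IPS (a sketch; exp_base
        forwarding is an assumption based on Example #3).
        """
        switch = SwitchEstimator(rmax=1.0).evaluate(self.bandit_input,
                                                    exp_base=1)
        ips = IPSEstimator().evaluate(self.bandit_input)
        self.assertAlmostEqual(ips.estimated_reward, switch.estimated_reward)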
Example #3
    def test_seq2slate_eval_data_page(self):
        """
        Create 3 slate ranking logs and evaluate using Direct Method, Inverse
        Propensity Scores, and Doubly Robust.

        The logs are as follows:
        state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
        indices in logged slates: [3, 2], [3, 2], [3, 2]
        model output indices: [2, 3], [3, 2], [2, 3]
        logged reward: 4, 5, 7
        logged propensities: 0.2, 0.5, 0.4
        predicted rewards on logged slates: 2, 4, 6
        predicted rewards on model outputted slates: 1, 4, 5
        predicted propensities: 0.4, 0.3, 0.7

        When eval_greedy=True:

        Direct Method uses the predicted rewards on model outputted slates.
        Thus the result is expected to be (1 + 4 + 5) / 3

        Inverse Propensity Scores scales the reward by 1.0 / logged propensity
        whenever the model output slate matches the logged slate.
        Since only the second log matches the model output, the IPS result
        is expected to be 5 / 0.5 / 3

        Doubly Robust is the sum of the Direct Method result and the
        propensity-scaled reward difference; the latter is defined as:
        1.0 / logged_propensity * (logged reward - predicted reward on logged slate)
         * Indicator(model slate == logged slate)
        Since only the second logged slate matches the model output slate,
        the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3

        When eval_greedy=False:

        Only Inverse Propensity Scores would be accurate, because it would be
        too expensive to compute all possible slates' propensities and
        predicted rewards for the Direct Method.

        The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
        """
        batch_size = 3
        state_dim = 3
        src_seq_len = 2
        tgt_seq_len = 2
        candidate_dim = 2

        reward_net = FakeSeq2SlateRewardNetwork()
        seq2slate_net = FakeSeq2SlateTransformerNet()

        src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
        tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
        tgt_out_seq = src_seq[
            torch.arange(batch_size).repeat_interleave(tgt_seq_len),
            tgt_out_idx.flatten() - 2,
        ].reshape(batch_size, tgt_seq_len, candidate_dim)

        ptb = rlt.PreprocessedTrainingBatch(
            training_input=rlt.PreprocessedRankingInput(
                state=rlt.FeatureData(float_features=torch.eye(state_dim)),
                src_seq=rlt.FeatureData(float_features=src_seq),
                tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
                src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
                tgt_out_idx=tgt_out_idx,
                tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
                slate_reward=torch.tensor([4.0, 5.0, 7.0]),
            ),
            extras=rlt.ExtraData(
                sequence_number=torch.tensor([0, 0, 0]),
                mdp_id=np.array(["0", "1", "2"]),
            ),
        )

        edp = EvaluationDataPage.create_from_tensors_seq2slate(
            seq2slate_net, reward_net, ptb.training_input, eval_greedy=True)
        logger.info(
            "---------- Start evaluating eval_greedy=True -----------------")
        doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
        dm_estimator = OPEstimatorAdapter(DMEstimator())
        ips_estimator = OPEstimatorAdapter(IPSEstimator())
        switch_estimator = OPEstimatorAdapter(SwitchEstimator())
        switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator())

        doubly_robust = doubly_robust_estimator.estimate(edp)
        inverse_propensity = ips_estimator.estimate(edp)
        direct_method = dm_estimator.estimate(edp)

        # Verify that Switch with low exponent is equivalent to IPS
        switch_ips = switch_estimator.estimate(edp, exp_base=1)
        # Verify that Switch with no candidates is equivalent to DM
        switch_dm = switch_estimator.estimate(edp, candidates=0)
        # Verify that SwitchDR with low exponent is equivalent to DR
        switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
        # Verify that SwitchDR with no candidates is equivalent to DM
        switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)

        logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")

        avg_logged_reward = (4 + 5 + 7) / 3
        self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
        self.assertAlmostEqual(direct_method.normalized,
                               direct_method.raw / avg_logged_reward,
                               delta=1e-6)
        self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
        self.assertAlmostEqual(
            inverse_propensity.normalized,
            inverse_propensity.raw / avg_logged_reward,
            delta=1e-6,
        )
        self.assertAlmostEqual(doubly_robust.raw,
                               direct_method.raw + 1 / 0.5 * (5 - 4) / 3,
                               delta=1e-6)
        self.assertAlmostEqual(doubly_robust.normalized,
                               doubly_robust.raw / avg_logged_reward,
                               delta=1e-6)
        self.assertAlmostEqual(switch_ips.raw,
                               inverse_propensity.raw,
                               delta=1e-6)
        self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
        self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
        self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
        logger.info(
            "---------- Finish evaluating eval_greedy=True -----------------")

        logger.info(
            "---------- Start evaluating eval_greedy=False -----------------")
        edp = EvaluationDataPage.create_from_tensors_seq2slate(
            seq2slate_net, reward_net, ptb.training_input, eval_greedy=False)
        doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
        dm_estimator = OPEstimatorAdapter(DMEstimator())
        ips_estimator = OPEstimatorAdapter(IPSEstimator())

        doubly_robust = doubly_robust_estimator.estimate(edp)
        inverse_propensity = ips_estimator.estimate(edp)
        direct_method = dm_estimator.estimate(edp)
        self.assertAlmostEqual(
            inverse_propensity.raw,
            (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
            delta=1e-6,
        )
        self.assertAlmostEqual(
            inverse_propensity.normalized,
            inverse_propensity.raw / avg_logged_reward,
            delta=1e-6,
        )
        logger.info(
            "---------- Finish evaluating eval_greedy=False -----------------")
Example #4
        description="Read command line parameters.")
    parser.add_argument("-p", "--parameters", help="Path to config file.")
    args = parser.parse_args(sys.argv[1:])

    with open(args.parameters, "r") as f:
        params = json.load(f)

    if "dataset" not in params:
        raise Exception('Please define "dataset" in the config file')

    random.seed(1234)
    np.random.seed(1234)
    torch.random.manual_seed(1234)

    dataset = UCIMultiClassDataset(params["dataset"])
    log_trainer = LogisticRegressionTrainer()
    log_epsilon = 0.1
    tgt_trainer = SGDClassifierTrainer()
    tgt_epsilon = 0.1
    dm_trainer = DecisionTreeTrainer()
    experiments = [(
        (
            DMEstimator(DecisionTreeTrainer()),
            IPSEstimator(),
            DoublyRobustEstimator(DecisionTreeTrainer()),
        ),
        1000,
    ) for _ in range(100)]
    evaluate_all(experiments, dataset, log_trainer, log_epsilon, tgt_trainer,
                 tgt_epsilon, 0)
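
Each entry in experiments pairs a fresh (DM, IPS, DR) estimator triple with a
per-trial sample count, so no fitted state is shared across the 100 trials. A
minimal sketch of the same construction with a helper (make_experiment is
illustrative, not part of the source):

    def make_experiment(num_samples=1000):
        # Fresh estimator instances for each trial so fitted reward
        # models are not reused across trials.
        estimators = (
            DMEstimator(DecisionTreeTrainer()),
            IPSEstimator(),
            DoublyRobustEstimator(DecisionTreeTrainer()),
        )
        return (estimators, num_samples)

    experiments = [make_experiment() for _ in range(100)]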
Example #5
    logs = []
    for _ in range(num_episodes):
        # Sample a random subset of the dataset to build this episode's log
        train_choices = random.sample(range(num_total_samples), num_sample)
        samples = []
        for i in train_choices:
            context = MultiClassContext(i)
            logged_action, logged_dist = log_policy(context)
            logged_reward = log_model(context)[logged_action]
            target_action, target_dist = target_policy(context)
            samples.append(
                LogSample(
                    context,
                    logged_action,
                    logged_dist,
                    logged_reward,
                    target_action,
                    target_dist,
                ))
        logs.append(Log(samples))

    estimator_input = BanditsEstimatorInput(action_space, logs, target_model,
                                            gt_model)

    result = DMEstimator().evaluate(estimator_input)
    logging.info(f"DM result: {result}")

    result = IPSEstimator().evaluate(estimator_input)
    logging.info(f"IPS result: {result}")

    result = DoublyRobustEstimator().evaluate(estimator_input)
    logging.info(f"DR result: {result}")