def test_slate_slot_item_probabilities(self):
    probs = SlateSlotItemProbabilities(
        [SlateItemValues(vs) for vs in self._slot_item_relevances]
    )
    slate = probs.sample_slate(self._slots)
    slate_prob = probs.slate_probability(slate)
    self.assertAlmostEqual(slate_prob, 0.02139037)
    # expected reward per slot = sum over items of P(item in slot) * item reward
    slot_item_expectations = probs.slot_item_expectations()
    slot_rewards = slot_item_expectations.expected_rewards(
        SlateItemValues(self._item_rewards)
    )
    diff = slot_rewards.values - torch.tensor([1.81818, 2.51352, 7.36929])
    self.assertAlmostEqual(diff.sum().item(), 0, places=5)
def test_metrics(self):
    dcg = DCGSlateMetric()
    ndcg = NDCGSlateMetric(SlateItemValues([1.0, 2.5, 2.0, 3.0, 1.5, 0.0]))
    item_rewards = SlateItemValues([2.0, 1.0, 0.0, 3.0, 1.5, 2.5])
    slate = Slate([SlateItem(1), SlateItem(3), SlateItem(2)])
    reward = dcg(slate.slots, slate.slot_values(item_rewards))
    self.assertAlmostEqual(reward, 5.416508275)
    reward = ndcg(slate.slots, slate.slot_values(item_rewards))
    self.assertAlmostEqual(reward, 0.473547669)
    slate = Slate([SlateItem(5), SlateItem(0), SlateItem(4)])
    reward = dcg(slate.slots, slate.slot_values(item_rewards))
    self.assertAlmostEqual(reward, 7.463857073)
    reward = ndcg(slate.slots, slate.slot_values(item_rewards))
    self.assertAlmostEqual(reward, 0.652540703)
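# Standalone check of the constants asserted in test_metrics above. This is a
# sketch, not part of the test class: it assumes the standard exponential-gain
# DCG, dcg = sum_k (2^rel_k - 1) / log2(k + 2) for 0-based slot k, and
# NDCG = DCG / IDCG, where IDCG is the DCG of the top slate_size reference
# relevances. Both assumptions reproduce the asserted values to 1e-6.
import math

def _dcg(rels):
    return sum((2.0 ** r - 1.0) / math.log2(k + 2) for k, r in enumerate(rels))

# slate (1, 3, 2) picks rewards (1.0, 3.0, 0.0); the ideal top-3 reference
# relevances are (3.0, 2.5, 2.0)
assert abs(_dcg([1.0, 3.0, 0.0]) - 5.416508275) < 1e-6
assert abs(_dcg([1.0, 3.0, 0.0]) / _dcg([3.0, 2.5, 2.0]) - 0.473547669) < 1e-6
# slate (5, 0, 4) picks rewards (2.5, 2.0, 1.5)
assert abs(_dcg([2.5, 2.0, 1.5]) - 7.463857073) < 1e-6
assert abs(_dcg([2.5, 2.0, 1.5]) / _dcg([3.0, 2.5, 2.0]) - 0.652540703) < 1e-6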
def test_slate_slot_item_probabilities(self):
    probs = SlateSlotItemProbabilities(
        [SlateItemValues(vs) for vs in self._slot_item_relevances]
    )
    slate = probs.sample_slate(self._slots)
    slate_prob = probs.slate_probability(slate)
    self.assertAlmostEqual(slate_prob, 0.02139037)
    slot_item_expectations = probs.slot_item_expectations()
    slot_rewards = slot_item_expectations.expected_rewards(
        SlateItemValues(self._item_rewards)
    )
    diff = slot_rewards.values - torch.tensor([1.818, 2.449, 4.353])
    self.assertAlmostEqual(diff.sum().item(), 0, places=5)
    # each slot's item distribution must sum to one
    for d in slot_item_expectations.items:
        total = d.values.sum()
        self.assertAlmostEqual(total.item(), 1.0)
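# Standalone sketch (not part of the test class) of the expected_rewards
# contract checked above, using hypothetical numbers: consistent with the
# per-slot sum-to-one check, each slot holds a distribution over items, and
# the slot's expected reward is that distribution's dot product with the
# item rewards.
import torch

slot_dist = torch.tensor([0.5, 0.3, 0.2])      # hypothetical P(item | slot), sums to 1
item_rewards = torch.tensor([1.0, 2.0, 4.0])   # hypothetical per-item rewards
expected = torch.dot(slot_dist, item_rewards)  # 0.5*1.0 + 0.3*2.0 + 0.2*4.0 = 1.9
assert abs(expected.item() - 1.9) < 1e-6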
def item_relevances(
    self,
    query_id: int,
    query_terms: Tuple[int, ...],
    items: Iterable[Tuple[int, int]],
) -> SlateItemValues:
    self._process_training_queries()
    if query_id in self._query_ids:
        # exact query match: use its url relevances directly
        q = self._query_ids[query_id]
        rels = q.url_relevances
    else:
        # fall back to averaging relevances across the query's terms
        ras = {}
        for t in query_terms:
            if t in self._query_terms:
                q = self._query_terms[t]
                for i, r in q.url_relevances:
                    if i in ras:
                        ra = ras[i]
                    else:
                        ra = RunningAverage()
                        ras[i] = ra
                    ra.add(r)
        rels = {i: r.average for i, r in ras.items()}
    # items absent from the training data default to zero relevance
    item_rels = {}
    for i in items:
        if i in rels:
            item_rels[i] = rels[i]
        else:
            item_rels[i] = 0.0
    return SlateItemValues(item_rels)
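# Worked illustration of the fallback branch above (hypothetical numbers):
# if url 7 carries relevance 2.0 under query term A and 4.0 under term B,
# the RunningAverage yields rels[7] = (2.0 + 4.0) / 2 = 3.0, while any item
# never seen under the query's terms gets the default relevance 0.0.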
def predict_item(
    self, query_id: int, query_terms: Tuple[int, ...]
) -> SlateItemValues:
    self._process_training_queries()
    if query_id in self._query_ids:
        q = self._query_ids[query_id]
        return SlateItemValues(dict(q.url_relevances.items()))
    else:
        # average relevance per url across all of the query's terms
        rels = {}
        for t in query_terms:
            if t not in self._query_terms:
                # skip terms unseen in training, as item_relevances does
                continue
            q = self._query_terms[t]
            for i, r in q.url_relevances:
                if i in rels:
                    ra = rels[i]
                else:
                    ra = RunningAverage()
                    rels[i] = ra
                ra.add(r)
        return SlateItemValues({i: r.average for i, r in rels.items()})
def test_slate_item_probabilities(self):
    probs = SlateItemProbabilities(self._item_relevances)
    slate = probs.sample_slate(self._slots)
    slate_prob = probs.slate_probability(slate)
    self.assertAlmostEqual(slate_prob, 0.017825312)
    slot_item_expectations = probs.slot_item_expectations(self._slots)
    slot_rewards = slot_item_expectations.expected_rewards(
        SlateItemValues(self._item_rewards)
    )
    diff = slot_rewards.values - torch.tensor([1.81818, 2.13736, 2.66197])
    self.assertAlmostEqual(diff.sum().item(), 0, places=5)
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    log_dataset: TrainingDataset,
    log_distribution: RewardDistribution,
    tgt_dataset: TrainingDataset,
    tgt_distribution: RewardDistribution,
    log_queries: Sequence[TrainingQuery],
    slate_size: int,
    item_size: int,
    metric_func: str,
    max_num_workers: int,
    device=None,
):
    log_length = len(log_queries)
    slots = SlateSlots(slate_size)

    logging.info("Generating log...")
    st = time.perf_counter()
    tasks = []
    total_samples = 0
    for estimators, num_samples in experiments:
        samples = []
        if num_samples * 10 > log_length:
            logging.warning(f"not enough log data, needs {num_samples * 10}")
            continue
        query_choices = np.random.choice(log_length, num_samples, replace=False)
        for i in query_choices:
            q = log_queries[i]
            context = SlateContext(SlateQuery((q.query_id, *(q.query_terms))), slots)
            url_relevances = q.url_relevances
            if len(url_relevances) > item_size:
                # keep only the item_size most relevant urls
                url_relevances = {
                    k: v
                    for k, v in sorted(
                        url_relevances.items(),
                        key=lambda item: item[1],
                        reverse=True,
                    )[:item_size]
                }
            items = url_relevances.keys()
            log_item_rewards = log_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            log_item_probs = log_distribution(log_item_rewards)
            tgt_item_rewards = tgt_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            tgt_item_probs = tgt_distribution(tgt_item_rewards)
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(url_relevances)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            slot_weights = metric.slot_weights(slots)
            if tgt_item_probs.is_deterministic:
                # a deterministic target policy produces exactly one slate
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            log_rewards = log_slate.slot_values(gt_item_rewards)
            log_reward = metric.calculate_reward(slots, log_rewards, None, slot_weights)
            gt_slot_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            gt_reward = metric.calculate_reward(
                slots, gt_slot_rewards, None, slot_weights
            )
            samples.append(
                LogSample(
                    context,
                    metric,
                    log_slate,
                    log_reward,
                    log_slate_prob,
                    None,
                    log_item_probs,
                    tgt_slate_prob,
                    None,
                    tgt_item_probs,
                    gt_reward,
                    slot_weights,
                )
            )
            total_samples += 1
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")

    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
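# Hypothetical invocation of evaluate() above. The estimator and distribution
# names are assumptions about what the surrounding package provides, so this
# stays a comment sketch rather than runnable code:
#
# evaluate(
#     experiments=[([IPSEstimator(), PseudoInverseEstimator()], 100)],
#     log_dataset=log_dataset,
#     log_distribution=log_distribution,
#     tgt_dataset=tgt_dataset,
#     tgt_distribution=tgt_distribution,
#     log_queries=log_queries,
#     slate_size=5,
#     item_size=10,
#     metric_func="ndcg",
#     max_num_workers=4,
# )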
def item_rewards(self, context: SlateContext) -> SlateItemValues:
    qv = context.query.value
    doc_rewards = self._relevances[qv[1] : (qv[1] + qv[2])]
    return SlateItemValues(doc_rewards)
def item_rewards(self, context: SlateContext) -> SlateItemValues:
    qv = context.query.value
    item_rewards = self._relevances[qv[1] : (qv[1] + qv[2])].detach().clone()
    return SlateItemValues(item_rewards)
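# Layout assumed by the two item_rewards variants above (inferred from the
# slicing, not documented here): context.query.value appears to pack
# (query_id, start_index, doc_count), so e.g. qv = (17, 120, 10) selects
# self._relevances[120:130], the ten documents belonging to query 17.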
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    dataset: MSLRDatasets,
    slate_size: int,
    item_size: int,
    metric_func: str,
    log_trainer: Trainer,
    log_distribution: RewardDistribution,
    log_features: str,
    tgt_trainer: Trainer,
    tgt_distribution: RewardDistribution,
    tgt_features: str,
    dm_features: str,
    max_num_workers: int,
    device=None,
):
    assert slate_size < item_size
    print(
        f"Evaluate All:"
        f" slate_size={slate_size}, item_size={item_size}, metric={metric_func}"
        f", Log=[{log_trainer.name}, {log_distribution}, {log_features}]"
        f", Target=[{tgt_trainer.name}, {tgt_distribution}, {tgt_features}]"
        f", DM=[{dm_features}]"
        f", Workers={max_num_workers}, device={device}",
        flush=True,
    )
    logging.info("Preparing models and policies...")
    st = time.perf_counter()
    log_trainer.load_model(
        os.path.join(
            dataset.folder, log_trainer.name + "_all_" + log_features + ".pickle"
        )
    )
    # calculate behavior model scores
    log_pred = log_trainer.predict(getattr(dataset, log_features))
    tgt_trainer.load_model(
        os.path.join(
            dataset.folder, tgt_trainer.name + "_all_" + tgt_features + ".pickle"
        )
    )
    # calculate target model scores
    tgt_pred = tgt_trainer.predict(getattr(dataset, tgt_features))
    dm_train_features = getattr(dataset, dm_features)
    slots = SlateSlots(slate_size)
    dt = time.perf_counter() - st
    logging.info(f"Preparing models and policies done: {dt}s")

    total_samples = 0
    for _, num_samples in experiments:
        total_samples += num_samples
    logging.info(f"Generating log: total_samples={total_samples}")
    st = time.perf_counter()
    tasks = []
    samples_generated = 0
    total_queries = dataset.queries.shape[0]
    for estimators, num_samples in experiments:
        samples = []
        for _ in range(num_samples):
            # randomly sample a query
            q = dataset.queries[random.randrange(total_queries)]
            doc_size = int(q[2])
            if doc_size < item_size:
                # skip if number of docs is less than item_size
                continue
            si = int(q[1])
            ei = si + doc_size
            # using top item_size docs for logging
            log_scores, item_choices = log_pred.scores[si:ei].sort(
                dim=0, descending=True
            )
            log_scores = log_scores[:item_size]
            item_choices = item_choices[:item_size]
            log_item_probs = log_distribution(SlateItemValues(log_scores))
            tgt_scores = tgt_pred.scores[si:ei][item_choices].detach().clone()
            tgt_item_probs = tgt_distribution(SlateItemValues(tgt_scores))
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(dataset.relevances[si:ei][item_choices])
            gt_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            query = SlateQuery((si, ei))
            context = SlateContext(query, slots, item_choices)
            slot_weights = metric.slot_weights(slots)
            gt_reward = metric.calculate_reward(slots, gt_rewards, None, slot_weights)
            if tgt_item_probs.is_deterministic:
                # a deterministic target policy produces exactly one slate
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
                log_reward = gt_reward
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
                log_rewards = log_slate.slot_values(gt_item_rewards)
                log_reward = metric.calculate_reward(
                    slots, log_rewards, None, slot_weights
                )
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            item_features = SlateItemFeatures(dm_train_features[si:ei][item_choices])
            sample = LogSample(
                context,
                metric,
                log_slate,
                log_reward,
                log_slate_prob,
                None,
                log_item_probs,
                tgt_slate_prob,
                None,
                tgt_item_probs,
                gt_reward,
                slot_weights,
                None,
                item_features,
            )
            samples.append(sample)
            samples_generated += 1
            if samples_generated % 1000 == 0:
                logging.info(
                    f"  samples generated: {samples_generated},"
                    f" {100 * samples_generated / total_samples:.1f}%"
                )
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")

    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
def item_rewards(self, context: SlateContext) -> SlateItemValues:
    return SlateItemValues(self.item_relevances(context))