logging.info('loading "target_training_data"')
tgt_training_dataset = TrainingDataset(params["target_training_data"])
st = time.process_time()
tgt_training_dataset.load_queries()
logging.info(f"load time: {time.process_time() - st}")
tgt_model = YandexSlateModel(tgt_training_dataset)

log_queries = load_logged_queries(params["test_data"])
slots = SlateSlots(MAX_POSITION)
episodes = []
# log_training_dataset and gt_model are assumed to be prepared earlier in
# this script, alongside tgt_training_dataset above
for qid, qs in sorted(log_queries.items(), key=lambda i: len(i[1]), reverse=True):
    log_query = qs[0]
    context = SlateContext(SlateQuery((qid, *(log_query.query_terms))), slots)
    log_item_rewards = log_training_dataset.predict_item(
        log_query.query_id, log_query.query_terms
    )
    log_item_probs = SlateItemProbabilities(log_item_rewards.values)
    tgt_item_rewards = tgt_model.item_rewards(context)
    tgt_item_probs = SlateItemProbabilities(tgt_item_rewards.values)
    gt_item_rewards = gt_model.item_rewards(context)
    metric = NDCGSlateMetric(gt_item_rewards)
    samples = []
    for q in qs:
        slate = make_slate(slots, q.list)
        samples.append(
            LogSample(slate, slate.slot_values(gt_item_rewards))
        )
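
# A standalone sketch of the score NDCGSlateMetric is assumed to compute
# above: exponential-gain DCG over the slate's slot relevances, normalized
# by the ideal DCG attainable from all candidate items. The function and
# argument names here are illustrative, not part of the library API.
import math

def ndcg_sketch(slate_relevances, all_item_relevances):
    # DCG of the logged slate: graded gain discounted by log2(rank + 1)
    dcg = sum(
        (2.0 ** r - 1.0) / math.log2(pos + 2)
        for pos, r in enumerate(slate_relevances)
    )
    # ideal DCG: best ordering of all candidates, truncated to slate length
    ideal = sorted(all_item_relevances, reverse=True)[: len(slate_relevances)]
    idcg = sum(
        (2.0 ** r - 1.0) / math.log2(pos + 2) for pos, r in enumerate(ideal)
    )
    return dcg / idcg if idcg > 0.0 else 0.0

# e.g. a 3-slot slate drawn from 5 candidates:
# ndcg_sketch([3, 1, 0], [3, 2, 1, 1, 0]) ≈ 0.81
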
def evaluate_all(
    dataset: MSLRDatasets,
    slate_size: int,
    log_trainer: Trainer,
    tgt_trainer: Trainer,
    tgt_deterministic: bool,
    num_episodes: int,
    num_samples: int,
):
    print(
        f"Run: {log_trainer.name}, {tgt_trainer.name}"
        f" [{'deterministic' if tgt_deterministic else 'stochastic'}]",
        flush=True,
    )

    logging.info("Preparing models and policies...")
    st = time.process_time()
    # behavior (logging) model scored on anchor-url features
    log_trainer.load_model(
        os.path.join(dataset.folder, log_trainer.name + "_anchor_url_features.pickle")
    )
    log_pred = log_trainer.predict(dataset.anchor_url_features)
    log_model = TrainedModel(log_pred.scores)
    log_policy = MSLRPolicy(log_pred.scores, False, 1.0)
    # target model scored on body features
    tgt_trainer.load_model(
        os.path.join(dataset.folder, tgt_trainer.name + "_body_features.pickle")
    )
    tgt_pred = tgt_trainer.predict(dataset.body_features)
    tgt_model = TrainedModel(tgt_pred.scores)
    tgt_policy = MSLRPolicy(tgt_pred.scores, tgt_deterministic, 1.0)
    dt = time.process_time() - st
    logging.info(f"Preparing models and policies done: {dt}s")

    logging.info("Generating log...")
    st = time.process_time()
    slots = SlateSlots(slate_size)
    queries = dataset.queries
    episodes = []
    for q in queries:
        query = SlateQuery(q)
        items = SlateItems([SlateItem(i) for i in range(q[2].item())])
        if len(items) < slate_size:
            logging.warning(
                f"Number of items ({len(items)}) less than "
                f"number of slots ({slate_size})"
            )
            continue
        context = SlateContext(query, slots, items)
        log_item_probs = log_policy(context)
        log_item_rewards = log_model.item_rewards(context)
        tgt_item_probs = tgt_policy(context)
        metric = NDCGSlateMetric(log_item_rewards)
        samples = []
        for _ in range(num_samples):
            slate = log_item_probs.sample_slate(slots)
            samples.append(LogSample(slate, slate.slot_values(log_item_rewards)))
        episodes.append(
            LogEpisode(
                context, metric, samples, None, log_item_probs, None, tgt_item_probs
            )
        )
        if len(episodes) >= num_episodes:
            break
    dt = time.process_time() - st
    logging.info(f"Generating log done: {len(episodes)} samples in {dt}s")

    estimator_input = SlateEstimatorInput(episodes, tgt_model, log_model)
    evaluate(DMEstimator(device=device), estimator_input)
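
# MSLRPolicy(scores, deterministic, alpha) above is assumed to turn
# per-document ranker scores into item-selection probabilities, either
# greedily or stochastically. A minimal self-contained sketch of that idea;
# the softmax interpretation of alpha is a guess, not confirmed API.
import numpy as np

def item_probabilities_sketch(
    scores: np.ndarray, deterministic: bool, alpha: float
) -> np.ndarray:
    if deterministic:
        # all probability mass on the top-scored item
        probs = np.zeros_like(scores)
        probs[np.argmax(scores)] = 1.0
        return probs
    # softmax with sharpness alpha; larger alpha approaches deterministic
    z = alpha * (scores - scores.max())
    e = np.exp(z)
    return e / e.sum()

# e.g. item_probabilities_sketch(np.array([2.0, 1.0, 0.5]), False, 1.0)
# -> approximately [0.63, 0.23, 0.14]
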
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    log_dataset: TrainingDataset,
    log_distribution: RewardDistribution,
    tgt_dataset: TrainingDataset,
    tgt_distribution: RewardDistribution,
    log_queries: Sequence[TrainingQuery],
    slate_size: int,
    item_size: int,
    metric_func: str,
    max_num_workers: int,
    device=None,
):
    log_length = len(log_queries)
    slots = SlateSlots(slate_size)

    logging.info("Generating log...")
    st = time.perf_counter()
    tasks = []
    total_samples = 0
    for estimators, num_samples in experiments:
        samples = []
        # require a 10x margin of logged queries over requested samples
        if num_samples * 10 > log_length:
            logging.warning(f"not enough log data, needs {num_samples * 10}")
            continue
        query_choices = np.random.choice(log_length, num_samples, replace=False)
        for i in query_choices:
            q = log_queries[i]
            context = SlateContext(SlateQuery((q.query_id, *(q.query_terms))), slots)
            url_relevances = q.url_relevances
            if len(url_relevances) > item_size:
                # keep only the top item_size most relevant URLs
                url_relevances = {
                    k: v
                    for k, v in sorted(
                        url_relevances.items(), key=lambda item: item[1], reverse=True
                    )[:item_size]
                }
            items = url_relevances.keys()
            log_item_rewards = log_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            log_item_probs = log_distribution(log_item_rewards)
            tgt_item_rewards = tgt_dataset.item_relevances(
                q.query_id, q.query_terms, items
            )
            tgt_item_probs = tgt_distribution(tgt_item_rewards)
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(url_relevances)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            slot_weights = metric.slot_weights(slots)
            if tgt_item_probs.is_deterministic:
                # a deterministic target ranks greedily; log its slate directly
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            log_rewards = log_slate.slot_values(gt_item_rewards)
            log_reward = metric.calculate_reward(slots, log_rewards, None, slot_weights)
            gt_slot_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            gt_reward = metric.calculate_reward(
                slots, gt_slot_rewards, None, slot_weights
            )
            samples.append(
                LogSample(
                    context,
                    metric,
                    log_slate,
                    log_reward,
                    log_slate_prob,
                    None,
                    log_item_probs,
                    tgt_slate_prob,
                    None,
                    tgt_item_probs,
                    gt_reward,
                    slot_weights,
                )
            )
            total_samples += 1
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")

    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
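
# The samples above feed slate off-policy estimators. A self-contained
# sketch of the simplest one, slate-level inverse propensity scoring (IPS),
# using the quantities each LogSample records (logged reward, logging
# probability of the logged slate, target probability of the same slate).
# The function is illustrative only, not the repo's estimator API.
def ips_estimate_sketch(samples):
    """Average logged reward, reweighted by how much more (or less) likely
    the target policy is to show the logged slate than the logging policy."""
    total = 0.0
    for log_reward, log_slate_prob, tgt_slate_prob in samples:
        if log_slate_prob <= 0.0:
            continue  # slate outside the logging policy's support
        total += (tgt_slate_prob / log_slate_prob) * log_reward
    return total / len(samples)

# e.g. three logged slates, the first upweighted by the target policy:
# ips_estimate_sketch([(0.8, 0.2, 0.4), (0.5, 0.5, 0.3), (0.1, 0.3, 0.3)])
# -> (2.0 * 0.8 + 0.6 * 0.5 + 1.0 * 0.1) / 3 ≈ 0.667
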
def evaluate(
    experiments: Iterable[Tuple[Iterable[SlateEstimator], int]],
    dataset: MSLRDatasets,
    slate_size: int,
    item_size: int,
    metric_func: str,
    log_trainer: Trainer,
    log_distribution: RewardDistribution,
    log_features: str,
    tgt_trainer: Trainer,
    tgt_distribution: RewardDistribution,
    tgt_features: str,
    dm_features: str,
    max_num_workers: int,
    device=None,
):
    assert slate_size < item_size
    print(
        f"Evaluate All:"
        f" slate_size={slate_size}, item_size={item_size}, metric={metric_func}"
        f", Log=[{log_trainer.name}, {log_distribution}, {log_features}]"
        f", Target=[{tgt_trainer.name}, {tgt_distribution}, {tgt_features}]"
        f", DM=[{dm_features}]"
        f", Workers={max_num_workers}, device={device}",
        flush=True,
    )

    logging.info("Preparing models and policies...")
    st = time.perf_counter()
    log_trainer.load_model(
        os.path.join(
            dataset.folder, log_trainer.name + "_all_" + log_features + ".pickle"
        )
    )
    # calculate behavior model scores
    log_pred = log_trainer.predict(getattr(dataset, log_features))

    tgt_trainer.load_model(
        os.path.join(
            dataset.folder, tgt_trainer.name + "_all_" + tgt_features + ".pickle"
        )
    )
    # calculate target model scores
    tgt_pred = tgt_trainer.predict(getattr(dataset, tgt_features))

    dm_train_features = getattr(dataset, dm_features)
    slots = SlateSlots(slate_size)
    dt = time.perf_counter() - st
    logging.info(f"Preparing models and policies done: {dt}s")

    total_samples = 0
    for _, num_samples in experiments:
        total_samples += num_samples
    logging.info(f"Generating log: total_samples={total_samples}")
    st = time.perf_counter()
    tasks = []
    samples_generated = 0
    total_queries = dataset.queries.shape[0]
    for estimators, num_samples in experiments:
        samples = []
        for _ in range(num_samples):
            # randomly sample a query
            q = dataset.queries[random.randrange(total_queries)]
            doc_size = int(q[2])
            if doc_size < item_size:
                # skip if number of docs is less than item_size
                continue
            si = int(q[1])
            ei = si + doc_size
            # using top item_size docs for logging
            log_scores, item_choices = log_pred.scores[si:ei].sort(
                dim=0, descending=True
            )
            log_scores = log_scores[:item_size]
            item_choices = item_choices[:item_size]
            log_item_probs = log_distribution(SlateItemValues(log_scores))
            tgt_scores = tgt_pred.scores[si:ei][item_choices].detach().clone()
            tgt_item_probs = tgt_distribution(SlateItemValues(tgt_scores))
            tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots)
            gt_item_rewards = SlateItemValues(dataset.relevances[si:ei][item_choices])
            gt_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards)
            if metric_func == "dcg":
                metric = DCGSlateMetric(device=device)
            elif metric_func == "err":
                metric = ERRSlateMetric(4.0, device=device)
            else:
                metric = NDCGSlateMetric(gt_item_rewards, device=device)
            query = SlateQuery((si, ei))
            context = SlateContext(query, slots, item_choices)
            slot_weights = metric.slot_weights(slots)
            gt_reward = metric.calculate_reward(slots, gt_rewards, None, slot_weights)
            if tgt_item_probs.is_deterministic:
                # the target slate is fixed, so its ground-truth reward is exact
                tgt_slate_prob = 1.0
                log_slate = tgt_item_probs.sample_slate(slots)
                log_reward = gt_reward
            else:
                tgt_slate_prob = float("nan")
                log_slate = log_item_probs.sample_slate(slots)
                log_rewards = log_slate.slot_values(gt_item_rewards)
                log_reward = metric.calculate_reward(
                    slots, log_rewards, None, slot_weights
                )
            log_slate_prob = log_item_probs.slate_probability(log_slate)
            item_features = SlateItemFeatures(dm_train_features[si:ei][item_choices])
            sample = LogSample(
                context,
                metric,
                log_slate,
                log_reward,
                log_slate_prob,
                None,
                log_item_probs,
                tgt_slate_prob,
                None,
                tgt_item_probs,
                gt_reward,
                slot_weights,
                None,
                item_features,
            )
            samples.append(sample)
            samples_generated += 1
            if samples_generated % 1000 == 0:
                logging.info(
                    f" samples generated: {samples_generated}, "
                    f"{100 * samples_generated / total_samples:.1f}%"
                )
        tasks.append((estimators, SlateEstimatorInput(samples)))
    dt = time.perf_counter() - st
    logging.info(f"Generating log done: {total_samples} samples in {dt}s")

    logging.info("start evaluating...")
    st = time.perf_counter()
    evaluator = Evaluator(tasks, max_num_workers)
    Evaluator.report_results(evaluator.evaluate())
    logging.info(f"evaluating done in {time.perf_counter() - st}s")
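
# ERRSlateMetric(4.0, ...) in both evaluate() variants is assumed to be
# Expected Reciprocal Rank with a maximum relevance grade of 4 (MSLR-WEB
# grades run 0..4). A self-contained sketch of the standard ERR formula
# from Chapelle et al. (2009); names are illustrative, not the library API.
def err_sketch(slate_relevances, max_grade=4.0):
    score = 0.0
    p_continue = 1.0  # probability the user examines the current slot
    for pos, r in enumerate(slate_relevances, start=1):
        # probability the user is satisfied by an item of grade r
        p_stop = (2.0 ** r - 1.0) / (2.0 ** max_grade)
        score += p_continue * p_stop / pos
        p_continue *= 1.0 - p_stop
    return score

# e.g. err_sketch([4, 0, 2]) ≈ 0.94: a top item of grade 4 satisfies most
# users immediately, so later slots contribute little.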