def test_validator_init_bad_metrics():
    """Constructing a Validator with malformed metrics must raise TypeError."""
    # Anything that is not a list at all is rejected outright.
    with pytest.raises(TypeError, match='Argument: metrics must be a list.'):
        Validator(model, dataloader, 'metrics', objectives)
    # A list whose elements are not all MetricAtK instances is also rejected.
    bad_elements_msg = ('All elements of argument: metrics'
                       ' must be of type MetricAtK.')
    with pytest.raises(TypeError, match=bad_elements_msg):
        Validator(model, dataloader, ['metric1', RecallAtK(2)], objectives)
def create_metric_list(k, revenue):
    """Return a fresh instance of every supported metric at cutoff *k*.

    DiversityAtK reads the module-level ``diversity_vector``.
    """
    metrics = [RecallAtK(k)]
    metrics.append(RevenueAtK(k, revenue))
    metrics.append(DiversityAtK(k, diversity_vector))
    metrics.append(HitRatioAtK(k))
    metrics.append(NDCGAtK(k))
    metrics.append(PrecisionAtK(k))
    return metrics
def test_validator_mock_opposite_model():
    """With MockOpposite predictions, recall@1 is 0 and MSE is 1."""
    ds = MamoDataset(input_data, input_data.copy())
    loader = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)
    validator = Validator(MockOpposite(), loader, [RecallAtK(1)], [MSELoss()])
    results = validator.evaluate()
    # evaluate() hands back a (metric_scores, objective_scores) pair of lists.
    assert isinstance(results, tuple)
    assert isinstance(results[0], list)
    assert results[0][0] == 0
    assert isinstance(results[1], list)
    assert results[1][0] == 1
    # A single objective combines to itself.
    assert validator.combine_objectives(results[1]) == 1
def test_validator_mock_shift_right_by_one_model():
    """A shift-by-one model matches shifted targets: recall@1 is 1, MSE is 0."""
    shifted_targets = np.roll(input_data.copy(), shift=1, axis=1)
    ds = MamoDataset(input_data, shifted_targets)
    loader = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)
    validator = Validator(MockShiftRightByOne(), loader,
                          [RecallAtK(1)], [MSELoss()])
    results = validator.evaluate()
    # evaluate() hands back a (metric_scores, objective_scores) pair of lists.
    assert isinstance(results, tuple)
    assert isinstance(results[0], list)
    assert results[0][0] == 1
    assert isinstance(results[1], list)
    assert results[1][0] == 0
    # A single objective combines to itself.
    assert validator.combine_objectives(results[1]) == 0
def test_recall_evaluate_correct_cases():
    """RecallAtK.evaluate on hand-checked cases: perfect, half, zero, full."""
    recall = RecallAtK(k)  # k comes from module scope
    # All-ones truth and predictions: every relevant item is recovered.
    truth = torch.from_numpy(np.ones((2, 3)))
    preds = torch.from_numpy(np.ones((2, 3)))
    assert recall.evaluate(truth, preds) == 1.0
    # Mixed scores against two relevant items per row: half are recovered.
    preds = torch.from_numpy(np.array([[0.1, 0.4, 0.2], [0.5, 0.1, 0.7]]))
    truth = torch.from_numpy(np.array([[1, 0, 1], [1, 1, 0]]))
    assert recall.evaluate(truth, preds) == 0.5
    # Same predictions, but the relevant items are never ranked on top.
    truth = torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]]))
    assert recall.evaluate(truth, preds) == 0.0
    # New scores put every relevant item back on top.
    preds = torch.from_numpy(np.array([[0.1, 0, 0.2], [0, 0.1, 0.7]]))
    assert recall.evaluate(truth, preds) == 1
def test_validator_mock_no_change_model():
    """An identity model yields recall@1 of 0 and MSE equal to the data mean."""
    ds = MamoDataset(input_data, input_data.copy())
    loader = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)
    validator = Validator(MockNoChange(), loader, [RecallAtK(1)], [MSELoss()])
    results = validator.evaluate()
    assert isinstance(results, tuple)
    assert isinstance(results[0], list)
    assert results[0][0] == 0
    assert isinstance(results[1], list)
    # The chosen elements are removed, so the expected MSE reduces to the
    # mean of the input data.
    expected_mse = np.mean(input_data)
    assert round(results[1][0], 2) == round(expected_mse, 2)
    assert (round(validator.combine_objectives(results[1]), 2)
            == round(expected_mse, 2))
def test_validator_mock_all_zeros_model():
    """A Validator works with only metrics or only objectives supplied."""
    ds = MamoDataset(input_data, input_data.copy())
    loader = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)

    # Objectives omitted: the objective result list comes back empty.
    validator = Validator(MockAllZeros(), loader, [RecallAtK(1)], None)
    results = validator.evaluate()
    assert isinstance(results, tuple)
    assert isinstance(results[0], list)
    assert results[0][0] == 0
    assert isinstance(results[1], list)
    assert results[1] == []

    # Metrics omitted: the metric result list comes back empty.
    validator = Validator(MockAllZeros(), loader, None, [MSELoss()])
    results = validator.evaluate()
    assert isinstance(results, tuple)
    assert isinstance(results[0], list)
    assert results[0] == []
    assert isinstance(results[1], list)
    # All-zero predictions against the raw data give an MSE equal to the
    # data mean here.
    expected_mse = np.mean(input_data)
    assert round(results[1][0], 2) == round(expected_mse, 2)
    assert (round(validator.combine_objectives(results[1]), 2)
            == round(expected_mse, 2))
# Paths to the held-out test split and the per-product data file.
# NOTE(review): dir_path and the *_data_path variables are defined earlier
# in this file (outside this chunk).
test_output_data_path = os.path.join(dir_path, 'movielens_small_test_test.npy')
products_data_path = os.path.join(dir_path, 'movielens_products_data.npy')

# Bundle every dataset split behind the autoencoder data handler.
data_handler = AEDataHandler(
    'MovieLensSmall', train_data_path, validation_input_data_path,
    validation_output_data_path, test_input_data_path,
    test_output_data_path)
input_dim = data_handler.get_input_dim()
output_dim = data_handler.get_output_dim()

# The product data is used twice: as a numpy array for RevenueAtK and as a
# float32 torch tensor (moved to `device`) to weight the second VAE loss.
products_data_np = np.load(products_data_path)
products_data_torch = torch.tensor(
    products_data_np, dtype=torch.float32).to(device)

# create model
model = MultiVAE(params='yaml_files/params_multi_VAE_training.yaml')

# Two training objectives: an unweighted VAE loss and one weighted by the
# product vector.
correctness_loss = VAELoss()
revenue_loss = VAELoss(weighted_vector=products_data_torch)
losses = [correctness_loss, revenue_loss]

# Validation tracks recall@10 and revenue@10.
recallAtK = RecallAtK(k=10)
revenueAtK = RevenueAtK(k=10, revenue=products_data_np)
validation_metrics = [recallAtK, revenueAtK]

# Train, then print the Pareto front accumulated during training.
trainer = Trainer(data_handler, model, losses, validation_metrics,
                  save_to_path)
trainer.train()
print(trainer.pareto_manager._pareto_front)
# Build a synthetic 2000 x 8936 float32 test split on disk.
# NOTE(review): dir_path and test_input_data_path come from earlier in this
# file (outside this chunk).
test_output_data_path = os.path.join(dir_path, 'movielens_small_test_test.npy')
np.save(test_input_data_path, np.random.rand(2000, 8936).astype('float32'))
np.save(test_output_data_path, np.random.rand(2000, 8936).astype('float32'))

# Variables
# Shared fixtures used by the Validator tests below.
dataset = MamoDataset(np.load(test_input_data_path),
                      np.load(test_output_data_path))
model = MultiVAE(params='yaml_files/params_multi_VAE.yaml')
model.initialize_model()
dataloader = DataLoader(dataset, batch_size=data_info['batch_size'],
                        shuffle=True, drop_last=True)
metrics = [RecallAtK(10)]
objectives = [VAELoss()]
# Inputs for the combine_objectives tests.
obj_results = [0.4, 0.5, 0.7]
alphas = [0.5, 0.2, 0.3]
max_normalization = [1, 0.5, 2]


# A Validator object cannot be created without a model.
def test_validator_init_no_model():
    with pytest.raises(TypeError, match='Argument: model must be set.'):
        Validator(None, dataloader, metrics, objectives)


# A Validator object cannot be created without a dataloader.
def test_validator_init_no_dataloader():