    def test_mismatch_input_numbers(self):
        with pytest.raises(valid.ValidationError):
            report.compare(
                {0: self.X, 1: self.X},
                {0: self.y, 1: self.y},
                {1: self.prtc_attr},
                self.model_dict,
                flag_oor=False,
            )
    def test_missing_keys(self):
        with pytest.raises(valid.ValidationError):
            report.compare(
                {0: self.X, 1: self.X},
                {0: self.y, 1: self.y},
                None,
                self.model_dict,
                flag_oor=False,
            )
    def test_single_dataInputs(self):
        result = report.compare(
            self.X, self.y, self.prtc_attr, self.model_dict, flag_oor=False
        )
        self.is_result_valid(result)
    def test_preds_not_models(self):
        result = report.compare(
            [self.X, self.X],
            self.y,
            self.prtc_attr,
            predictions=[self.y, self.y],
            flag_oor=False,
        )
        self.is_result_valid(result)
    def test_with_protected_attributes(self):
        result = report.compare(
            [self.X, self.X],
            [self.y, self.y],
            [self.prtc_attr, self.prtc_attr],
            [self.model_dict[0], self.model_dict[1]],
            flag_oor=False,
        )
        self.is_result_valid(result)
    def test_mixed_groupings(self):
        result = report.compare(
            [self.X, self.X],
            self.y,
            self.prtc_attr,
            [self.model_dict[0], self.model_dict[1]],
            flag_oor=False,
        )
        self.is_result_valid(result)
    def test_differing_keys(self):
        with pytest.raises(valid.ValidationError):
            report.compare(
                {5: self.X, 6: self.X},
                {0: self.y, 1: self.y},
                {0: self.prtc_attr, 1: self.prtc_attr},
                self.model_dict,
                flag_oor=False,
            )
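    # Together with test_mismatch_input_numbers and test_missing_keys above,
    # this pins down the dict-input contract: when inputs are passed as
    # dicts, every dict must be supplied and all must share an identical
    # key set.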
    def test_invalid_model_member(self):
        # A None entry among the dict inputs should fail validation.
        with pytest.raises(valid.ValidationError):
            report.compare(
                {0: self.X, 1: self.X},
                {0: self.y, 1: self.y},
                {0: self.prtc_attr, 1: None},
                self.model_dict,
                flag_oor=False,
            )
    def test_preds_and_probs(self):
        result = report.compare(
            [self.X, self.X],
            self.y,
            self.prtc_attr,
            predictions=[self.y, self.y],
            probabilities=[self.y, self.y],
            flag_oor=False,
        )
        self.is_result_valid(result)
    def test_outputType_withFlag_invalid(self):
        with pytest.raises(ValueError):
            _ = report.compare(
                [self.X, self.X],
                self.y,
                self.prtc_attr,
                predictions=[self.y, self.y],
                probabilities=[self.y, self.y],
                flag_oor=True,
                output_type="dataframe",
            )
    def test_embeddedHTML_withFlag_valid(self):
        result = report.compare(
            [self.X, self.X],
            self.y,
            self.prtc_attr,
            predictions=[self.y, self.y],
            probabilities=[self.y, self.y],
            flag_oor=True,
            output_type="html",
        )
        assert isinstance(result, str)
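    # The two tests above cover the flag_oor/output_type interaction:
    # presumably because out-of-range flags are applied to the rendered
    # output, compare() raises a ValueError for output_type="dataframe"
    # but returns an embeddable HTML string for output_type="html".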
    def test_compare_flags(self):
        # Should run without error; the flagged output is not validated
        # further here.
        _ = report.compare(
            self.df,
            self.df["classification"],
            self.df["prtc_attr"],
            predictions=[
                self.df["avg_classification"],
                self.df["avg_classification"],
            ],
            pred_type="classification",
            flag_oor=True,
        )
    def test_missing_models(self):
        with pytest.raises(valid.ValidationError):
            report.compare(
                {0: self.X, 1: self.X},
                {0: self.y, 1: self.y},
                {0: self.prtc_attr, 1: self.prtc_attr},
                {0: None, 1: None},
                flag_oor=False,
            )
    def test_model_list(self):
        result = report.compare(
            self.X, self.y, self.prtc_attr, [self.model_dict[0]], flag_oor=False
        )
        self.is_result_valid(result)
    def test_multiple_calls(self):
        args = (self.X, self.y, self.prtc_attr, self.model_dict[0])
        _ = report.compare(*args, flag_oor=False)
        result = report.compare(*args, flag_oor=False)
        self.is_result_valid(result)
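    # ------------------------------------------------------------------
    # Hedged sketch (an assumption, not part of the original suite): the
    # tests above depend on fixtures (self.X, self.y, self.prtc_attr,
    # self.model_dict, self.df) and a validator (self.is_result_valid)
    # defined elsewhere in this class. The helper below illustrates one
    # plausible shape for those compare() inputs: a feature frame, a binary
    # target, a binary protected attribute, and a dict of fitted models.
    # All names, sizes, and the BernoulliNB choice are illustrative only.
    # ------------------------------------------------------------------
    @staticmethod
    def _example_compare_inputs():
        import numpy as np
        import pandas as pd
        from sklearn.naive_bayes import BernoulliNB

        rng = np.random.default_rng(506)
        # Two binary feature columns; targets alternate so both classes appear.
        X = pd.DataFrame(
            {"A": rng.integers(0, 2, 32), "B": rng.integers(0, 2, 32)}
        )
        y = pd.Series([0, 1] * 16, name="y")
        prtc_attr = pd.Series([0] * 16 + [1] * 16, name="prtc_attr")
        model = BernoulliNB().fit(X, y)
        return X, y, prtc_attr, {0: model, 1: model}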