def __init__(self):
    super().__init__(
        file_loader=CSVLoader(),
        validators=(
            ExpectedColumnNamesValidator(
                expected=('image_id', 'x', 'y', 'score')
            ),
        ),
        join_key='image_id',
        ground_truth_path=(
            Path(__file__).parent / 'resources' / 'detection' / 'reference'
        ),
        predictions_path=(
            Path(__file__).parent / 'resources' / 'detection' / 'submission'
        ),
        output_file=Path('/tmp/metrics.json'),
    )
    self.detection_threshold = 0.5
    self.detection_radius = 1.0
def __init__(self):
    super().__init__(
        file_loader=CSVLoader(),
        ground_truth_path=(
            Path(__file__).parent / 'resources' / 'classification' / 'reference'
        ),
        predictions_path=(
            Path(__file__).parent / 'resources' / 'classification' / 'submission'
        ),
        output_file=Path('/tmp/metrics.json'),
        join_key='case',
        validators=(
            ExpectedColumnNamesValidator(expected=('case', 'class')),
        ),
    )
def __init__(self): super().__init__( file_loader=CSVLoader(), validators=(ExpectedColumnNamesValidator(expected=( "image_id_roi_id", "pixel_size", "x", "y", "score", )), ), join_key="image_id_roi_id", ) self.detection_threshold = 0.0 self.detection_radius = 4 / 0.24309392273426056
def __init__(self, outdir): super().__init__( file_loader=CSVLoader(), validators=(ExpectedColumnNamesValidator(expected=("image_id", "x", "y", "score")), ), join_key="image_id", ground_truth_path=(Path(__file__).parent / "resources" / "detection" / "reference"), predictions_path=(Path(__file__).parent / "resources" / "detection" / "submission"), output_file=Path(outdir) / "metrics.json", detection_threshold=0.5, detection_radius=1.0, )
def __init__(self, outdir):
    super().__init__(
        file_loader=CSVLoader(),
        ground_truth_path=(
            Path(__file__).parent / "resources" / "classification" / "reference"
        ),
        predictions_path=(
            Path(__file__).parent / "resources" / "classification" / "submission"
        ),
        output_file=Path(outdir) / "metrics.json",
        join_key="case",
        validators=(
            ExpectedColumnNamesValidator(expected=("case", "class")),
        ),
    )
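# Usage sketch (illustrative, not from the original source): evalutils
# Evaluation subclasses such as the ones configured above are normally run by
# calling their evaluate() method, which loads both CSV trees, applies the
# validators, joins on join_key, computes the metrics, and writes them to
# output_file. "ClassificationTestEval" is a hypothetical name standing in for
# the class whose __init__ appears above.
def run_classification_eval(outdir):
    evaluation = ClassificationTestEval(outdir=outdir)
    evaluation.evaluate()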
def test_expected_columns_ok(): validator = ExpectedColumnNamesValidator(expected=("foo", "bar")) df = DataFrame(columns=["foo", "bar"]) validator.validate(df=df)
def test_expected_columns_overdefined(): validator = ExpectedColumnNamesValidator(expected=("foo", "bar")) df = DataFrame(columns=["foo", "bar", "baz"]) with pytest.raises(ValidationError): validator.validate(df=df)
def test_expected_columns_creation():
    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        ExpectedColumnNamesValidator(expected=())
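# Companion test sketch (not in the original source): assuming
# ExpectedColumnNamesValidator also raises ValidationError when an expected
# column is missing, the under-defined case would mirror the over-defined one.
def test_expected_columns_underdefined():
    validator = ExpectedColumnNamesValidator(expected=("foo", "bar"))
    df = DataFrame(columns=["foo"])
    with pytest.raises(ValidationError):
        validator.validate(df=df)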