def test_override_email(test_data):
    """--email on the command line overrides the config; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--email", "*****@*****.**"])
    assert mod.args["email"] == "*****@*****.**"
    # A non-email string must fail schema validation; the parser instance is
    # never constructed, so there is nothing to assign.
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--email", "invalid"])
def test_override_float(test_data):
    """--float override parses to a float; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--float", "1.23456789"])
    # Compare with a tolerance rather than exact float equality.
    assert abs(mod.args["float"] - 1.23456789) < 1e-10
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--float", "invalid"])
def test_override_decimal(test_data):
    """--decimal override parses to a Decimal; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--decimal", "1.23456789"])
    assert mod.args["decimal"] == Decimal("1.23456789")
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--decimal", "invalid"])
def test_override_numpyarray(test_data):
    """--numpyarray override parses JSON-like text into an ndarray."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--numpyarray", "[[4,3],[2,1]]"])
    # array_equal also checks shape, unlike np.all(a == b) under broadcasting.
    assert np.array_equal(mod.args["numpyarray"], np.array([[4, 3], [2, 1]]))
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--numpyarray", "invalid"])
def test_override_boolean(test_data):
    """--boolean override parses 'False' to False; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--boolean", "False"])
    assert not mod.args["boolean"]
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--boolean", "invalid"])
def test_classifier_model_path(self, classifier_model, s3_classifier,
                               input_data, tmp_path):
    """Existing local and s3 classifier paths validate; missing ones raise."""
    # Baseline: the fixture's local classifier default validates cleanly.
    ArgSchemaParser(input_data=input_data,
                    schema_type=InferenceInputSchema, args=[])

    # A real s3 object also validates.
    with_s3 = dict(input_data, classifier_model_path=s3_classifier)
    ArgSchemaParser(input_data=with_s3,
                    schema_type=InferenceInputSchema, args=[])

    # A nonexistent s3 key is rejected with a "does not exist" message.
    missing_s3 = dict(input_data,
                      classifier_model_path="s3://my-bucket/fake-hello.txt")
    with pytest.raises(ValidationError) as excinfo:
        ArgSchemaParser(input_data=missing_s3,
                        schema_type=InferenceInputSchema, args=[])
    assert "does not exist" in str(excinfo.value)

    # A nonexistent local path is rejected the same way.
    missing_local = dict(
        input_data,
        classifier_model_path=str(tmp_path / "hello-again.txt"))
    with pytest.raises(ValidationError) as excinfo:
        ArgSchemaParser(input_data=missing_local,
                        schema_type=InferenceInputSchema, args=[])
    assert "does not exist" in str(excinfo.value)
def test_override_timedelta(test_data):
    """--timedelta override parses to a timedelta; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--timedelta", "0"])
    assert mod.args["timedelta"] == datetime.timedelta(0)
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--timedelta", "invalid"])
def test_override_datetime(test_data):
    """--datetime override parses ISO-8601 text; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--datetime", "1977-05-04T00:00:00"])
    assert mod.args["datetime"] == datetime.datetime(1977, 5, 4, 0, 0, 0)
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--datetime", "invalid"])
def test_override_url(test_data):
    """--url override accepts a valid URL; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--url", "http://www.alleninstitute.org"])
    assert mod.args["url"] == "http://www.alleninstitute.org"
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--url", "invalid"])
def test_override_number(test_data):
    """--number override parses to a number; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--number", "10"])
    assert mod.args["number"] == 10
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--number", "invalid"])
def test_override_slice(test_data):
    """--slice override parses 'start:stop:step' text; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--slice", "800:3:9000"])
    assert mod.args["slice"] == slice(800, 3, 9000)
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--slice", "invalid"])
def test_override_log_level(test_data):
    """--log_level override accepts a valid level name; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--log_level", "CRITICAL"])
    assert mod.args["log_level"] == "CRITICAL"
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--log_level", "invalid"])
def test_override_list(test_data):
    """--list override parses JSON-style list text; invalid input raises."""
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--list", "[1000,3000]"])
    assert mod.args["list"] == [1000, 3000]
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--list", "invalid"])
def test_parser_validation(input_dict, raises):
    """Parser construction raises ValidationError exactly when expected."""
    # Guard clause: a valid input dict must construct without error.
    if not raises:
        ArgSchemaParser(input_data=input_dict, schema_type=MySchema, args=[])
        return
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(input_data=input_dict, schema_type=MySchema, args=[])
def test_override_uuid(test_data):
    """--uuid override parses to a uuid.UUID; invalid input raises."""
    val = "1a66e457-4d0f-474a-bb4e-bee91e61e084"
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--uuid", val])
    assert mod.args["uuid"] == uuid.UUID(val)
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--uuid", "invalid"])
def test_override_list_deprecated(deprecated_data):
    """nargs-style list override still works but warns; JSON form now fails."""
    with pytest.warns(FutureWarning):
        mod = ArgSchemaParser(deprecated_data, schema_type=MyDeprecatedSchema,
                              args=["--list_deprecated", "1000", "3000"])
    assert mod.args["list_deprecated"] == [1000, 3000]
    # The deprecated field does not accept the JSON-style list syntax.
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(deprecated_data, schema_type=MyDeprecatedSchema,
                        args=["--list_deprecated", "[1000,3000]"])
def main():
    """Main entry point for running sg-prototype."""
    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)

    # YOUR STUFF GOES HERE
    output = {"input_parameters": mod.args}

    # Write to the requested output_json, or print the serialized output.
    if "output_json" in mod.args:
        mod.output(output)
    else:
        print(mod.get_output_json(output))
def test_bad_output(tmpdir):
    """Output that violates the output schema raises a ValidationError."""
    out_path = str(tmpdir.join('test_output_bad.json'))
    mod = ArgSchemaParser(input_data={'output_json': out_path},
                          output_schema_type=MyOutputSchema, args=[])
    bad_output = {
        "a": "example",
        "b": "not a number",  # wrong type for field "b"
        "M": np.array([[5, 5], [7, 2]]),
    }
    with pytest.raises(mm.ValidationError):
        mod.output(bad_output)
def test_output_unvalidated(tmpdir):
    """Without an output schema, output() writes the dict through verbatim."""
    out_file = tmpdir.join('test_output_unvalidated.json')
    mod = ArgSchemaParser(input_data={'output_json': str(out_file)}, args=[])
    payload = {"a": "example"}
    mod.output(payload)
    with open(str(out_file), 'r') as fp:
        assert json.load(fp) == payload
def test_override_inputfile(test_data, tmpdir_factory):
    """--inputfile must point at an existing file."""
    input2 = tmpdir_factory.mktemp("input3").join("input2.file")

    # The file does not exist yet, so validation fails.
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(test_data, schema_type=MySchema,
                        args=["--inputfile", str(input2)])

    # Create the file; validation now succeeds.
    with open(str(input2), "w") as f:
        f.write("")
    mod = ArgSchemaParser(test_data, schema_type=MySchema,
                          args=["--inputfile", str(input2)])
    assert mod.args["inputfile"] == str(input2)
    # isfile() implies existence, so one check covers both conditions.
    assert os.path.isfile(mod.args["inputfile"])
def test_invalid_model_registry_env(self, input_data):
    """An unknown model_registry_env value fails schema validation."""
    # Baseline: fixture data validates without error.
    ArgSchemaParser(input_data=input_data,
                    schema_type=InferenceInputSchema, args=[])

    # An invalid environment name is rejected with a descriptive message.
    bad_env = dict(input_data, model_registry_env="prode")
    with pytest.raises(ValidationError) as excinfo:
        ArgSchemaParser(input_data=bad_env,
                        schema_type=InferenceInputSchema, args=[])
    assert ("not a valid value for the 'model_registry_env'"
            in str(excinfo.value))
def main():
    """Main entry point for running AllenSDK Eye Tracking."""
    try:
        # Parse and validate command-line/input-json arguments.
        mod = ArgSchemaParser(schema_type=InputParameters,
                              output_schema_type=OutputParameters)

        # Open the video source and set up annotation output.
        istream = CvInputStream(mod.args["input_source"])
        ostream = setup_annotation(
            istream.frame_shape,
            **mod.args.get("annotation", DEFAULT_ANNOTATION))

        # QC plotting is optional; fall back to the tracker's default.
        qc_params = mod.args.get("qc", {})
        generate_plots = qc_params.get(
            "generate_plots", EyeTracker.DEFAULT_GENERATE_QC_OUTPUT)

        # Build the tracker from the configured sub-parameter groups.
        tracker = EyeTracker(istream,
                             ostream,
                             mod.args.get("starburst", {}),
                             mod.args.get("ransac", {}),
                             mod.args["pupil_bounding_box"],
                             mod.args["cr_bounding_box"],
                             generate_plots,
                             **mod.args.get("eye_params", {}))
        # Process the configured frame range (defaults: whole stream, step 1).
        cr_params, pupil_params, cr_err, pupil_err = tracker.process_stream(
            start=mod.args.get("start_frame", 0),
            stop=mod.args.get("stop_frame", None),
            step=mod.args.get("frame_step", 1))

        # Persist corneal-reflection/pupil parameters and the mean frame.
        output = write_output(mod.args["output_dir"], cr_params,
                              pupil_params, tracker.mean_frame)

        # Only available when adaptive pupil tracking was enabled.
        pupil_intensity = None
        if tracker.adaptive_pupil:
            pupil_intensity = tracker.pupil_colors
        if generate_plots:
            write_QC_output(tracker.annotator, cr_params, pupil_params,
                            cr_err, pupil_err, tracker.mean_frame,
                            pupil_intensity=pupil_intensity, **mod.args)

        # Echo the input parameters and emit the validated output.
        output["input_parameters"] = mod.args
        if "output_json" in mod.args:
            mod.output(output, indent=1)
        else:
            print(json.dumps(mod.get_output_json(output), indent=1))
    except marshmallow.ValidationError as e:
        # Bad arguments: show the validation error plus usage help.
        print(e)
        argparser = schema_argparser(InputParameters())
        argparser.print_usage()
def main():
    """Run quality-metric calculation and emit the results."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = calculate_quality_metrics(mod.args)
    output["input_parameters"] = mod.args
    # Write to output_json when requested, otherwise print the serialization.
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
def main():
    """Run stimulus-metric gathering; only MPI rank 0 emits results."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = calculate_stimulus_metrics_gather(mod.args)
    # Only rank 0 holds the gathered results and writes/logs them.
    if MPI_rank == 0:
        output["input_parameters"] = mod.args
        if "output_json" in mod.args:
            mod.output(output, indent=2)
        else:
            log_info(mod.get_output_json(output))
    # All ranks synchronize before exiting.
    barrier()
def main():
    """Run noise-template classification and emit the results."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = classify_noise_templates(mod.args)
    output["input_parameters"] = mod.args
    # Write to output_json when requested, otherwise print the serialization.
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
def main():
    """Main entry point: compute PSTH events and emit the results."""
    # Docstring moved above the import — in the original it followed the
    # import statement, making it a no-op string rather than the docstring.
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = get_psth_events(mod.args)
    output.update({"input_parameters": mod.args})
    # Write to output_json when requested, otherwise print the serialization.
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
def main():
    """Run automerging and emit the results."""
    from ._schemas import InputParameters, OutputParameters

    mod = ArgSchemaParser(schema_type=InputParameters,
                          output_schema_type=OutputParameters)
    output = run_automerging(mod.args)
    output["input_parameters"] = mod.args
    # Write to output_json when requested, otherwise print the serialization.
    if "output_json" in mod.args:
        mod.output(output, indent=2)
    else:
        print(mod.get_output_json(output))
def test_output(tmpdir):
    """output() applies schema defaults and serializes numpy arrays to JSON."""
    out_file = tmpdir.join('test_output.json')
    mod = ArgSchemaParser(input_data={'output_json': str(out_file)},
                          output_schema_type=MyOutputSchema, args=[])
    matrix = [[5, 5], [7, 2]]
    mod.output({"a": "example", "M": np.array(matrix)})
    with open(str(out_file), 'r') as fp:
        written = json.load(fp)
    # "b" receives its schema default of 5; the ndarray round-trips to a list.
    assert written == {"a": "example", "b": 5, "M": matrix}
def test_bad_path():
    """A nonexistent input_json path fails validation."""
    bad_args = {
        "input_json": "a bad path",
        "output_json": "another example",
        "log_level": "DEBUG",
    }
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(input_data=bad_args, args=[])
def test_bad_option():
    """Input with an invalid option value fails schema validation."""
    # The parser is never successfully constructed, so no assignment needed.
    with pytest.raises(mm.ValidationError):
        ArgSchemaParser(input_data={'a': 4}, schema_type=OptionSchema,
                        args=[])