def test_tool_command_line_precedence():
    """Ensure command-line options override values from the config file."""
    from ctapipe.core.tool import run_tool

    class SubComponent(Component):
        component_param = Float(10.0, help="some parameter").tag(config=True)

    class MyTool(Tool):
        description = "test"
        userparam = Float(5.0, help="parameter").tag(config=True)
        classes = List([SubComponent])
        aliases = Dict({"component_param": "SubComponent.component_param"})

        def setup(self):
            self.sub = self.add_component(SubComponent(parent=self))

    file_config = Config(
        {
            "MyTool": {"userparam": 12.0},
            "SubComponent": {"component_param": 15.0},
        }
    )

    # the config sets component_param to 15.0 ...
    tool = MyTool(config=file_config)
    # ... but the command line must win
    run_tool(tool, ["--component_param", "20.0"])

    assert tool.sub.component_param == 20.0
    assert tool.userparam == 12.0
def test_tool_logging_multiple_loggers(capsys):
    """Non-ctapipe loggers can be configured via tool config files."""
    logger = logging.getLogger("another_logger")

    config = Config(
        {
            "MyLogTool": {
                "log_config": {
                    "loggers": {
                        "another_logger": {
                            "level": "DEBUG",
                            "handlers": ["console"],
                        },
                        "ctapipe.ctapipe-test": {"level": "ERROR"},
                    }
                }
            }
        }
    )

    tool = MyLogTool(config=config)
    run_tool(tool)
    logger.debug("another-debug")

    # split lines and skip last empty line
    lines = capsys.readouterr().err.split("\n")[:-1]
    assert len(lines) == 3
    assert "test-error" in lines[0]
    assert "another-debug" in lines[2]
def test_tool_logging_quiet(capsys):
    """With -q, nothing is written to stderr regardless of log level."""
    tool = MyLogTool()
    # setting log-level should not matter when given -q
    run_tool(tool, ["-q", "--log-level", "DEBUG"])
    captured = capsys.readouterr().err
    assert len(captured) == 0
def test_tool_logging_setlevel(capsys):
    """Only records at or above the requested level reach stderr."""
    run_tool(MyLogTool(), ["--log-level", "ERROR"])

    # split lines and skip last empty line
    lines = capsys.readouterr().err.split("\n")[:-1]
    assert len(lines) == 2
    assert "test-error" in lines[0]
    assert "test-critical" in lines[1]
def test_tool_logging_defaults(capsys):
    """Default log level is WARNING (30) and no log file is configured."""
    tool = MyLogTool()
    assert tool.log_level == 30
    assert tool.log_file is None

    run_tool(tool)

    # split lines and skip last empty line
    lines = capsys.readouterr().err.split("\n")[:-1]
    assert len(lines) == 3
    assert "test-warn" in lines[0]
def test_tool_html_rep(tmp_path):
    """Check that the HTML repr for Jupyter notebooks works."""

    class MyTool(Tool):
        description = "test"
        userparam = Float(5.0, help="parameter").tag(config=True)

    class MyComponent(Component):
        val = Float(1.0, help="val").tag(config=True)

    class MyTool2(Tool):
        """A docstring description"""

        userparam = Float(5.0, help="parameter").tag(config=True)
        classes = [MyComponent]

        def setup(self):
            self.comp = MyComponent(parent=self)

        def start(self):
            pass

    # repr works for a plain, never-run tool
    assert len(MyTool()._repr_html_()) > 0

    tool2 = MyTool2()
    assert len(tool2._repr_html_()) > 0

    # make sure html repr works also after tool was run
    assert run_tool(tool2, argv=[], cwd=tmp_path) == 0
    assert len(tool2._repr_html_()) > 0
def test_invalid_traits(tmp_path, caplog):
    """Unknown traits, via CLI or config file, must exit with code 2."""
    caplog.set_level(logging.INFO, logger="ctapipe")

    class MyTool(Tool):
        name = "test"
        description = "test"
        param = Float(5.0, help="parameter").tag(config=True)

    # 2 means trait error
    assert run_tool(MyTool(), ["--MyTool.foo=5"]) == 2

    # test that it also works for config files
    config = tmp_path / "config.json"
    config.write_text(json.dumps({"MyTool": {"foo": 5}}))
    assert run_tool(MyTool(), [f"--config={config}"]) == 2
def test_tool_logging_file(capsys):
    """Log records go to the requested file as well as to stderr."""
    tool = MyLogTool()
    with tempfile.NamedTemporaryFile("w+") as f:
        run_tool(tool, ["--log-file", f.name])
        # f.read() already returns str; file level filters out debug
        contents = f.read()
        assert len(contents) > 0
        assert "test-debug" not in contents
        assert "test-info" in contents
        assert "test-warn" in contents

    # split lines and skip last empty line
    stderr_lines = capsys.readouterr().err.split("\n")[:-1]
    assert len(stderr_lines) > 0
    assert "test-warn" in stderr_lines[0]
def test_tool_exit_code():
    """Check that tools produce the expected exit codes."""

    class MyTool(Tool):
        description = "test"
        userparam = Float(5.0, help="parameter").tag(config=True)

    tool = MyTool()

    # unknown option -> usage error (2)
    with pytest.raises(SystemExit) as exc:
        tool.run(["--non-existent-option"])
    assert exc.value.code == 2

    # invalid trait value -> generic error (1)
    with pytest.raises(SystemExit) as exc:
        tool.run(["--MyTool.userparam=foo"])
    assert exc.value.code == 1

    # run_tool converts SystemExit into a return code
    assert run_tool(tool, ["--help"]) == 0
    assert run_tool(tool, ["--non-existent-option"]) == 2
def test_provenance_log_help(tmpdir):
    """Check that no provenance log is written if only the help was run."""
    from ctapipe.core.tool import run_tool

    class MyTool(Tool):
        description = "test"
        userparam = Float(5.0, help="parameter").tag(config=True)

    tool = MyTool()
    tool.provenance_log = Path(tmpdir) / "test_prov_log_help.log"

    for option in ("-h", "--help", "--help-all"):
        assert run_tool(tool, [option], cwd=tmpdir) == 0
        assert not tool.provenance_log.exists()
def test_optics_from_dump_instrument():
    """Optics can be read back from files written by dump-instrument.

    Fix: the original restored the working directory and CTAPIPE_SVC_PATH
    only after the assertions, so any failure leaked a changed cwd and a
    stale environment variable into all subsequent tests. Cleanup now runs
    in a ``finally`` block unconditionally.
    """
    # test with file written by dump-instrument
    svc_path_before = os.getenv("CTAPIPE_SVC_PATH")
    cwd = os.getcwd()
    try:
        with tempfile.TemporaryDirectory() as tmp_dir:
            os.chdir(tmp_dir)
            os.environ["CTAPIPE_SVC_PATH"] = tmp_dir

            infile = get_dataset_path("gamma_test_large.simtel.gz")
            run_tool(DumpInstrumentTool(), [f"--input={infile}", "--format=ecsv"])

            lst = OpticsDescription.from_name(
                "LST_LST_LSTCam", "MonteCarloArray.optics"
            )
            assert lst.num_mirrors == 1
            assert lst.equivalent_focal_length.to_value(u.m) == 28
            assert lst.num_mirror_tiles == 198
    finally:
        # always restore cwd and the env var, even when an assert fails;
        # pop() avoids a KeyError if the variable was never set
        os.chdir(cwd)
        if svc_path_before is None:
            os.environ.pop("CTAPIPE_SVC_PATH", None)
        else:
            os.environ["CTAPIPE_SVC_PATH"] = svc_path_before
def test_no_ff_tagging(tmpdir):
    """Test the ctapipe stage1 tool can read in LST real data using the event source.

    Fix: the final assertion was broken across two physical lines
    (a dangling ``assert`` followed by the bare expression), which is a
    syntax error; it is rejoined into a single statement here.
    """
    from ctapipe.tools.stage1 import Stage1Tool
    from ctapipe.core.tool import run_tool

    tmpdir = Path(tmpdir)
    config_path = tmpdir / 'config.json'

    config = {
        'LSTEventSource': {
            "use_flatfield_heuristic": False,
            'LSTR0Corrections': {
                'drs4_pedestal_path': str(test_drs4_pedestal_path),
                'drs4_time_calibration_path': str(test_time_calib_path),
                'calibration_path': str(test_calib_path),
            },
            'PointingSource': {
                'drive_report_path': str(test_drive_report),
            },
            'EventTimeCalculator': {
                'run_summary_path': str(test_run_summary),
            },
        },
        "CameraCalibrator": {
            "image_extractor_type": "LocalPeakWindowSum",
            "LocalPeakWindowSum": {
                "window_shift": 4,
                "window_width": 8,
                "apply_integration_correction": False,
            },
        },
        "TailcutsImageCleaner": {
            "picture_threshold_pe": 6,
            "boundary_threshold_pe": 3,
            "keep_isolated_pixels": False,
            "min_picture_neighbors": 1,
        },
    }
    with config_path.open('w') as f:
        json.dump(config, f)

    tool = Stage1Tool()
    output = tmpdir / "test_dl1.h5"
    ret = run_tool(tool, argv=[
        f'--input={test_r0_path}',
        f'--output={output}',
        f'--config={config_path}',
    ])
    assert ret == 0

    # test our custom default works
    assert tool.event_source.r0_r1_calibrator.gain_selector.threshold == 3500

    parameters = read_table(output, '/dl1/event/telescope/parameters/tel_001')
    assert len(parameters) == 200

    trigger = read_table(output, '/dl1/event/subarray/trigger')
    # test regression of event time calculation
    first_event_time = Time(59101.95035244, format='mjd', scale='tai')
    assert np.all((trigger['time'] - first_event_time).to_value(u.s) < 10)

    event_type_counts = np.bincount(trigger['event_type'])
    # one pedestal and flat field expected each, rest should be physics data;
    # without the ff heuristic, the ff event keeps type SUBARRAY
    assert event_type_counts.sum() == 200
    assert event_type_counts[EventType.FLATFIELD.value] == 0
    assert event_type_counts[EventType.SKY_PEDESTAL.value] == 1
    assert event_type_counts[EventType.SUBARRAY.value] == 199