Example #1
    def test_get_image(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        self.assertIsNotNone(image)

        raw_image = simulated_camera.get_image(raw=True)
        self.assertIsNotNone(raw_image)

    def test_background_roi(self):
        pipeline_config = PipelineConfig("test_pipeline")

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        parameters = pipeline_config.get_configuration()
        camera_name = simulated_camera.get_name()

        parameters["roi_background"] = [0, 200, 0, 200]

        result = process_image(image=image,
                               pulse_id=0,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        required_fields_in_result = [
            camera_name + ".processing_parameters",
            camera_name + '.roi_background_x_profile'
        ]

        self.assertSetEqual(set(required_fields_in_result), set(result.keys()),
                            "Not all required keys are present in the result")

Example #3
    def test_noop_pipeline(self):
        pipeline_config = PipelineConfig("test_pipeline")

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        required_fields_in_result = ['x_center_of_mass', 'x_axis', 'y_axis', 'x_profile', 'y_fit_standard_deviation',
                                     'y_rms', 'timestamp', 'y_profile', 'image', 'max_value', 'x_fit_offset',
                                     'x_fit_gauss_function', 'y_center_of_mass', 'min_value', 'y_fit_mean',
                                     'x_fit_mean', 'x_rms', 'y_fit_amplitude', 'x_fit_amplitude',
                                     'y_fit_gauss_function', 'x_fit_standard_deviation', 'y_fit_offset',
                                     "processing_parameters", "intensity"]

        self.assertSetEqual(set(required_fields_in_result), set(result.keys()),
                            "Not all required keys are present in the result")

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "The input and output image are not the same, but the pipeline should not change it.")

        self.assertDictEqual(parameters, json.loads(result["processing_parameters"]),
                             "The passed and the received processing parameters are not the same.")

    def test_both_rois(self):
        pipeline_config = PipelineConfig("test_pipeline")

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        parameters = pipeline_config.get_configuration()
        camera_name = simulated_camera.get_name()

        parameters["roi_signal"] = [0, 200, 0, 200]
        parameters["roi_background"] = [0, 200, 0, 200]

        for i in range(10):
            result = process_image(image=image,
                                   pulse_id=i,
                                   timestamp=time.time(),
                                   x_axis=x_axis,
                                   y_axis=y_axis,
                                   parameters=parameters)

        required_fields_in_result = [
            camera_name + ".processing_parameters",
            camera_name + '.roi_signal_x_profile',
            # camera_name + '.edge_position',
            # camera_name + '.cross_correlation_amplitude',
            camera_name + '.roi_background_x_profile'
        ]

        self.assertSetEqual(set(required_fields_in_result), set(result.keys()),
                            "Not all required keys are present in the result")

Example #5
    def test_wrong_background_size(self):
        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "white_background"
        }

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        background_provider = MockBackgroundManager()

        # Invalid background size.
        background_provider.save_background("white_background", numpy.zeros(shape=(100, 100)),
                                            append_timestamp=False)

        parameters = PipelineConfig("test_pipeline", pipeline_parameters).get_configuration()
        image_background_array = background_provider.get_background("white_background")

        with self.assertRaisesRegex(RuntimeError, "Invalid background_image size "):
            process_image(image=image,
                          timestamp=time.time(),
                          x_axis=x_axis,
                          y_axis=y_axis,
                          parameters=parameters,
                          image_background_array=image_background_array)

    def test_single_function(self):
        # Profile only if LineProfiler present.
        # To install: conda install line_profiler
        try:
            from line_profiler import LineProfiler
        except ImportError:
            return

        function_to_perf = functions.subtract_background
        n_iterations = 200
        n_tests = 5

        simulated_camera = CameraSimulation(CameraConfig("simulation"), size_x=2048, size_y=2048)

        for _ in range(n_tests):

            profile = LineProfiler()
            wrapped_function = profile(function_to_perf)

            images = []
            backgrounds = []

            for _ in range(n_iterations):
                images.append(simulated_camera.get_image())
                backgrounds.append(simulated_camera.get_image())

            for index in range(n_iterations):
                wrapped_function(images[index], backgrounds[index])

            profile.print_stats()

    def test_process_image_performance(self):
        # Profile only if LineProfiler present.
        # To install: conda install line_profiler
        try:
            from line_profiler import LineProfiler
        except ImportError:
            return

        simulated_camera = CameraSimulation(CameraConfig("simulation"), size_x=2048, size_y=2048)
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        x_size, y_size = simulated_camera.get_geometry()
        image_background_array = numpy.zeros(shape=(y_size, x_size), dtype="uint16") + 3

        parameters = {

            "image_threshold": 1,
            "image_region_of_interest": [0, 2048, 0, 2048],

            "image_good_region": {
                "threshold": 0.3,
                "gfscale": 1.8
            },

            "image_slices": {
                "number_of_slices": 5,
                "scale": 1.0,
                "orientation": "horizontal"
            }
        }

        profile = LineProfiler(process_image)
        process_image_wrapper = profile(process_image)

        n_iterations = 300

        print("Generating images.")

        images = []
        for _ in range(n_iterations):
            images.append(simulated_camera.get_image())

        print("Processing images.")

        start_time = time.time()
        for image in images:
            process_image_wrapper(image=image,
                                  timestamp=time.time(),
                                  x_axis=x_axis,
                                  y_axis=y_axis,
                                  parameters=parameters,
                                  image_background_array=image_background_array)
        end_time = time.time()

        time_difference = end_time - start_time
        rate = n_iterations / time_difference

        print("Processing rate: ", rate)

        profile.print_stats()

Example #8
    def test_image_background(self):
        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "white_background"
        }

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        background_provider = MockBackgroundManager()
        x_size, y_size = simulated_camera.get_geometry()
        background_provider.save_background("white_background", numpy.zeros(shape=(y_size, x_size)),
                                            append_timestamp=False)

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()
        image_background_array = background_provider.get_background(parameters.get("image_background"))

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters,
                               image_background_array=image_background_array)

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "A zero background should not change the image.")

        max_value_in_image = result["max_value"]

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "max_background",
            "image_threshold": 0
        }

        max_background = numpy.zeros(shape=(y_size, x_size), dtype="uint16")
        max_background.fill(max_value_in_image)
        background_provider.save_background("max_background", max_background, append_timestamp=False)

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()
        image_background_array = background_provider.get_background(parameters.get("image_background"))

        expected_image = numpy.zeros(shape=(y_size, x_size))

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters,
                               image_background_array=image_background_array)

        self.assertTrue(numpy.array_equal(result["image"], expected_image),
                        "The image should be all zeros - negative numbers are not allowed.")

Example #9
        def run_the_pipeline(configuration, simulated_image=None):
            parameters = PipelineConfig("test_pipeline", configuration).get_configuration()

            simulated_camera = CameraSimulation(CameraConfig("simulation"))

            if simulated_image is None:
                simulated_image = simulated_camera.get_image()

            x_axis, y_axis = simulated_camera.get_x_y_axis()

            return process_image(image=simulated_image, timestamp=time.time(), x_axis=x_axis, y_axis=y_axis,
                                 parameters=parameters)

    def test_bsread_transceiver(self):
        manager = multiprocessing.Manager()
        stop_event = multiprocessing.Event()
        statistics = manager.Namespace()
        parameter_queue = multiprocessing.Queue()

        expected_width = 659
        expected_height = 494
        expected_shape = [expected_width, expected_height]

        mock_camera = MockCameraBsread(CameraConfig("SLG-LCAM-C102"),
                                       expected_width, expected_height,
                                       "tcp://0.0.0.0:9999")

        def transceiver():
            process_bsread_camera(stop_event, statistics, parameter_queue,
                                  mock_camera, 12000)

        thread1 = Thread(target=transceiver)
        thread1.start()

        test_base_dir = os.path.split(os.path.abspath(__file__))[0]
        thread2 = Thread(target=replay_dump,
                         args=("tcp://0.0.0.0:9999",
                               os.path.join(test_base_dir,
                                            "test_camera_dump")))
        thread2.start()

        with source(host="0.0.0.0", port=12000, mode=SUB) as stream:
            data1 = stream.receive()
            data2 = stream.receive()

        self.assertIsNotNone(data1)
        self.assertIsNotNone(data2)

        stop_event.set()
        thread1.join()
        thread2.join()

        self.assertListEqual(list(data1.data.data["image"].value.shape),
                             expected_shape[::-1])
        self.assertListEqual(list(data2.data.data["image"].value.shape),
                             expected_shape[::-1])

        self.assertEqual(data1.data.data["width"].value,
                         data2.data.data["width"].value)
        self.assertEqual(data1.data.data["height"].value,
                         data2.data.data["height"].value)
        self.assertEqual(data1.data.data["width"].value, expected_width)
        self.assertEqual(data1.data.data["height"].value, expected_height)

        self.assertEqual(data1.data.pulse_id, data2.data.pulse_id - 1)

    def test_camera_calibration(self):
        camera = CameraSimulation(CameraConfig("simulation"))
        size_x, size_y = camera.get_geometry()

        image = camera.get_image()

        self.assertEqual(image.shape[0], size_y)
        self.assertEqual(image.shape[1], size_x)

        x_axis, y_axis = camera.get_x_y_axis()

        self.assertEqual(x_axis.shape[0], size_x)
        self.assertEqual(y_axis.shape[0], size_y)

    def test_custom_hostname(self):
        config_manager = CameraConfigManager(
            config_provider=MockConfigStorage())
        camera_instance_manager = CameraInstanceManager(
            config_manager, None, hostname="custom_cam_hostname")
        config_manager.save_camera_config(
            "simulation",
            CameraConfig("simulation").get_configuration())

        stream_address = camera_instance_manager.get_instance_stream(
            "simulation")
        self.assertTrue(stream_address.startswith("tcp://custom_cam_hostname"))

        camera_instance_manager.stop_all_instances()

Example #13
    def test_sum_images(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        accumulated_image = None

        n_images = 1000

        for _ in range(n_images):
            accumulated_image = sum_images(image=image, accumulator_image=accumulated_image)

        processed_image = accumulated_image / n_images
        processed_image = processed_image.astype(dtype="uint16")

        numpy.testing.assert_array_equal(image, processed_image)

    def set_camera_config(camera_name):
        """
        Set the camera settings.
        :param camera_name: Name of the camera to change the config for.
        :return: New config.
        """

        new_config = CameraConfig(camera_name, request.json).get_configuration()

        instance_manager.config_manager.save_camera_config(camera_name, new_config)
        instance_manager.set_camera_instance_config(camera_name, new_config)

        return {"state": "ok",
                "status": "Camera %s configuration saved." % camera_name,
                "config": new_config}

    def test_camera_frame_rate(self):
        camera = CameraSimulation(CameraConfig("simulation"))

        self.assertEqual(camera.frame_rate, 10)

        new_frame_rate = 1
        camera_config = CameraConfig("simulation")
        configuration = camera_config.get_configuration()
        configuration["frame_rate"] = new_frame_rate
        camera_config.set_configuration(configuration)

        camera = CameraSimulation(camera_config)

        self.assertEqual(camera.frame_rate, new_frame_rate)

    def test_camera_simulation(self):
        camera = CameraSimulation(CameraConfig("simulation"))

        n_images_to_receive = 5

        def callback_method(image, timestamp):
            self.assertIsNotNone(image, "Image should not be None")
            self.assertIsNotNone(timestamp, "Timestamp should not be None")

            nonlocal n_images_to_receive
            if n_images_to_receive <= 0:
                camera.clear_callbacks()
                camera.simulation_stop_event.set()

            n_images_to_receive -= 1

        camera.connect()
        camera.add_callback(callback_method)

        camera.simulation_stop_event.wait()

    def test_camera_simulation_interval(self):
        camera = CameraSimulation(CameraConfig("simulation"))

        self.assertEqual(camera.simulation_interval,
                         config.DEFAULT_CAMERA_SIMULATION_INTERVAL)

        new_simulation_interval = 1
        camera_config = CameraConfig("simulation")
        configuration = camera_config.get_configuration()
        configuration["simulation_interval"] = new_simulation_interval
        camera_config.set_configuration(configuration)

        camera = CameraSimulation(camera_config)

        self.assertEqual(camera.simulation_interval, new_simulation_interval)

Example #18
    def test_intensity(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation"
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        x_sum = result["x_profile"].sum()
        y_sum = result["y_profile"].sum()

        # The sums of the X and Y profiles should always give the same result as the intensity.
        self.assertAlmostEqual(x_sum, result["intensity"], delta=10000)
        self.assertAlmostEqual(y_sum, result["intensity"], delta=10000)

Example #19
    def test_image_threshold(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        x_size, y_size = simulated_camera.get_geometry()

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_threshold": 9999999
        }

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        expected_image = numpy.zeros(shape=(y_size, x_size))
        self.assertTrue(numpy.array_equal(result["image"], expected_image),
                        "An image of zeros should have been produced.")

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_threshold": 0
        }

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "The image should be the same as the original image.")

    def test_default_config(self):
        configuration = {"source": "simulation"}

        configuration = CameraConfig("simulation", configuration)
        complete_config = configuration.get_configuration()

        self.assertIsNone(complete_config["camera_calibration"])
        self.assertFalse(complete_config["mirror_x"])
        self.assertFalse(complete_config["mirror_y"])
        self.assertEqual(complete_config["rotate"], 0)
        self.assertEqual(complete_config["source_type"], "epics")

        configuration = {"source": "simulation", "camera_calibration": {}}

        configuration = CameraConfig("simulation", configuration)
        complete_config = configuration.get_configuration()

        self.assertSetEqual(
            set(complete_config["camera_calibration"].keys()),
            set(CameraConfig.DEFAULT_CAMERA_CALIBRATION.keys()),
            "Missing keys in camera calibration.")

def get_simulated_camera(path="camera_config/", name="simulation"):
    return CameraSimulation(CameraConfig(name, get_config(path, name)))

    def test_invalid_source_type(self):
        configuration = {"source": "simulation", "source_type": "invalid"}

        with self.assertRaisesRegex(ValueError, "Invalid source_type "):
            configuration = CameraConfig("simulation", configuration)

    def test_camera_settings_change(self):
        stream_address = self.instance_manager.get_instance_stream(
            "simulation")
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        sim_x, sim_y = simulated_camera.get_geometry()

        camera_host, camera_port = get_host_port_from_stream_address(
            stream_address)

        # Collect from the pipeline.
        with source(host=camera_host, port=camera_port, mode=SUB) as stream:
            data = stream.receive()

            x_size = data.data.data["width"].value
            y_size = data.data.data["height"].value

            self.assertEqual(x_size, sim_x)
            self.assertEqual(y_size, sim_y)

            x_axis_1 = data.data.data["x_axis"].value
            y_axis_1 = data.data.data["y_axis"].value

            self.assertEqual(x_axis_1.shape[0], sim_x)
            self.assertEqual(y_axis_1.shape[0], sim_y)

        new_config = update_camera_config(
            self.instance_manager.get_instance(
                "simulation").get_configuration(), {"rotate": 1})
        new_config = CameraConfig("simulation", new_config).get_configuration()
        self.instance_manager.set_camera_instance_config(
            "simulation", new_config)

        sleep(0.5)

        # Collect from the pipeline.
        with source(host=camera_host, port=camera_port, mode=SUB) as stream:
            data = stream.receive()

            x_size = data.data.data["width"].value
            y_size = data.data.data["height"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_size, sim_y)
            self.assertEqual(y_size, sim_x)

            x_axis_2 = data.data.data["x_axis"].value
            y_axis_2 = data.data.data["y_axis"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_axis_2.shape[0], sim_y)
            self.assertEqual(y_axis_2.shape[0], sim_x)

        # The axis should just be switched.
        self.assertTrue(numpy.array_equal(x_axis_1, y_axis_2))
        self.assertTrue(numpy.array_equal(y_axis_1, x_axis_2))

        new_config = update_camera_config(
            self.instance_manager.get_instance(
                "simulation").get_configuration(), {"camera_calibration": {}})
        new_config = CameraConfig("simulation", new_config).get_configuration()
        self.instance_manager.set_camera_instance_config(
            "simulation", new_config)

        with source(host=camera_host, port=camera_port, mode=SUB) as stream:
            data = stream.receive()

            x_size = data.data.data["width"].value
            y_size = data.data.data["height"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_size, sim_y)
            self.assertEqual(y_size, sim_x)

            x_axis_3 = data.data.data["x_axis"].value
            y_axis_3 = data.data.data["y_axis"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_axis_3.shape[0], sim_y)
            self.assertEqual(y_axis_3.shape[0], sim_x)

        self.instance_manager.stop_all_instances()

    def setUp(self):
        self.instance_manager = get_test_instance_manager()

        self.instance_manager.config_manager.save_camera_config(
            self.simulation_camera,
            CameraConfig("simulation").get_configuration())

Example #25
    def test_slices(self):

        def run_the_pipeline(configuration, simulated_image=None):
            parameters = PipelineConfig("test_pipeline", configuration).get_configuration()

            simulated_camera = CameraSimulation(CameraConfig("simulation"))

            if simulated_image is None:
                simulated_image = simulated_camera.get_image()

            x_axis, y_axis = simulated_camera.get_x_y_axis()

            return process_image(image=simulated_image, timestamp=time.time(), x_axis=x_axis, y_axis=y_axis,
                                 parameters=parameters)

        pipeline_configuration = {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 1
            },
            "image_slices": {
                "number_of_slices": 9
            }
        }

        result = run_the_pipeline(pipeline_configuration)

        self.assertEqual(result["slice_amount"], 9)
        self.assertEqual(result["slice_orientation"], "vertical", "Default slice orientation should be vertical.")
        self.assertTrue("slice_length" in result)

        pipeline_configuration = {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 1
            },
            "image_slices": {
                "orientation": "horizontal"
            }
        }

        result = run_the_pipeline(pipeline_configuration)

        self.assertEqual(result["slice_orientation"], "horizontal")
        self.assertTrue("slice_length" in result)

        with self.assertRaisesRegex(ValueError, "Invalid slice orientation 'invalid'."):
            pipeline_configuration = {
                "camera_name": "simulation",
                "image_good_region": {
                    "threshold": 1
                },
                "image_slices": {
                    "orientation": "invalid"
                }
            }

            run_the_pipeline(pipeline_configuration)

        image = CameraSimulation(CameraConfig("simulation")).get_image()

        pipeline_configuration = {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 0.1
            },
            "image_slices": {
                "orientation": "vertical",
                "number_of_slices": 3
            }
        }

        result_1 = run_the_pipeline(pipeline_configuration, image)
        result_2 = run_the_pipeline(pipeline_configuration, image)

        # Two calculations with the same data should give the same result.
        self.assertEqual(result_1["slice_0_center_x"], result_2["slice_0_center_x"])
        self.assertEqual(result_1["slice_0_center_y"], result_2["slice_0_center_y"])

        pipeline_configuration = {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 0.1
            },
            "image_slices": {
                "orientation": "horizontal",
                "number_of_slices": 3
            }
        }

        result_3 = run_the_pipeline(pipeline_configuration, image)

        # If we orientate the slices horizontally, the slice center has to change.
        self.assertNotEqual(result_1["slice_0_center_x"], result_3["slice_0_center_x"])
        self.assertNotEqual(result_1["slice_0_center_y"], result_3["slice_0_center_y"])

Example #26
    def test_region_of_interest_default_values(self):

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation"
        }).get_configuration()

        good_region_keys = set(["good_region", "gr_x_axis", "gr_y_axis", "gr_x_fit_gauss_function", "gr_x_fit_offset",
                                "gr_x_fit_amplitude", "gr_x_fit_standard_deviation", "gr_x_fit_mean",
                                "gr_y_fit_gauss_function", "gr_y_fit_offset", "gr_y_fit_amplitude",
                                "gr_y_fit_standard_deviation", "gr_y_fit_mean", "gr_intensity",
                                "gr_x_profile", "gr_y_profile"])

        slices_key_formats = set(["slice_%s_center_x", "slice_%s_center_y", "slice_%s_standard_deviation",
                                  "slice_%s_intensity"])

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertFalse(any((x in result for x in good_region_keys)), 'There should not be good region keys.')

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 99999
            }
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(all((x in result for x in good_region_keys)), 'There should be good region keys.')
        self.assertTrue(all((result[x] is None for x in good_region_keys)), 'All values should be None.')

        number_of_slices = 7

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 99999
            },
            "image_slices": {
                "number_of_slices": number_of_slices
            }
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(all((x in result for x in good_region_keys)), 'There should be good region keys.')
        self.assertTrue(all((x in result for x in (x % counter
                                                   for x in slices_key_formats
                                                   for counter in range(number_of_slices)))))

from cam_server.pipeline.data_processing.processor import process_image

# Size of simulated image.
image_size_x = 1280
image_size_y = 960

# Select compression options. Only these two are available in Python - cam_server uses "bitshuffle_lz4".
compression = "bitshuffle_lz4"
# compression = "none"

# Stream configuration. cam_server uses PUB for the output stream.
output_stream_port = 9999
output_stream_mode = PUB
# output_stream_mode = PUSH

simulated_camera = CameraSimulation(camera_config=CameraConfig("simulation"),
                                    size_x=image_size_x, size_y=image_size_y)
x_axis, y_axis = simulated_camera.get_x_y_axis()
x_size, y_size = simulated_camera.get_geometry()

# Documentation: https://github.com/datastreaming/cam_server#pipeline_configuration
pipeline_parameters = {
    "camera_name": "simulation"
}

pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
parameters = pipeline_config.get_configuration()

image_number = 0

with sender(port=output_stream_port, mode=output_stream_mode) as output_stream:
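    # The original snippet is truncated here. The loop below is a minimal sketch of the
    # sending side, not the original code: it assumes `time`, CameraSimulation, CameraConfig,
    # PipelineConfig, `sender` and PUB are imported earlier in the full script (not shown in
    # this excerpt), and that the bsread sender exposes send(data=..., pulse_id=...).
    # Configuring the chosen compression on the output stream is also omitted here.
    while True:
        image = simulated_camera.get_image()

        # Run the pipeline on the simulated image.
        processed_data = process_image(image=image,
                                       timestamp=time.time(),
                                       x_axis=x_axis,
                                       y_axis=y_axis,
                                       parameters=parameters)

        image_number += 1

        # Publish the processed data on the output stream.
        output_stream.send(data=processed_data, pulse_id=image_number)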

    def test_client(self):
        server_info = self.client.get_server_info()

        self.assertFalse(server_info["active_instances"],
                         "There should be no running instances.")

        expected_cameras = set([
            "camera_example_1", "camera_example_2", "camera_example_3",
            "camera_example_4", "simulation"
        ])

        self.assertSetEqual(set(self.client.get_cameras()), expected_cameras,
                            "Not getting all expected cameras")

        camera_stream_address = self.client.get_camera_stream("simulation")

        self.assertTrue(bool(camera_stream_address),
                        "Camera stream address cannot be empty.")

        self.assertTrue(
            "simulation" in self.client.get_server_info()["active_instances"],
            "Simulation camera not present in server info.")

        # Check if we can connect to the stream and receive data (in less than 2 seconds).
        host, port = get_host_port_from_stream_address(camera_stream_address)
        with source(host=host, port=port, receive_timeout=2000,
                    mode=SUB) as stream:
            data = stream.receive()
            self.assertIsNotNone(data, "Received data was none.")

            required_fields = set(
                ["image", "timestamp", "width", "height", "x_axis", "y_axis"])
            self.assertSetEqual(required_fields, set(data.data.data.keys()),
                                "Required fields missing.")

            image = data.data.data["image"].value
            x_size, y_size = CameraSimulation(
                CameraConfig("simulation")).get_geometry()
            self.assertListEqual(
                list(image.shape), [y_size, x_size],
                "Original and received image are not the same.")

            self.assertEqual(data.data.data["width"].value, x_size,
                             "Width not correct.")
            self.assertEqual(data.data.data["height"].value, y_size,
                             "Height not correct.")

        # Stop the simulation instance.
        self.client.stop_camera("simulation")

        self.assertTrue(
            "simulation"
            not in self.client.get_server_info()["active_instances"],
            "Camera simulation did not stop.")

        self.client.get_camera_stream("simulation")

        self.assertTrue(
            "simulation" in self.client.get_server_info()["active_instances"],
            "Camera simulation did not start.")

        self.client.stop_all_cameras()

        self.assertTrue(
            "simulation"
            not in self.client.get_server_info()["active_instances"],
            "Camera simulation did not stop.")

        example_1_config = self.client.get_camera_config("camera_example_1")

        self.assertTrue(bool(example_1_config), "Cannot retrieve config.")

        # Change the name to reflect the camera.
        example_1_config["name"] = "testing_camera"

        self.client.set_camera_config("testing_camera", example_1_config)

        testing_camera_config = self.client.get_camera_config("testing_camera")

        self.assertDictEqual(example_1_config, testing_camera_config,
                             "Saved and loaded configs are not the same.")

        geometry = self.client.get_camera_geometry("simulation")
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        size_x, size_y = simulated_camera.get_geometry()
        self.assertListEqual(
            geometry, [size_x, size_y],
            'The geometry of the simulated camera is not correct.')

        self.assertTrue("testing_camera" in self.client.get_cameras(),
                        "Testing camera should be present.")

        self.client.delete_camera_config("testing_camera")

        self.assertTrue("testing_camera" not in self.client.get_cameras(),
                        "Testing camera should not be present.")

        # Test if it fails quickly enough.
        with self.assertRaisesRegex(
                ValueError,
                "Camera with prefix EPICS_example_1 not online - Status None"):
            self.client.get_camera_stream("camera_example_1")

        self.assertTrue(self.client.is_camera_online("simulation"),
                        "Simulation should be always online")

        self.assertFalse(self.client.is_camera_online("camera_example_1"),
                         "Epics not working in this tests.")

        self.client.set_camera_config(
            "simulation_temp", self.client.get_camera_config("simulation"))

        stream_address = self.client.get_camera_stream("simulation_temp")
        camera_host, camera_port = get_host_port_from_stream_address(
            stream_address)
        sim_x, sim_y = CameraSimulation(
            CameraConfig("simulation")).get_geometry()

        instance_info = self.client.get_server_info(
        )["active_instances"]["simulation_temp"]
        self.assertTrue("last_start_time" in instance_info)
        self.assertTrue("statistics" in instance_info)

        # Collect from the pipeline.
        with source(host=camera_host, port=camera_port, mode=SUB) as stream:
            data = stream.receive()

            x_size = data.data.data["width"].value
            y_size = data.data.data["height"].value

            self.assertEqual(x_size, sim_x)
            self.assertEqual(y_size, sim_y)

            x_axis_1 = data.data.data["x_axis"].value
            y_axis_1 = data.data.data["y_axis"].value

            self.assertEqual(x_axis_1.shape[0], sim_x)
            self.assertEqual(y_axis_1.shape[0], sim_y)

        camera_config = self.client.get_camera_config("simulation_temp")
        camera_config["rotate"] = 1
        self.client.set_camera_config("simulation_temp", camera_config)
        sleep(0.5)

        # Collect from the pipeline.
        with source(host=camera_host, port=camera_port, mode=SUB) as stream:
            data = stream.receive()

            x_size = data.data.data["width"].value
            y_size = data.data.data["height"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_size, sim_y)
            self.assertEqual(y_size, sim_x)

            x_axis_2 = data.data.data["x_axis"].value
            y_axis_2 = data.data.data["y_axis"].value

            # We rotate the image by 90 degrees - the X and Y sizes should be swapped.
            self.assertEqual(x_axis_2.shape[0], sim_y)
            self.assertEqual(y_axis_2.shape[0], sim_x)

        self.client.delete_camera_config("simulation_temp")

        image = self.client.get_camera_image("simulation")
        self.assertGreater(len(image.content), 0)

        image = self.client.get_camera_image_bytes("simulation")
        dtype = image["dtype"]
        shape = image["shape"]
        image_bytes = base64.b64decode(image["bytes"].encode())

        x_size, y_size = CameraSimulation(
            CameraConfig("simulation")).get_geometry()
        self.assertEqual(shape, [y_size, x_size])

        image_array = numpy.frombuffer(image_bytes, dtype=dtype).reshape(shape)
        self.assertIsNotNone(image_array)

        self.client.stop_all_cameras()