Example #1
    def test_wrong_background_size(self):
        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "white_background"
        }

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        background_provider = MockBackgroundManager()

        # Invalid background size.
        background_provider.save_background("white_background", numpy.zeros(shape=(100, 100)),
                                            append_timestamp=False)

        parameters = PipelineConfig("test_pipeline", pipeline_parameters).get_configuration()
        image_background_array = background_provider.get_background("white_background")

        with self.assertRaisesRegex(RuntimeError, "Invalid background_image size "):
            process_image(image=image,
                          timestamp=time.time(),
                          x_axis=x_axis,
                          y_axis=y_axis,
                          parameters=parameters,
                          image_background_array=image_background_array)
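
These snippets omit their imports. A minimal setup sketch they appear to rely on is shown below; the exact module paths inside the cam_server package are assumptions and may differ between versions.

import json
import time

import numpy

# Module paths are assumptions -- adjust to the cam_server version in use.
from cam_server.camera.configuration import CameraConfig
from cam_server.camera.source.simulation import CameraSimulation
from cam_server.pipeline.configuration import PipelineConfig
from cam_server.pipeline.data_processing.processor import process_image
# MockBackgroundManager is a helper from the project's test suite; its import path is not shown here.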
Example #2
    def test_image_background(self):
        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "white_background"
        }

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        background_provider = MockBackgroundManager()
        x_size, y_size = simulated_camera.get_geometry()
        background_provider.save_background("white_background", numpy.zeros(shape=(y_size, x_size)),
                                            append_timestamp=False)

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()
        image_background_array = background_provider.get_background(parameters.get("image_background"))

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters,
                               image_background_array=image_background_array)

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "A zero background should not change the image.")

        max_value_in_image = result["max_value"]

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_background": "max_background",
            "image_threshold": 0
        }

        max_background = numpy.zeros(shape=(y_size, x_size), dtype="uint16")
        max_background.fill(max_value_in_image)
        background_provider.save_background("max_background", max_background, append_timestamp=False)

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()
        image_background_array = background_provider.get_background(parameters.get("image_background"))

        expected_image = numpy.zeros(shape=(y_size, x_size))

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters,
                               image_background_array=image_background_array)

        self.assertTrue(numpy.array_equal(result["image"], expected_image),
                        "The image should be all zeros - negative numbers are not allowed.")
Example #3
    def test_noop_pipeline(self):
        pipeline_config = PipelineConfig("test_pipeline")

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        required_fields_in_result = ['x_center_of_mass', 'x_axis', 'y_axis', 'x_profile', 'y_fit_standard_deviation',
                                     'y_rms', 'timestamp', 'y_profile', 'image', 'max_value', 'x_fit_offset',
                                     'x_fit_gauss_function', 'y_center_of_mass', 'min_value', 'y_fit_mean',
                                     'x_fit_mean', 'x_rms', 'y_fit_amplitude', 'x_fit_amplitude',
                                     'y_fit_gauss_function', 'x_fit_standard_deviation', 'y_fit_offset',
                                     "processing_parameters", "intensity"]

        self.assertSetEqual(set(required_fields_in_result), set(result.keys()),
                            "Not all required keys are present in the result")

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "The input and output image are not the same, but the pipeline should not change it.")

        self.assertDictEqual(parameters, json.loads(result["processing_parameters"]),
                             "The passed and the received processing parameters are not the same.")
Example #4
        def run_the_pipeline(configuration, simulated_image=None):
            parameters = PipelineConfig("test_pipeline", configuration).get_configuration()

            simulated_camera = CameraSimulation(CameraConfig("simulation"))

            if simulated_image is None:
                simulated_image = simulated_camera.get_image()

            x_axis, y_axis = simulated_camera.get_x_y_axis()

            return process_image(image=simulated_image, timestamp=time.time(), x_axis=x_axis, y_axis=y_axis,
                                 parameters=parameters)
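
A hypothetical call to this helper, using configuration keys that appear in the other examples:

result = run_the_pipeline({"camera_name": "simulation", "image_threshold": 0})
print(result["max_value"], result["intensity"])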
Example #5
def process_image(image,
                  pulse_id,
                  timestamp,
                  x_axis,
                  y_axis,
                  parameters,
                  bsdata=None):
    ret = processor.process_image(image, pulse_id, timestamp, x_axis, y_axis,
                                  parameters, bsdata)
    ret["average_value"] = float(ret["intensity"]) / len(ret["x_axis"]) / len(
        ret["y_axis"])
    return ret
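
The wrapper only adds a derived average_value field: the total intensity divided by the number of pixels (the axis lengths equal the image width and height). A small worked check of that arithmetic with made-up numbers, assuming intensity is the pixel sum as in Example #7:

import numpy

image = numpy.full((4, 8), 3, dtype="uint16")   # 4 x 8 = 32 pixels of value 3
intensity = float(image.sum())                  # 96.0
average_value = intensity / image.shape[1] / image.shape[0]
assert average_value == 3.0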
Example #6
    def test_image_threshold(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()
        x_size, y_size = simulated_camera.get_geometry()

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_threshold": 9999999
        }

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        expected_image = numpy.zeros(shape=(y_size, x_size))
        self.assertTrue(numpy.array_equal(result["image"], expected_image),
                        "An image of zeros should have been produced.")

        pipeline_parameters = {
            "camera_name": "simulation",
            "image_threshold": 0
        }

        pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
        parameters = pipeline_config.get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(numpy.array_equal(result["image"], image),
                        "The image should be the same as the original image.")
Example #7
    def test_intensity(self):
        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation"
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        x_sum = result["x_profile"].sum()
        y_sum = result["y_profile"].sum()

        # The sums of X and Y profile should always give us the same result as the intensity.
        self.assertAlmostEqual(x_sum, result["intensity"], delta=10000)
        self.assertAlmostEqual(y_sum, result["intensity"], delta=10000)
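
The relation being tested is that the profiles are projections of the image onto each axis, so both profile sums reduce to the total pixel sum. A pure-numpy illustration (the real processor may compute these quantities differently):

import numpy

image = numpy.random.randint(0, 100, size=(6, 8)).astype("uint16")

x_profile = image.sum(axis=0)   # one value per column
y_profile = image.sum(axis=1)   # one value per row
intensity = image.sum()

assert x_profile.sum() == intensity
assert y_profile.sum() == intensity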
Example #8
    def test_profiles(self):
        height = 10
        width = 10

        square_start = 4
        square_end = 6
        n_pixels = square_end - square_start
        pixel_value = 10000

        image = numpy.zeros((height, width), dtype="uint16")
        x_axis = numpy.linspace(0, width - 1, width, dtype='f')
        y_axis = numpy.linspace(0, height - 1, height, dtype='f')

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation"
        }).get_configuration()

        # Add signal in the center
        image[square_start:square_end, square_start:square_end] = pixel_value

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        x_profile = result["x_profile"]
        y_profile = result["y_profile"]

        numpy.testing.assert_array_equal(x_profile[0:square_start], numpy.zeros(shape=square_start))
        numpy.testing.assert_array_equal(x_profile[square_start:square_end], numpy.zeros(shape=n_pixels) +
                                         (n_pixels * pixel_value))
        numpy.testing.assert_array_equal(x_profile[square_end:], numpy.zeros(shape=width - square_end))

        numpy.testing.assert_array_equal(y_profile[0:square_start], numpy.zeros(shape=square_start))
        numpy.testing.assert_array_equal(y_profile[square_start:square_end], numpy.zeros(shape=n_pixels) +
                                         (n_pixels * pixel_value))
        numpy.testing.assert_array_equal(y_profile[square_end:], numpy.zeros(shape=height - square_end))
Example #9
    def test_region_of_interest_default_values(self):

        simulated_camera = CameraSimulation(CameraConfig("simulation"))
        image = simulated_camera.get_image()
        x_axis, y_axis = simulated_camera.get_x_y_axis()

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation"
        }).get_configuration()

        good_region_keys = set(["good_region", "gr_x_axis", "gr_y_axis", "gr_x_fit_gauss_function", "gr_x_fit_offset",
                                "gr_x_fit_amplitude", "gr_x_fit_standard_deviation", "gr_x_fit_mean",
                                "gr_y_fit_gauss_function", "gr_y_fit_offset", "gr_y_fit_amplitude",
                                "gr_y_fit_standard_deviation", "gr_y_fit_mean", "gr_intensity",
                                "gr_x_profile", "gr_y_profile"])

        slices_key_formats = set(["slice_%s_center_x", "slice_%s_center_y", "slice_%s_standard_deviation",
                                  "slice_%s_intensity"])

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertFalse(any((x in result for x in good_region_keys)), 'There should not be good region keys.')

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 99999
            }
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(all((x in result for x in good_region_keys)), 'There should be good region keys.')
        self.assertTrue(all((result[x] is None for x in good_region_keys)), 'All values should be None.')

        number_of_slices = 7

        parameters = PipelineConfig("test_pipeline", {
            "camera_name": "simulation",
            "image_good_region": {
                "threshold": 99999
            },
            "image_slices": {
                "number_of_slices": number_of_slices
            }
        }).get_configuration()

        result = process_image(image=image,
                               timestamp=time.time(),
                               x_axis=x_axis,
                               y_axis=y_axis,
                               parameters=parameters)

        self.assertTrue(all((x in result for x in good_region_keys)), 'There should be good region keys.')
        self.assertTrue(all((x in result for x in (x % counter
                                                   for x in slices_key_formats
                                                   for counter in range(number_of_slices)))))
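
The slice keys are generated from the format strings above, one set per slice index. For number_of_slices = 7 they expand like this:

slices_key_formats = ["slice_%s_center_x", "slice_%s_center_y",
                      "slice_%s_standard_deviation", "slice_%s_intensity"]

expected_slice_keys = [key_format % counter
                       for key_format in slices_key_formats
                       for counter in range(7)]
# "slice_0_center_x", ..., "slice_6_intensity" -- 28 keys in total.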
Example #10
x_size, y_size = simulated_camera.get_geometry()

# Documentation: https://github.com/datastreaming/cam_server#pipeline_configuration
pipeline_parameters = {
    "camera_name": "simulation"
}

pipeline_config = PipelineConfig("test_pipeline", pipeline_parameters)
parameters = pipeline_config.get_configuration()

image_number = 0

with sender(port=output_stream_port, mode=output_stream_mode) as output_stream:
    # Get simulated image.
    image = simulated_camera.get_image()

    # Generate timestamp.
    timestamp = time.time()

    # Pass data to processing pipeline.
    processed_data = process_image(image, 0, timestamp, x_axis, y_axis, parameters)

    # Set height and width.
    processed_data["width"] = processed_data["image"].shape[1]
    processed_data["height"] = processed_data["image"].shape[0]

    print("Sending image number: ", image_number)
    image_number += 1

    output_stream.send(data=processed_data, timestamp=timestamp)
Example #11
def process_image(image, pulse_id, timestamp, x_axis, y_axis, parameters, bsdata):
    ret = processor.process_image(image, pulse_id, timestamp, x_axis, y_axis, parameters, bsdata)
    ret.update(bsdata)
    return ret
Example #12
def processing_pipeline(stop_event, statistics, parameter_queue, cam_client,
                        pipeline_config, output_stream_port,
                        background_manager):
    # TODO: Implement statistics: n_clients, input_throughput

    def no_client_timeout():
        _logger.warning("No client connected to the pipeline stream for %d seconds. Closing instance.",
                        config.MFLOW_NO_CLIENTS_TIMEOUT)
        stop_event.set()

    def process_pipeline_parameters():
        parameters = pipeline_config.get_configuration()
        _logger.debug("Processing pipeline parameters %s.", parameters)

        background_array = None
        if parameters.get("image_background_enable"):
            background_id = pipeline_config.get_background_id()
            _logger.debug("Image background enabled. Using background_id %s.",
                          background_id)

            background_array = background_manager.get_background(background_id)

        size_x, size_y = cam_client.get_camera_geometry(
            pipeline_config.get_camera_name())

        image_region_of_interest = parameters.get("image_region_of_interest")
        if image_region_of_interest:
            _, size_x, _, size_y = image_region_of_interest

        _logger.debug("Image width %d and height %d.", size_x, size_y)

        return parameters, background_array

    source = None
    sender = None

    try:
        pipeline_parameters, image_background_array = process_pipeline_parameters()

        camera_stream_address = cam_client.get_camera_stream(
            pipeline_config.get_camera_name())
        _logger.debug("Connecting to camera stream address %s.",
                      camera_stream_address)

        source_host, source_port = get_host_port_from_stream_address(
            camera_stream_address)

        source = Source(host=source_host,
                        port=source_port,
                        receive_timeout=config.PIPELINE_RECEIVE_TIMEOUT,
                        mode=SUB)
        source.connect()

        _logger.debug("Opening output stream on port %d.", output_stream_port)

        sender = Sender(port=output_stream_port,
                        mode=PUB,
                        data_header_compression=config.CAMERA_BSREAD_DATA_HEADER_COMPRESSION)

        sender.open(no_client_action=no_client_timeout,
                    no_client_timeout=config.MFLOW_NO_CLIENTS_TIMEOUT)
        # TODO: Register proper channels.

        # Indicate that the startup was successful.
        stop_event.clear()

        _logger.debug("Transceiver started.")

        while not stop_event.is_set():
            try:
                while not parameter_queue.empty():
                    new_parameters = parameter_queue.get()
                    pipeline_config.set_configuration(new_parameters)
                    pipeline_parameters, image_background_array = process_pipeline_parameters()

                data = source.receive()

                # In case of receiving error or timeout, the returned data is None.
                if data is None:
                    continue

                image = data.data.data["image"].value
                x_axis = data.data.data["x_axis"].value
                y_axis = data.data.data["y_axis"].value
                processing_timestamp = data.data.data["timestamp"].value

                processed_data = process_image(image, processing_timestamp,
                                               x_axis, y_axis,
                                               pipeline_parameters,
                                               image_background_array)

                processed_data["width"] = processed_data["image"].shape[1]
                processed_data["height"] = processed_data["image"].shape[0]

                pulse_id = data.data.pulse_id
                timestamp = (data.data.global_timestamp,
                             data.data.global_timestamp_offset)

                sender.send(data=processed_data,
                            timestamp=timestamp,
                            pulse_id=pulse_id)

            except:
                _logger.exception("Could not process message.")
                stop_event.set()

        _logger.info("Stopping transceiver.")

    except:
        _logger.exception(
            "Exception while trying to start the receive and process thread.")
        raise

    finally:
        if source:
            source.disconnect()

        if sender:
            sender.close()