def _collect_camera_settings(self):
    """
    Connect once to the camera bsread stream and record the raw image geometry.

    Sets ``self.height_raw`` and ``self.width_raw`` from the first received
    (and transformed) image. If the received image value is None, both are
    set to 0.

    :raise RuntimeError: If the camera settings could not be fetched for any reason.
    """
    # Initialize before the try block so the finally clause can never hit an
    # unbound local if connecting fails early.
    stream = None

    try:
        source_host, source_port = get_host_port_from_stream_address(self.bsread_stream_address)

        stream = Source(host=source_host, port=source_port, mode=PULL, receive_timeout=3000)
        stream.connect()

        data = stream.receive()
        image = data.data.data[self.camera_config.get_source() + config.EPICS_PV_SUFFIX_IMAGE].value

        if image is None:
            self.height_raw, self.width_raw = 0, 0
        else:
            # The transformed image defines the geometry the rest of the code sees.
            image = transform_image(image, self.camera_config)
            self.height_raw, self.width_raw = image.shape

    except Exception as e:
        # Chain the original exception so the root cause is preserved
        # (the previous bare 'except:' discarded it).
        raise RuntimeError("Could not fetch camera settings cam_server:{}".format(
            self.camera_config.get_source())) from e

    finally:
        # 'stream' stays None if we failed before creating the Source.
        if stream:
            stream.disconnect()
source_host = source_host.split("//")[1] source_port = int(source_port) _logger.info("Input stream host '%s' and port '%s'.", source_host, source_port) stream = Source(host=source_host, port=source_port, mode=SUB, receive_timeout=1000) stream.connect() n_received_messages = 0 while n_received_messages < n_messages_per_connection: message = stream.receive(handler=handler.receive) # In case you set a receive timeout, the returned message can be None. if message is None: _logger.debug("Empty message.") continue n_received_messages += 1 pulse_id = message.data["header"]["pulse_id"] _logger.debug("Received message with pulse_id '%s'.", pulse_id) message_channels = [] for index, channel in enumerate(
def processing_pipeline(stop_event, statistics, parameter_queue,
                        cam_client, pipeline_config, output_stream_port, background_manager):
    """
    Receive images from a camera stream, process them and publish the results.

    Runs until stop_event is set, an unrecoverable error occurs, or no client
    is connected to the output stream for the configured timeout.

    :param stop_event: Event used to signal (and detect) shutdown.
    :param statistics: Statistics container (not implemented yet).
    :param parameter_queue: Queue delivering updated pipeline configurations.
    :param cam_client: Client used to talk to the camera server.
    :param pipeline_config: Pipeline configuration object.
    :param output_stream_port: Port to bind the output PUB stream to.
    :param background_manager: Provider of background images.
    """
    # TODO: Implement statistics: n_clients, input_throughput

    def no_client_timeout():
        # Shut this instance down when nobody has been listening for a while.
        _logger.warning("No client connected to the pipeline stream for %d seconds. Closing instance." %
                        config.MFLOW_NO_CLIENTS_TIMEOUT)
        stop_event.set()

    def process_pipeline_parameters():
        # Resolve the current configuration into (parameters, background image or None).
        parameters = pipeline_config.get_configuration()
        _logger.debug("Processing pipeline parameters %s.", parameters)

        background_array = None
        if parameters.get("image_background_enable"):
            background_id = pipeline_config.get_background_id()
            _logger.debug("Image background enabled. Using background_id %s.", background_id)
            background_array = background_manager.get_background(background_id)

        size_x, size_y = cam_client.get_camera_geometry(pipeline_config.get_camera_name())

        # A region of interest overrides the full camera geometry.
        image_region_of_interest = parameters.get("image_region_of_interest")
        if image_region_of_interest:
            _, size_x, _, size_y = image_region_of_interest

        _logger.debug("Image width %d and height %d.", size_x, size_y)

        return parameters, background_array

    source = None
    sender = None

    try:
        pipeline_parameters, image_background_array = process_pipeline_parameters()

        camera_stream_address = cam_client.get_camera_stream(pipeline_config.get_camera_name())
        _logger.debug("Connecting to camera stream address %s.", camera_stream_address)

        source_host, source_port = get_host_port_from_stream_address(camera_stream_address)

        source = Source(host=source_host, port=source_port,
                        receive_timeout=config.PIPELINE_RECEIVE_TIMEOUT, mode=SUB)
        source.connect()

        _logger.debug("Opening output stream on port %d.", output_stream_port)

        sender = Sender(port=output_stream_port, mode=PUB,
                        data_header_compression=config.CAMERA_BSREAD_DATA_HEADER_COMPRESSION)
        sender.open(no_client_action=no_client_timeout, no_client_timeout=config.MFLOW_NO_CLIENTS_TIMEOUT)
        # TODO: Register proper channels.

        # Indicate that the startup was successful.
        stop_event.clear()

        _logger.debug("Transceiver started.")

        while not stop_event.is_set():
            try:
                # Apply any pending configuration updates before the next frame.
                while not parameter_queue.empty():
                    new_parameters = parameter_queue.get()
                    pipeline_config.set_configuration(new_parameters)
                    pipeline_parameters, image_background_array = process_pipeline_parameters()

                data = source.receive()

                # In case of receiving error or timeout, the returned data is None.
                if data is None:
                    continue

                image = data.data.data["image"].value
                x_axis = data.data.data["x_axis"].value
                y_axis = data.data.data["y_axis"].value
                processing_timestamp = data.data.data["timestamp"].value

                processed_data = process_image(image, processing_timestamp, x_axis, y_axis,
                                               pipeline_parameters, image_background_array)

                processed_data["width"] = processed_data["image"].shape[1]
                processed_data["height"] = processed_data["image"].shape[0]

                pulse_id = data.data.pulse_id
                timestamp = (data.data.global_timestamp, data.data.global_timestamp_offset)

                sender.send(data=processed_data, timestamp=timestamp, pulse_id=pulse_id)

            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
                # are not swallowed and misreported as message failures.
                _logger.exception("Could not process message.")
                stop_event.set()

        _logger.info("Stopping transceiver.")

    except Exception:
        # Narrowed from a bare 'except:'; the exception is logged and re-raised.
        _logger.exception("Exception while trying to start the receive and process thread.")
        raise

    finally:
        if source:
            source.disconnect()

        if sender:
            sender.close()
def store_pipeline(stop_event, statistics, parameter_queue,
                   cam_client, pipeline_config, output_stream_port, background_manager):
    """
    Forward raw camera images, unprocessed, to a PUSH output stream keyed by camera name.

    Runs until stop_event is set, an unrecoverable error occurs, or no client
    is connected to the output stream for the configured timeout.

    :param stop_event: Event used to signal (and detect) shutdown.
    :param statistics: Statistics container (not implemented yet).
    :param parameter_queue: Queue with configuration updates (unused here).
    :param cam_client: Client used to talk to the camera server.
    :param pipeline_config: Pipeline configuration object.
    :param output_stream_port: Port to bind the output PUSH stream to.
    :param background_manager: Provider of background images (unused here).
    """
    # TODO: Implement statistics: n_clients, input_throughput

    def no_client_timeout():
        # Shut this instance down when nobody has been listening for a while.
        _logger.warning("No client connected to the pipeline stream for %d seconds. Closing instance." %
                        config.MFLOW_NO_CLIENTS_TIMEOUT)
        stop_event.set()

    source = None
    sender = None

    try:
        camera_stream_address = cam_client.get_camera_stream(pipeline_config.get_camera_name())
        camera_name = pipeline_config.get_camera_name()

        _logger.debug("Connecting to camera %s on stream address %s.", camera_name, camera_stream_address)

        source_host, source_port = get_host_port_from_stream_address(camera_stream_address)

        source = Source(host=source_host, port=source_port,
                        receive_timeout=config.PIPELINE_RECEIVE_TIMEOUT, mode=SUB)
        source.connect()

        _logger.debug("Opening output stream on port %d.", output_stream_port)

        # Non-blocking PUSH: frames are dropped rather than stalling the source.
        sender = Sender(port=output_stream_port, mode=PUSH,
                        data_header_compression=config.CAMERA_BSREAD_DATA_HEADER_COMPRESSION,
                        block=False)
        sender.open(no_client_action=no_client_timeout, no_client_timeout=config.MFLOW_NO_CLIENTS_TIMEOUT)
        # TODO: Register proper channels.

        # Indicate that the startup was successful.
        stop_event.clear()

        _logger.debug("Transceiver started.")

        while not stop_event.is_set():
            try:
                data = source.receive()

                # In case of receiving error or timeout, the returned data is None.
                if data is None:
                    continue

                # Re-key the raw image under the camera name for downstream storage.
                forward_data = {camera_name: data.data.data["image"].value}

                pulse_id = data.data.pulse_id
                timestamp = (data.data.global_timestamp, data.data.global_timestamp_offset)

                sender.send(data=forward_data, pulse_id=pulse_id, timestamp=timestamp)

            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
                # are not swallowed and misreported as message failures.
                _logger.exception("Could not process message.")
                stop_event.set()

        _logger.info("Stopping transceiver.")

    except Exception:
        # Narrowed from a bare 'except:'; the exception is logged and re-raised.
        _logger.exception("Exception while trying to start the receive and process thread.")
        raise

    finally:
        if source:
            source.disconnect()

        if sender:
            sender.close()
class ReadGroupInterface(object):
    """
    Provide a beam synchronous acquisition for PV data.
    """

    def __init__(self, properties, conditions=None, host=None, port=None, filter_function=None):
        """
        Create the bsread group read interface.
        :param properties: List of PVs to read for processing.
        :param conditions: List of PVs to read as conditions.
        :param host: Stream host (stored; see NOTE below).
        :param port: Stream port (stored; see NOTE below).
        :param filter_function: Filter the BS stream with a custom function.
        """
        self.host = host
        self.port = port
        self.properties = convert_to_list(properties)
        self.conditions = convert_to_list(conditions)
        self.filter = filter_function

        # Last received message and the timestamp of the read() that cached it.
        self._message_cache = None
        self._message_cache_timestamp = None

        # NOTE(review): the provided host/port are stored but the connection is
        # always made to the configured defaults — confirm whether self.host and
        # self.port should be passed here instead.
        self._connect_bsread(config.bs_default_host, config.bs_default_port)

    def _connect_bsread(self, host, port):
        """
        Open the bsread stream, either to an explicit host/port or by channels.
        :raise ValueError: If config.bs_connection_mode is not 'sub' or 'pull'.
        """
        # Configure the connection type.
        if config.bs_connection_mode.lower() == "sub":
            mode = mflow.SUB
        elif config.bs_connection_mode.lower() == "pull":
            mode = mflow.PULL
        else:
            # Previously fell through and raised an opaque UnboundLocalError on
            # 'mode' below; fail with a clear message instead.
            raise ValueError("Unsupported bs_connection_mode '%s'. Valid values are 'sub' and 'pull'."
                             % config.bs_connection_mode)

        if host and port:
            self.stream = Source(host=host, port=port, queue_size=config.bs_queue_size,
                                 receive_timeout=config.bs_receive_timeout, mode=mode)
        else:
            # No explicit address: subscribe by channel names of all properties and conditions.
            channels = [x.identifier for x in self.properties] + [x.identifier for x in self.conditions]
            self.stream = Source(channels=channels, queue_size=config.bs_queue_size,
                                 receive_timeout=config.bs_receive_timeout, mode=mode)

        self.stream.connect()

    @staticmethod
    def is_message_after_timestamp(message, timestamp):
        """
        Check if the received message was captured after the provided timestamp.
        :param message: Message to inspect.
        :param timestamp: Timestamp to compare the message to.
        :return: True if the message is after the timestamp, False otherwise.
        """
        # Receive might timeout, in this case we have nothing to compare.
        if not message:
            return False

        # This is how BSread encodes the timestamp.
        current_sec = int(timestamp)
        current_ns = int(math.modf(timestamp)[0] * 1e9)

        message_sec = message.data.global_timestamp
        message_ns = message.data.global_timestamp_offset

        # If the seconds are the same, the nanoseconds must be equal or larger.
        if message_sec == current_sec:
            return message_ns >= current_ns
        # If the seconds are not the same, the message seconds need to be larger than the current seconds.
        else:
            return message_sec > current_sec

    @staticmethod
    def _get_missing_property_default(property_definition):
        """
        In case a bs read value is missing, either return the default value or raise an Exception.
        :param property_definition: Definition carrying .default_value and .identifier.
        :return: The default value.
        :raise: property_definition.default_value if it is the Exception class itself.
        """
        # Exception is defined, raise it.
        if Exception == property_definition.default_value:
            raise property_definition.default_value("Property '%s' missing in bs stream."
                                                    % property_definition.identifier)
        # Else just return the default value.
        else:
            return property_definition.default_value

    def _read_pvs_from_cache(self, properties):
        """
        Read the requested properties from the cache.
        :param properties: List of properties to read.
        :return: List with PV values.
        :raise ValueError: If no message has been cached yet.
        """
        if not self._message_cache:
            raise ValueError("Message cache is empty, cannot read PVs %s." % properties)

        pv_values = []
        for property_name, property_definition in ((x.identifier, x) for x in properties):
            if property_name in self._message_cache.data.data:
                value = self._message_cache.data.data[property_name].value
            else:
                value = self._get_missing_property_default(property_definition)

            # TODO: Check if the python conversion works in every case?
            # BS read always return numpy, and we always convert it to Python.
            pv_values.append(value)

        return pv_values

    def read(self):
        """
        Reads the PV values from BSread. It uses the first PVs data sampled after the invocation of this method.
        :return: List of values for read pvs. Note: Condition PVs are excluded.
        :raise Exception: If no message newer than the call time arrives within config.bs_read_timeout.
        """
        read_timestamp = time()
        # Keep receiving until we see a message sampled after this call, or time out.
        while time() - read_timestamp < config.bs_read_timeout:
            message = self.stream.receive(filter=self.filter)

            if self.is_message_after_timestamp(message, read_timestamp):
                self._message_cache = message
                self._message_cache_timestamp = read_timestamp
                return self._read_pvs_from_cache(self.properties)
        else:
            # while-else: only reached when the loop exhausts the timeout without returning.
            raise Exception("Read timeout exceeded for BS read stream. "
                            "Could not find the desired package in time.")

    def read_cached_conditions(self):
        """
        Returns the conditions associated with the last read command.
        :return: List of condition values.
        """
        return self._read_pvs_from_cache(self.conditions)

    def close(self):
        """
        Disconnect from the stream and clear the message cache.
        """
        if self.stream:
            self.stream.disconnect()

        self._message_cache = None
        self._message_cache_timestamp = None