def run(stop_event, statistics, parameter_queue, cam_client, pipeline_config, output_stream_port,
        background_manager, user_scripts_manager=None):
    camera_name = pipeline_config.get_camera_name()
    set_log_tag(" [" + str(camera_name) + " | " + str(pipeline_config.get_name()) + ":" + str(output_stream_port) + "]")
    exit_code = 0

    try:
        init_statistics(statistics)
        init_pipeline_parameters(pipeline_config)

        # Indicate that the startup was successful.
        stop_event.clear()

        connect_to_source(cam_client)
        setup_sender(output_stream_port, stop_event)

        _logger.debug("Transceiver started. %s" % log_tag)

        while not stop_event.is_set():
            try:
                pulse_id, global_timestamp, data = receive_stream()

                if not data:
                    continue

                stream_data = OrderedDict()
                try:
                    for key, value in data.items():
                        stream_data[key] = value.value
                except Exception as e:
                    _logger.error("Error parsing bsread message: " + str(e) + ". %s" % log_tag)
                    continue

                send_data(stream_data, global_timestamp, pulse_id)
            except Exception as e:
                _logger.exception("Could not process message: " + str(e) + ". %s" % log_tag)
                stop_event.set()

    except Exception as e:
        _logger.exception("Exception starting the receive thread: " + str(e) + ". %s" % log_tag)
        exit_code = 1
        raise

    finally:
        _logger.info("Stopping transceiver. %s" % log_tag)
        stop_event.set()
        cleanup()
        _logger.debug("Exiting process. %s" % log_tag)
        sys.exit(exit_code)
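# Example (illustrative only, not part of the original module): how an entry point like run()
# above is typically launched in its own process. The parent sets stop_event before starting
# the child; the child clears it once startup succeeded, so the parent can treat "event
# cleared" as the startup handshake. The helper name and timeout below are assumptions.
def _example_launch_pipeline(target, args=(), startup_timeout=10.0):
    import multiprocessing
    import time

    stop_event = multiprocessing.Event()
    stop_event.set()  # The child clears this flag to signal a successful startup.
    process = multiprocessing.Process(target=target, args=(stop_event,) + tuple(args))
    process.start()

    deadline = time.time() + startup_timeout
    while stop_event.is_set() and time.time() < deadline:
        time.sleep(0.1)
    if stop_event.is_set():
        raise TimeoutError("Pipeline process did not start within %.1f seconds" % startup_timeout)
    return process, stop_event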
def process(self, stop_event, statistics, parameter_queue, port):
    sender = None
    dtype = None
    try:
        init_statistics(statistics)

        self.sender = self.create_sender(stop_event, port)
        self.connect()

        camera_name = self.get_name()
        x_size, y_size = self.get_geometry()
        x_axis, y_axis = self.get_x_y_axis()
        frame_rate = self.get_frame_rate()
        sample_interval = (1.0 / frame_rate) if frame_rate else None

        if not self.check_data:
            self.register_channels()

        # This signals that the camera has successfully started.
        stop_event.clear()

        while not stop_event.is_set():
            if sample_interval:
                start = time.time()

            image, timestamp, pulse_id = self.get_data()
            frame_size = ((image.size * image.itemsize) if (image is not None) else 0)
            frame_shape = (str(x_size) + "x" + str(y_size) + "x" + str(image.itemsize)) if (image is not None) else None
            update_statistics(sender, -frame_size, 1 if (image is not None) else 0, frame_shape)

            # In case of receiving error or timeout, the returned data is None.
            if image is None:
                continue

            change = False
            x, y = self.get_geometry()
            if x != x_size or y != y_size:
                x_size, y_size = self.get_geometry()
                x_axis, y_axis = self.get_x_y_axis()
                change = True
            if (dtype is not None) and dtype != image.dtype:
                change = True
            dtype = image.dtype
            if change and not self.check_data:
                self.register_channels()

            default_channels = {
                "image": image,
                "timestamp": timestamp,
                "width": x_size,
                "height": y_size,
                "x_axis": x_axis,
                "y_axis": y_axis
            }
            data = self.get_send_channels(default_channels)

            try:
                self.sender.send(data=data, pulse_id=pulse_id, timestamp=timestamp, check_data=self.check_data)
                on_message_sent()
            except Again:
                _logger.warning("Send timeout. Lost image with timestamp '%s' [%s]." % (str(timestamp), camera_name))

            if sample_interval:
                sleep = sample_interval - (time.time() - start)
                if sleep > 0:
                    time.sleep(sleep)

    except Exception as e:
        _logger.exception("Error while processing camera stream: %s" % (str(e),))

    finally:
        _logger.info("Stopping transceiver.")
        # Wait for termination / update configuration / etc.
        stop_event.wait()
        try:
            self.disconnect()
        except:
            pass
        if self.sender:
            try:
                self.sender.close()
            except:
                pass
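# Example (illustrative only): the frame-rate throttling used in the loop above, isolated.
# With a configured frame_rate, each iteration sleeps for whatever is left of the sampling
# interval after the work is done, so the loop runs at most frame_rate times per second.
# The function and parameter names are placeholders, not part of the original module.
def _example_rate_limited_loop(work, frame_rate=10.0, iterations=5):
    import time

    sample_interval = (1.0 / frame_rate) if frame_rate else None
    for _ in range(iterations):
        if sample_interval:
            start = time.time()
        work()
        if sample_interval:
            sleep = sample_interval - (time.time() - start)
            if sleep > 0:
                time.sleep(sleep)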
def run(stop_event, statistics, parameter_queue, cam_client, pipeline_config, output_stream_port,
        background_manager, user_scripts_manager=None):
    def no_client_action():
        nonlocal parameters
        if parameters["no_client_timeout"] > 0:
            _logger.warning("No client connected to the pipeline stream for %d seconds. Closing instance. %s" %
                            (parameters["no_client_timeout"], log_tag))
            stop_event.set()

    source = None
    sender = None
    set_log_tag("store_pipeline")
    exit_code = 0

    parameters = init_pipeline_parameters(pipeline_config, parameter_queue, user_scripts_manager)
    if parameters.get("no_client_timeout") is None:
        parameters["no_client_timeout"] = config.MFLOW_NO_CLIENTS_TIMEOUT
    modulo = parameters.get("modulo", None)

    try:
        init_statistics(statistics)

        camera_name = pipeline_config.get_camera_name()
        stream_image_name = camera_name + config.EPICS_PV_SUFFIX_IMAGE
        set_log_tag(" [" + str(camera_name) + " | " + str(pipeline_config.get_name()) + ":" + str(output_stream_port) + "]")

        source = connect_to_camera(cam_client)

        _logger.debug("Opening output stream on port %d. %s", output_stream_port, log_tag)
        sender = Sender(port=output_stream_port, mode=PUSH,
                        data_header_compression=config.CAMERA_BSREAD_DATA_HEADER_COMPRESSION, block=False)
        sender.open(no_client_action=no_client_action,
                    no_client_timeout=parameters["no_client_timeout"] if parameters["no_client_timeout"] > 0 else sys.maxsize)
        init_sender(sender, parameters)
        # TODO: Register proper channels.

        # Indicate that the startup was successful.
        stop_event.clear()

        _logger.debug("Transceiver started. %s" % log_tag)

        counter = 1
        while not stop_event.is_set():
            try:
                data = source.receive()
                update_statistics(sender, data.statistics.total_bytes_received if data else 0, 1 if data else 0)

                if modulo:
                    if counter < modulo:
                        counter = counter + 1
                        continue
                    else:
                        counter = 1

                # In case of receiving error or timeout, the returned data is None.
                if data is None:
                    continue

                forward_data = {stream_image_name: data.data.data["image"].value}

                pulse_id = data.data.pulse_id
                timestamp = (data.data.global_timestamp, data.data.global_timestamp_offset)

                send(sender, forward_data, timestamp, pulse_id)
            except:
                _logger.exception("Could not process message. %s" % log_tag)
                stop_event.set()

        _logger.info("Stopping transceiver. %s" % log_tag)

    except:
        _logger.exception("Exception while trying to start the receive and process thread. %s" % log_tag)
        exit_code = 1
        raise

    finally:
        _logger.info("Stopping transceiver. %s" % log_tag)
        if source:
            source.disconnect()
        if sender:
            try:
                sender.close()
            except:
                pass
        sys.exit(exit_code)
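# Example (illustrative only): the "modulo" downsampling logic from the loop above as a small
# generator. Only every modulo-th message is forwarded; the rest are dropped after being
# counted in the statistics. The generator name is a placeholder, not part of the module.
def _example_modulo_filter(messages, modulo):
    counter = 1
    for message in messages:
        if modulo:
            if counter < modulo:
                counter += 1
                continue
            counter = 1
        yield message

# e.g. list(_example_modulo_filter(range(10), 3)) keeps every third message: [2, 5, 8].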
def run(stop_event, statistics, parameter_queue, cam_client, pipeline_config, output_stream_port,
        background_manager, user_scripts_manager=None):
    set_log_tag("custom_pipeline")
    exit_code = 0
    init_pipeline_parameters(pipeline_config)

    try:
        init_statistics(statistics)
        set_log_tag(" [" + str(pipeline_config.get_name()) + ":" + str(output_stream_port) + "]")

        sender = create_sender(output_stream_port, stop_event)

        function = get_function(get_parameters(), user_scripts_manager)
        if function is None:
            raise Exception("Invalid function")

        max_frame_rate = get_parameters().get("max_frame_rate")
        sample_interval = (1.0 / max_frame_rate) if max_frame_rate else None

        _logger.debug("Transceiver started. %s" % log_tag)

        # Indicate that the startup was successful.
        stop_event.clear()

        init = True
        while not stop_event.is_set():
            try:
                if sample_interval:
                    start = time.time()

                check_parameters_changes()

                stream_data, timestamp, pulse_id, data_size = function(get_parameters(), init)
                init = False
                update_statistics(sender, -data_size, 1 if stream_data else 0)

                if not stream_data or stop_event.is_set():
                    continue

                send(sender, stream_data, timestamp, pulse_id)

                if sample_interval:
                    sleep = sample_interval - (time.time() - start)
                    if sleep > 0:
                        time.sleep(sleep)
            except Exception as e:
                _logger.exception("Could not process message: " + str(e) + ". %s" % log_tag)
                if abort_on_error():
                    stop_event.set()

        _logger.info("Stopping transceiver. %s" % log_tag)

    except:
        _logger.exception("Exception while trying to start the receive and process thread. %s" % log_tag)
        exit_code = 1
        raise

    finally:
        _logger.info("Stopping transceiver. %s" % log_tag)
        cleanup()
        sys.exit(exit_code)
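# Example (illustrative only): sketch of a user-script function compatible with the call site
# above. It receives the current pipeline parameters and an "init" flag (True on the first
# call) and returns (stream_data, timestamp, pulse_id, data_size). The channel name and
# payload below are placeholder assumptions, not part of the original module.
def _example_custom_source(parameters, init):
    import time

    if init:
        pass  # One-time initialization (open files, allocate buffers, ...) could go here.
    value = 42.0
    stream_data = {"example_channel": value}
    timestamp = time.time()
    pulse_id = int(timestamp * 100)
    data_size = 8  # Approximate payload size in bytes, used only for statistics.
    return stream_data, timestamp, pulse_id, data_size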
def run(stop_event, statistics, parameter_queue, cam_client, pipeline_config, output_stream_port,
        background_manager, user_scripts_manager=None):
    set_log_tag("stream_pipeline")
    exit_code = 0
    init_pipeline_parameters(pipeline_config, parameter_queue, user_scripts_manager)

    def process_stream(pulse_id, global_timestamp, function, input_data):
        try:
            return function(input_data, pulse_id, global_timestamp, get_parameters())
        except Exception as e:
            #import traceback
            #traceback.print_exc()
            _logger.warning("Error processing PID %d at proc %d thread %d: %s" %
                            (pulse_id, os.getpid(), threading.get_ident(), str(e)))
            if abort_on_error():
                raise

    try:
        init_statistics(statistics)
        set_log_tag(" [" + str(pipeline_config.get_name()) + ":" + str(output_stream_port) + "]")

        # Indicate that the startup was successful.
        stop_event.clear()

        setup_sender(output_stream_port, stop_event, process_stream, user_scripts_manager)

        _logger.debug("Transceiver started. %s" % log_tag)

        with connect_to_stream():
            while not stop_event.is_set():
                try:
                    check_parameters_changes()
                    assert_function_defined()

                    pulse_id, global_timestamp, data = receive_stream()

                    if not data:
                        continue

                    stream_data = OrderedDict()
                    try:
                        for key, value in data.items():
                            stream_data[key] = value.value
                    except Exception as e:
                        _logger.error("Error parsing bsread message: " + str(e) + ". %s" % log_tag)
                        continue

                    process_data(process_stream, pulse_id, global_timestamp, stream_data)
                except Exception as e:
                    _logger.exception("Could not process message: " + str(e) + ". %s" % log_tag)
                    stop_event.set()

    except Exception as e:
        _logger.exception("Exception starting the receive thread: " + str(e) + ". %s" % log_tag)
        exit_code = 1
        raise

    finally:
        _logger.info("Stopping transceiver. %s" % log_tag)
        stop_event.set()
        cleanup()
        _logger.debug("Exiting process. %s" % log_tag)
        sys.exit(exit_code)
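# Example (illustrative only): sketch of a processing function matching the way
# process_stream() above invokes it - (data, pulse_id, global_timestamp, parameters) in,
# a dictionary of output channels back. The input channel "intensity" and the derived
# channel below are placeholder assumptions, not part of the original module.
def _example_stream_function(data, pulse_id, global_timestamp, parameters):
    processed = dict(data)
    # Derive an additional channel from the incoming ones, if present.
    if "intensity" in data:
        processed["intensity_scaled"] = data["intensity"] * parameters.get("scale", 1.0)
    return processed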
def run(stop_event, statistics, parameter_queue, cam_client, pipeline_config, output_stream_port,
        background_manager, user_scripts_manager=None):
    camera_name = pipeline_config.get_camera_name()
    set_log_tag(" [" + str(camera_name) + " | " + str(pipeline_config.get_name()) + ":" + str(output_stream_port) + "]")
    exit_code = 0

    def process_bsbuffer(bs_buffer, bs_img_buffer):
        i = 0
        while i < len(bs_buffer):
            bs_pid, bsdata = bs_buffer[i]
            for j in range(len(bs_img_buffer)):
                img_pid = bs_img_buffer[0][0]
                if img_pid < bs_pid:
                    bs_img_buffer.popleft()
                elif img_pid == bs_pid:
                    [pulse_id, [global_timestamp, image, x_axis, y_axis, additional_data]] = bs_img_buffer.popleft()
                    stream_data = OrderedDict()
                    stream_data.update(bsdata)
                    for key, value in bsdata.items():
                        stream_data[key] = value.value
                    if additional_data is not None:
                        try:
                            stream_data.update(additional_data)
                        except:
                            pass
                    process_data(process_image, pulse_id, global_timestamp, image, x_axis, y_axis, stream_data)
                    for k in range(i):
                        bs_buffer.popleft()
                    i = -1
                    break
                else:
                    break
            i = i + 1

    def bs_send_task(bs_buffer, bs_img_buffer, stop_event):
        global sender
        if number_processing_threads <= 0:
            _logger.info("Start bs send thread")
            sender = create_sender(output_stream_port, stop_event)
        try:
            with connect_to_stream() as stream:
                while not stop_event.is_set():
                    message = stream.receive()
                    if not message or stop_event.is_set():
                        if abort_on_timeout():
                            stop_event.set()
                        continue
                    bs_buffer.append([message.data.pulse_id, message.data.data])
                    try:
                        process_bsbuffer(bs_buffer, bs_img_buffer)
                    except Exception as e:
                        _logger.error("Error processing bs buffer: " + str(e))
        except Exception as e:
            _logger.error("Error on bs_send_task: " + str(e))
        finally:
            stop_event.set()
            if sender:
                try:
                    sender.close()
                except:
                    pass
            _logger.info("Exit bs send thread")

    def process_pipeline_parameters():
        parameters = get_parameters()
        _logger.debug("Processing pipeline parameters %s. %s" % (parameters, log_tag))

        background_array = None
        if parameters.get("image_background_enable"):
            background_id = pipeline_config.get_background_id()
            _logger.debug("Image background enabled. Using background_id %s. %s" % (background_id, log_tag))
            try:
                background_array = background_manager.get_background(background_id)
                parameters["image_background_ok"] = True
            except:
                _logger.warning("Invalid background_id: %s. %s" % (background_id, log_tag))
                #if abort_on_error():
                #    raise
                parameters["image_background_ok"] = False
        if background_array is not None:
            background_array = background_array.astype("uint16", copy=False)

        size_x, size_y = cam_client.get_camera_geometry(pipeline_config.get_camera_name())

        by, bx = int(parameters.get("binning_y", 1)), int(parameters.get("binning_x", 1))
        bm = parameters.get("binning_mean", False)
        if (by > 1) or (bx > 1):
            size_x, size_y = int(size_x / bx), int(size_y / by)
            if background_array is not None:
                background_array, _, _ = binning(background_array, None, None, bx, by, bm)

        if background_array is not None:
            if background_array.shape != (size_y, size_x):
                _logger.warning("Bad background shape: %s instead of %s. %s" %
                                (background_array.shape, (size_y, size_x), log_tag))

        image_region_of_interest = parameters.get("image_region_of_interest")
        if image_region_of_interest:
            _, size_x, _, size_y = image_region_of_interest

        if size_x and size_y:
            _logger.debug("Image width %d and height %d. %s" % (size_x, size_y, log_tag))

        if parameters.get("rotation"):
            if not isinstance(parameters.get("rotation"), dict):
                parameters["rotation"] = {"angle": float(parameters.get("rotation")), "order": 1, "mode": "0.0"}
            if not parameters["rotation"].get("angle"):
                parameters["rotation"] = None
            elif not is_number(parameters["rotation"]["angle"]) or (float(parameters["rotation"]["angle"]) == 0):
                parameters["rotation"] = None
            else:
                if not parameters["rotation"].get("order"):
                    parameters["rotation"]["order"] = 1
                if not parameters["rotation"].get("mode"):
                    parameters["rotation"]["mode"] = "0.0"

        if parameters.get("averaging"):
            try:
                parameters["averaging"] = int(parameters.get("averaging"))
            except:
                parameters["averaging"] = None

        if parameters["mode"] == "FILE":
            if parameters.get("layout") is None:
                parameters["layout"] = LAYOUT_DEFAULT
            if parameters.get("localtime") is None:
                parameters["localtime"] = LOCALTIME_DEFAULT
            if parameters.get("change") is None:
                parameters["change"] = CHANGE_DEFAULT

        if parameters.get("bsread_address"):
            if parameters.get("bsread_image_buf"):
                parameters["bsread_image_buf"] = min(parameters.get("bsread_image_buf"),
                                                     config.BSREAD_IMAGE_BUFFER_SIZE_MAX)
            else:
                parameters["bsread_image_buf"] = config.BSREAD_IMAGE_BUFFER_SIZE_DEFAULT

            if parameters.get("bsread_data_buf"):
                parameters["bsread_data_buf"] = min(parameters.get("bsread_data_buf"),
                                                    config.BSREAD_DATA_BUFFER_SIZE_MAX)
            else:
                parameters["bsread_data_buf"] = config.BSREAD_DATA_BUFFER_SIZE_DEFAULT

        return parameters, background_array

    def process_image(pulse_id, global_timestamp, function, image, x_axis, y_axis, bsdata):
        pars = get_parameters()
        try:
            image, x_axis, y_axis = pre_process_image(image, pulse_id, global_timestamp, x_axis, y_axis, pars,
                                                      image_background_array)
            processed_data = function(image, pulse_id, global_timestamp, x_axis, y_axis, pars, bsdata)
            #print("Processing PID %d at proc %d thread %d" % (pulse_id, os.getpid(), threading.get_ident()))
            return processed_data
        except Exception as e:
            _logger.warning("Error processing PID %d at proc %d thread %d: %s" %
                            (pulse_id, os.getpid(), threading.get_ident(), str(e)))
            if abort_on_error():
                raise

    bs_buffer, bs_img_buffer, bs_send_thread = None, None, None

    try:
        init_statistics(statistics)
        init_pipeline_parameters(pipeline_config, parameter_queue, user_scripts_manager, process_pipeline_parameters)
        pipeline_parameters, image_background_array = process_pipeline_parameters()

        connect_to_camera(cam_client)

        _logger.debug("Opening output stream on port %d. %s" % (output_stream_port, log_tag))

        # Indicate that the startup was successful.
        stop_event.clear()

        image_with_stream = has_stream()
        if image_with_stream:
            bs_buffer = deque(maxlen=pipeline_parameters["bsread_data_buf"])
            bs_img_buffer = deque(maxlen=pipeline_parameters["bsread_image_buf"])
            bs_send_thread = Thread(target=bs_send_task, args=(bs_buffer, bs_img_buffer, stop_event))
            bs_send_thread.start()
            if number_processing_threads > 0:
                setup_sender(output_stream_port, stop_event, process_image, user_scripts_manager)
        else:
            setup_sender(output_stream_port, stop_event, process_image, user_scripts_manager)

        _logger.debug("Transceiver started. %s" % log_tag)

        last_sent_timestamp = 0
        image_buffer = []

        while not stop_event.is_set():
            try:
                ret = check_parameters_changes()
                if ret is not None:
                    pipeline_parameters, image_background_array = ret
                assert_function_defined()

                pulse_id, global_timestamp, data = receive_stream(True)

                if not data:
                    continue

                image = data["image"].value
                if image is None:
                    continue

                x_axis = data["x_axis"].value
                y_axis = data["y_axis"].value

                if pipeline_parameters.get("rotation"):
                    if pipeline_parameters["rotation"]["mode"] == "ortho":
                        rotation_angle = int(pipeline_parameters["rotation"]["angle"] / 90) % 4
                        if rotation_angle == 1:
                            x_axis, y_axis = y_axis, numpy.flip(x_axis)
                        if rotation_angle == 2:
                            x_axis, y_axis = numpy.flip(x_axis), numpy.flip(y_axis)
                        if rotation_angle == 3:
                            x_axis, y_axis = numpy.flip(y_axis), x_axis

                averaging = pipeline_parameters.get("averaging")
                if averaging:
                    continuous = averaging < 0
                    averaging = abs(averaging)
                    if continuous and (len(image_buffer) >= averaging):
                        image_buffer.pop(0)
                    image_buffer.append(image)
                    if (len(image_buffer) >= averaging) or continuous:
                        try:
                            frames = numpy.array(image_buffer)
                            image = numpy.average(frames, 0)
                        except:
                            # Different shapes
                            image_buffer = []
                            continue
                    else:
                        continue
                else:
                    if pipeline_parameters.get("copy"):
                        image = copy_image(image)

                if (not averaging) or (not continuous):
                    image_buffer = []

                # Check maximum frame rate parameter
                max_frame_rate = pipeline_parameters.get("max_frame_rate")
                if max_frame_rate:
                    min_interval = 1.0 / max_frame_rate
                    if (time.time() - last_sent_timestamp) < min_interval:
                        continue

                additional_data = {}
                if len(data) != len(config.CAMERA_STREAM_REQUIRED_FIELDS):
                    for key, value in data.items():
                        if key not in config.CAMERA_STREAM_REQUIRED_FIELDS:
                            additional_data[key] = value.value

                pars = [global_timestamp, image, x_axis, y_axis, additional_data]
                if image_with_stream:
                    bs_img_buffer.append([pulse_id, pars])
                else:
                    process_data(process_image, pulse_id, *pars)
                last_sent_timestamp = time.time()

            except ProcessingCompleted:
                break
            except Exception as e:
                exit_code = 2
                _logger.exception("Could not process message %s: %s" % (log_tag, str(e)))
                break

    except Exception as e:
        exit_code = 1
        _logger.exception("Exception while trying to start the receive and process thread %s: %s" % (log_tag, str(e)))
        raise

    finally:
        _logger.info("Stopping transceiver. %s" % log_tag)
        stop_event.set()
        if bs_send_thread:
            try:
                bs_send_thread.join(0.1)
            except:
                pass
        cleanup()
        _logger.debug("Exiting process. %s" % log_tag)
        sys.exit(exit_code)
def process_epics_camera(stop_event, statistics, parameter_queue, camera, port):
    """
    Start the camera stream and listen for image monitors. This function blocks until stop_event is set.
    :param stop_event: Event used to signal the process to stop.
    :param statistics: Statistics namespace.
    :param parameter_queue: Parameters queue to be passed to the pipeline.
    :param camera: Camera instance to get the images from.
    :param port: Port to use to bind the output stream.
    """
    sender = None
    exit_code = 0

    try:
        init_statistics(statistics)

        def process_parameters():
            nonlocal x_size, y_size, x_axis, y_axis
            x_size, y_size = camera.get_geometry()
            x_axis, y_axis = camera.get_x_y_axis()
            dtype = camera.get_dtype()
            sender.add_channel("image", metadata={"compression": config.CAMERA_BSREAD_IMAGE_COMPRESSION,
                                                  "shape": [x_size, y_size],
                                                  "type": dtype})
            sender.add_channel("x_axis", metadata={"compression": config.CAMERA_BSREAD_SCALAR_COMPRESSION,
                                                   "shape": [x_size],
                                                   "type": "float32"})
            sender.add_channel("y_axis", metadata={"compression": config.CAMERA_BSREAD_SCALAR_COMPRESSION,
                                                   "shape": [y_size],
                                                   "type": "float32"})

        x_size = y_size = x_axis = y_axis = None

        camera.connect()

        sender = camera.create_sender(stop_event, port)

        # Register the bsread channels - compress only the image.
        sender.add_channel("width", metadata={"compression": config.CAMERA_BSREAD_SCALAR_COMPRESSION,
                                              "type": "int64"})
        sender.add_channel("height", metadata={"compression": config.CAMERA_BSREAD_SCALAR_COMPRESSION,
                                               "type": "int64"})
        sender.add_channel("timestamp", metadata={"compression": config.CAMERA_BSREAD_SCALAR_COMPRESSION,
                                                  "type": "float64"})
        process_parameters()

        def collect_and_send(image, timestamp, shape_changed=False):
            nonlocal x_size, y_size, x_axis, y_axis

            if shape_changed:
                process_parameters()

            data = {"image": image,
                    "timestamp": timestamp,
                    "width": x_size,
                    "height": y_size,
                    "x_axis": x_axis,
                    "y_axis": y_axis}

            frame_size = ((image.size * image.itemsize) if (image is not None) else 0)
            frame_shape = str(x_size) + "x" + str(y_size) + "x" + str(image.itemsize)
            set_statistics(statistics, sender, statistics.total_bytes + frame_size,
                           1 if (image is not None) else 0, frame_shape)

            try:
                pulse_id = int(time.time() * 100) if camera.get_simulated_pulse_id() else None
                sender.send(data=data, pulse_id=pulse_id, timestamp=timestamp, check_data=False)
                on_message_sent(statistics)
            except Again:
                _logger.warning("Send timeout. Lost image with timestamp '%s' [%s]." %
                                (str(timestamp), camera.get_name()))

            while not parameter_queue.empty():
                new_parameters = parameter_queue.get()
                camera.camera_config.set_configuration(new_parameters)
                process_parameters()

        camera.add_callback(collect_and_send)

        # This signals that the camera has successfully started.
        stop_event.clear()

    except:
        _logger.exception("Error while processing camera stream [%s]" % (camera.get_name(),))
        exit_code = 1

    finally:
        # Wait for termination / update configuration / etc.
        stop_event.wait()

        camera.disconnect()

        if sender:
            try:
                sender.close()
            except:
                pass

        sys.exit(exit_code)
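# Example (illustrative only): how a parent process can push a configuration update to the
# running camera process through parameter_queue. collect_and_send() above drains the queue
# after each frame and reapplies the geometry via process_parameters(). The configuration
# keys shown here are placeholder assumptions, not a documented configuration schema.
def _example_update_camera_configuration(parameter_queue, new_configuration=None):
    if new_configuration is None:
        new_configuration = {"mirror_x": True, "rotate": 1}
    # The camera process applies this with camera.camera_config.set_configuration(...)
    # the next time a frame arrives.
    parameter_queue.put(new_configuration)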
def process_bsread_camera(stop_event, statistics, parameter_queue, camera, port):
    """
    Start the camera stream and receive the incoming bsread streams. This function blocks until stop_event is set.
    :param stop_event: Event used to signal the process to stop.
    :param statistics: Statistics namespace.
    :param parameter_queue: Parameters queue to be passed to the pipeline.
    :param camera: Camera instance to get the stream from.
    :param port: Port to use to bind the output stream.
    """
    sender = None
    camera_streams = []
    receive_threads = []
    threaded = False
    message_buffer, message_buffer_send_thread, message_buffer_lock = None, None, None
    data_changed = False
    format_error = False
    exit_code = 0
    data_format_changed = True

    try:
        init_statistics(statistics)

        def process_parameters():
            nonlocal x_size, y_size, x_axis, y_axis, data_format_changed
            x_axis, y_axis = camera.get_x_y_axis()
            x_size, y_size = camera.get_geometry()
            data_format_changed = True

        def data_change_callback(channels):
            nonlocal data_changed
            data_changed = True

        def message_buffer_send_task(message_buffer, stop_event, message_buffer_lock):
            nonlocal sender, data_format_changed, exit_code
            _logger.info("Start message buffer send thread [%s]" % (camera.get_name(),))
            sender = camera.create_sender(stop_event, port)
            last_pid = None
            interval = 1
            threshold = int(message_buffer.maxlen * camera.get_buffer_threshold())
            buffer_logs = camera.get_buffer_logs()
            try:
                while not stop_event.is_set():
                    tx = False
                    with message_buffer_lock:
                        size = len(message_buffer)
                        if size > 0:
                            pids = sorted(message_buffer.keys())
                            pulse_id = pids[0]
                            if last_pid and (pulse_id <= last_pid):
                                # Remove ancient PIDs
                                message_buffer.pop(pulse_id)
                                _logger.info("Removed ancient Pulse ID from queue: %d [%s]" %
                                             (pulse_id, camera.get_name()))
                            else:
                                if not last_pid or \
                                        (pulse_id <= (last_pid + interval)) or (size > threshold):
                                    (data, timestamp) = message_buffer.pop(pulse_id)
                                    # Don't send inside the sync block
                                    #sender.send(data=data, pulse_id=pulse_id, timestamp=timestamp, check_data=True)
                                    tx = True
                    if tx:
                        sender.send(data=data, pulse_id=pulse_id, timestamp=timestamp, check_data=data_format_changed)
                        data_format_changed = False
                        on_message_sent(statistics)
                        if last_pid:
                            expected = (last_pid + interval)
                            if pulse_id != expected:
                                interval = pulse_id - last_pid
                                if buffer_logs:
                                    _logger.info("Failed Pulse ID %d - received %d: Pulse ID interval set to: %d [%s]" %
                                                 (expected, pulse_id, interval, camera.get_name()))
                        last_pid = pulse_id
                    if size == 0:
                        time.sleep(0.001)
                    #while not parameter_queue.empty():
                    #    new_parameters = parameter_queue.get()
                    #    camera.camera_config.set_configuration(new_parameters)
                    #    process_parameters()
                _logger.info("stop_event set to send thread [%s]" % (camera.get_name(),))
            except Exception as e:
                exit_code = 2
                _logger.error("Error on message buffer send thread: %s [%s]" % (str(e), camera.get_name()))
            finally:
                stop_event.set()
                if sender:
                    try:
                        sender.close()
                    except:
                        pass
                _logger.info("Exit message buffer send thread [%s]" % (camera.get_name(),))

        def flush_stream(camera_stream):
            while camera_stream.stream.receive(handler=camera_stream.handler.receive, block=False) is not None:
                pass

        # TODO: Use to register proper channels. But be aware that the size and dtype can change during the running.
        # def register_image_channel(size_x, size_y, dtype):
        #     sender.add_channel("image", metadata={"compression": config.CAMERA_BSREAD_IMAGE_COMPRESSION,
        #                                           "shape": [size_x, size_y],
        #                                           "type": dtype})

        x_size = y_size = x_axis = y_axis = None

        camera.connect()

        camera_name = camera.get_name()
        connections = camera.get_connections()
        buffer_size = camera.get_buffer_size()
        threaded = buffer_size > 0

        process_parameters()
        # register_image_channel(x_size, y_size, dtype)

        # This signals that the camera has successfully started.
        stop_event.clear()

        stats_lock = RLock()

        if threaded:
            message_buffer_lock = RLock()
            message_buffer = MaxLenDict(maxlen=buffer_size)
            message_buffer_send_thread = Thread(target=message_buffer_send_task,
                                                args=(message_buffer, stop_event, message_buffer_lock))
            message_buffer_send_thread.start()
        else:
            sender = camera.create_sender(stop_event, port)

        if connections > 1:
            _logger.info("Connecting to camera '%s' over bsread with %d connections (buffer size = %d)" %
                         (camera_name, connections, buffer_size))
        else:
            _logger.info("Connecting to camera '%s' over bsread (buffer size = %d)" % (camera_name, buffer_size))

        if not threaded:
            for i in range(connections):
                stream = camera.get_stream(data_change_callback=data_change_callback)
                #stream.format_error_counter = 0
                camera_streams.append(stream)
                stream.connect()

        # If multiple streams, ensure they are aligned
        if connections > 1:
            pid_offset = None

            def flush_streams():
                for camera_stream in camera_streams:
                    flush_stream(camera_stream)

            def get_next_pids():
                pids = []
                for camera_stream in camera_streams:
                    data = camera_stream.receive()
                    if data is None:
                        data = camera_stream.receive()
                        if data is None:
                            raise Exception("Received no data from stream: " + str(camera_stream))
                    pids.append(data.data.pulse_id)
                return pids

            def check_pids(pids):
                nonlocal pid_offset
                pid_offset = pids[1] - pids[0]
                for i in range(1, len(pids)):
                    if (pids[i] - pids[i - 1]) != pid_offset:
                        return False
                return True

            def align_streams():
                nonlocal camera_streams
                retries = 50
                for retry in range(retries):
                    _logger.info("Aligning streams: retry - %d [%s]" % (retry, camera.get_name()))
                    # First flush streams.
                    flush_streams()
                    # Get a message from streams
                    pids = get_next_pids()
                    # Arrange the streams according to the PID
                    indexes = sorted(range(len(pids)), key=pids.__getitem__)
                    camera_streams = [camera_streams[x] for x in indexes]
                    pids = [pids[x] for x in indexes]
                    # Check if the PID offsets are constant
                    if not check_pids(pids):
                        if retry >= (retries - 1):
                            raise Exception("PID offsets of streams are not constant: " + str(pids))
                        else:
                            _logger.info("PID offsets of streams are not constant - retrying: %s [%s]" %
                                         (str(pids), camera.get_name()))
                    else:
                        _logger.info("Aligned streams: %s [%s]" % (str(pids), camera.get_name()))
                        break

            align_streams()

        last_pid = None
        total_bytes = [0] * connections
        frame_shape = None

        def process_stream(camera_stream, index):
            nonlocal total_bytes, last_pid, frame_shape, format_error, data_format_changed, exit_code
            try:
                if stop_event.is_set():
                    return False

                data = camera_stream.receive()

                #def on_format_error():
                #    camera_stream.format_error_counter = camera_stream.format_error_counter + 1
                #    _logger.warning("Invalid image format: retry %d of %d [%s]" % (
                #        camera_stream.format_error_counter, config.FORMAT_ERROR_COUNT, camera.get_name()))
                #    if camera_stream.format_error_counter >= config.FORMAT_ERROR_COUNT:
                #        raise Exception("Invalid image format")

                if data is not None:
                    image = data.data.data[camera_name + config.EPICS_PV_SUFFIX_IMAGE].value
                    if image is None:
                        format_error = True
                        #on_format_error()
                        return True
                    else:
                        # Rotate and mirror the image if needed - this is done in epics:_get_image for epics cameras.
                        image = transform_image(image, camera.camera_config)

                        # Numpy is slowest dimension first, but bsread is fastest dimension first.
                        height, width = image.shape
                        if (len(x_axis) != width) or (len(y_axis) != height):
                            format_error = True
                            #on_format_error()
                            return True
                        format_error = False
                        #camera_stream.format_error_counter = 0

                        frame_shape = str(width) + "x" + str(height) + "x" + str(image.itemsize)
                    total_bytes[index] = data.statistics.total_bytes_received

                with stats_lock:
                    set_statistics(statistics, sender, sum(total_bytes), 1 if data else 0, frame_shape)

                # In case of receiving error or timeout, the returned data is None.
                if data is None:
                    return True

                pulse_id = data.data.pulse_id

                if not threaded:
                    if connections > 1:
                        if last_pid:
                            if pulse_id != (last_pid + pid_offset):
                                _logger.warning("Wrong pulse offset: realigning streams last=%d current=%d [%s]" %
                                                (last_pid, pulse_id, camera.get_name()))
                                align_streams()
                                last_pid = None
                                return False
                        last_pid = pulse_id

                timestamp_s = data.data.global_timestamp
                timestamp_ns = data.data.global_timestamp_offset
                timestamp = timestamp_s + (timestamp_ns / 1e9)

                data = {"image": image,
                        "height": height,
                        "width": width,
                        "x_axis": x_axis,
                        "y_axis": y_axis,
                        "timestamp": timestamp}

                if threaded:
                    with message_buffer_lock:
                        message_buffer[pulse_id] = (data, timestamp)
                else:
                    sender.send(data=data, pulse_id=pulse_id, timestamp=timestamp, check_data=data_format_changed)
                    data_format_changed = False
                    on_message_sent(statistics)
            except Exception as e:
                _logger.error("Could not process message: %s [%s]" % (str(e), camera.get_name()))
                exit_code = 3
                stop_event.set()
            return True

        def receive_task(index, message_buffer, stop_event, message_buffer_lock, camera_stream):
            nonlocal exit_code
            _logger.info("Start receive thread %d [%s]" % (index, camera.get_name()))
            #camera_stream = camera.get_stream()
            camera_stream.connect()
            try:
                while not stop_event.is_set():
                    process_stream(camera_stream, index)
                _logger.info("stop_event set to receive thread %d [%s]" % (index, camera.get_name()))
            except Exception as e:
                _logger.error("Error on receive thread %d: %s [%s]" % (index, str(e), camera.get_name()))
                exit_code = 4
            finally:
                stop_event.set()
                if camera_stream:
                    try:
                        camera_stream.disconnect()
                    except:
                        pass
                _logger.info("Exit receive thread %d [%s]" % (index, camera.get_name()))

        if threaded:
            for i in range(connections):
                camera_stream = camera.get_stream(data_change_callback=data_change_callback)
                #camera_stream.format_error_counter = 0
                receive_thread = Thread(target=receive_task,
                                        args=(i, message_buffer, stop_event, message_buffer_lock, camera_stream))
                receive_thread.start()
                receive_threads.append(receive_thread)

        start_error = 0
        while not stop_event.is_set():
            while not parameter_queue.empty():
                new_parameters = parameter_queue.get()
                camera.camera_config.set_configuration(new_parameters)
                process_parameters()

            if data_changed:
                # Sleeping in case channels are monitored and were not updated yet.
                time.sleep(0.1)
                camera.updtate_size_raw()
                process_parameters()
                _logger.warning("Image shape changed: %dx%d [%s]." % (x_size, y_size, camera.get_name()))
                if threaded:
                    # If threaded, give the other threads some time to report the change.
                    time.sleep(0.25)
                data_changed = False

            if format_error:
                now = time.time()
                if start_error <= 0:
                    _logger.warning("Invalid image format [%s]" % (camera.get_name()))
                    start_error = now
                else:
                    if (now - start_error) > config.BSREAD_FORMAT_ERROR_TIMEOUT:
                        _logger.error("Invalid image format timeout: stopping instance [%s]" % (camera.get_name()))
                        stop_event.set()
                        exit_code = 5
                        break
            else:
                if start_error > 0:
                    _logger.info("Image format ok [%s]" % (camera.get_name()))
                start_error = 0

            if threaded:
                time.sleep(0.01)
            else:
                for i in range(len(camera_streams)):
                    if not process_stream(camera_streams[i], i):
                        break

        _logger.info("Stopping transceiver [%s]" % (camera.get_name(),))

    except Exception as e:
        _logger.exception("Error while processing camera stream: %s [%s]" % (str(e), camera.get_name()))
        exit_code = 1

    finally:
        # Wait for termination / update configuration / etc.
        stop_event.wait()

        if camera:
            try:
                camera.disconnect()
            except:
                pass

        if not threaded:
            for stream in camera_streams:
                try:
                    stream.disconnect()
                except:
                    pass
            if sender:
                try:
                    sender.close()
                except:
                    pass
        else:
            for t in receive_threads + [message_buffer_send_thread]:
                if t:
                    try:
                        t.join(0.1)
                    except:
                        pass

        sys.exit(exit_code)
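# Example (illustrative only): the ordering policy of message_buffer_send_task() above,
# reduced to a pure helper. Messages arrive keyed by pulse ID from several receive threads;
# the sender always takes the smallest buffered ID, drops IDs older than the last one sent,
# and only waits for a missing ID while the buffer stays below the threshold. The real task
# additionally adapts the expected pulse ID interval when IDs are skipped. The helper name
# and signature are placeholders, not part of the original module.
def _example_pop_next_in_order(message_buffer, last_pid, interval, threshold):
    """Return (pulse_id, payload) of the next message to send, or (None, None) if none is ready."""
    if not message_buffer:
        return None, None
    pulse_id = min(message_buffer)
    if (last_pid is not None) and (pulse_id <= last_pid):
        message_buffer.pop(pulse_id)  # Ancient pulse ID: drop it rather than sending out of order.
        return None, None
    if (last_pid is None) or (pulse_id <= last_pid + interval) or (len(message_buffer) > threshold):
        return pulse_id, message_buffer.pop(pulse_id)
    # A smaller pulse ID may still arrive from another receive thread: keep waiting while the
    # buffer is below the threshold.
    return None, None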