def wait_for_gui_process(self, retry_count=20, retry_duration_s=1):
    '''
    .. versionchanged:: 2.7.2
        Do not execute `refresh_gui()` while waiting for response from
        `hub_execute()`.
    '''
    start = datetime.now()
    for i in xrange(retry_count):
        try:
            hub_execute(self.name, 'ping', timeout_s=5, silent=True)
        except Exception:
            logger.debug('[wait_for_gui_process] failed (%d of %d)', i + 1,
                         retry_count, exc_info=True)
        else:
            logger.info('[wait_for_gui_process] success (%d of %d)', i + 1,
                        retry_count)
            self.alive_timestamp = datetime.now()
            return
        for j in xrange(10):
            time.sleep(retry_duration_s / 10.)
            refresh_gui()
    raise IOError('Timed out after %ss waiting for GUI process to connect '
                  'to hub.' % si_format((datetime.now() -
                                         start).total_seconds()))
def on_step_run(self):
    '''
    Handler called whenever a step is executed.

    Plugins that handle this signal must emit the ``on_step_complete``
    signal once they have completed the step.  The protocol controller
    will wait until all plugins have completed the current step before
    proceeding.

    .. versionchanged:: 2.2.2
        Emit ``on_step_complete`` signal within thread-safe function, since
        signal callbacks may use GTK.
    '''
    app = get_app()
    if (app.realtime_mode or app.running) and self.gui_process is not None:
        step_options = self.get_step_options()
        if not step_options['video_enabled']:
            command = 'disable_video'
        else:
            command = 'enable_video'
        hub_execute(self.name, command)
    # Call as thread-safe function, since signal callbacks may use GTK.
    gtk_threadsafe(emit_signal)('on_step_complete', [self.name, None])
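# A minimal, hypothetical sketch of the thread-safe emission pattern used in
# the handler above.  It assumes `gtk_threadsafe` wraps a callable so that it
# runs in the GTK main loop (e.g., via `gobject.idle_add`), since signal
# callbacks may touch GTK widgets.  The wrapper name below is illustrative,
# not the actual plugin API.
import gobject


def gtk_threadsafe_sketch(func):
    '''Return a wrapper that schedules `func` to run in the GTK main loop.'''
    def _wrapped(*args, **kwargs):
        def _callback():
            func(*args, **kwargs)
            return False  # Run once; do not reschedule the idle callback.
        gobject.idle_add(_callback)
    return _wrapped

# Usage (mirroring the handler above):
#     gtk_threadsafe_sketch(emit_signal)('on_step_complete', [name, None])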
def get_ui_json_settings(self):
    '''
    Get current video settings from DMF device UI plugin.

    Returns
    -------
    (dict) : DMF device UI plugin settings in JSON-compatible format (i.e.,
        only basic Python data types).
    '''
    video_settings = {}

    # Try to request video configuration.
    try:
        video_config = hub_execute(self.name, 'get_video_config',
                                   wait_func=lambda *args: refresh_gui(),
                                   timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for video configuration.')
    else:
        if video_config is not None:
            video_settings['video_config'] = video_config.to_json()
        else:
            video_settings['video_config'] = ''

    # Try to request allocation to save in app options.
    try:
        data = hub_execute(self.name, 'get_corners',
                           wait_func=lambda *args: refresh_gui(),
                           timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for device window size and '
                       'position request.')
    else:
        if data:
            # Get window allocation settings (i.e., width, height, x, y).
            # Replace `df_..._corners` with CSV string named `..._corners`
            # (no `df_` prefix).
            for k in ('df_canvas_corners', 'df_frame_corners'):
                if k in data:
                    data['allocation'][k[3:]] = data.pop(k).to_csv()
            video_settings.update(data['allocation'])

    # Try to request surface alphas.
    try:
        surface_alphas = hub_execute(self.name, 'get_surface_alphas',
                                     wait_func=lambda *args: refresh_gui(),
                                     timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for surface alphas.')
    else:
        if surface_alphas is not None:
            video_settings['surface_alphas'] = surface_alphas.to_json()
        else:
            video_settings['surface_alphas'] = ''
    return video_settings
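# Illustrative (hypothetical) shape of the dictionary returned by
# `get_ui_json_settings()` above, based on the keys assembled in the method:
# `video_config` and `surface_alphas` are JSON strings, and the allocation
# keys plus the `canvas_corners`/`frame_corners` CSV strings come from the
# `get_corners` response.  The values shown are made up for illustration.
example_video_settings = {
    'video_config': '{...}',       # JSON-encoded video configuration
    'width': 640, 'height': 480,   # window allocation
    'x': 100, 'y': 100,
    'canvas_corners': '...',       # CSV-encoded canvas corner coordinates
    'frame_corners': '...',        # CSV-encoded video-frame corner coordinates
    'surface_alphas': '{...}',     # JSON-encoded alpha value per surface
}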
def _wait_for_gui():
    self.wait_for_gui_process()
    # Get current video settings from UI.
    app_values = self.get_app_values()
    # Convert JSON settings to 0MQ plugin API Python types.
    ui_settings = self.json_settings_as_python(app_values)
    self.set_ui_settings(ui_settings, default_corners=True)
    self.gui_heartbeat_id = gobject.timeout_add(1000, keep_alive)
    # Refresh list of electrode and route commands.
    hub_execute('microdrop.command_plugin', 'get_commands')
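# `keep_alive` is not shown in this excerpt.  A minimal sketch of what a
# heartbeat callback for `gobject.timeout_add(1000, keep_alive)` could look
# like, assuming it pings the GUI process over the hub; the function name and
# exact recovery behaviour are assumptions, not the plugin's actual code.
def keep_alive_sketch(plugin):
    try:
        hub_execute(plugin.name, 'ping', timeout_s=2, silent=True)
    except Exception:
        logger.warning('GUI process stopped responding.', exc_info=True)
        return False  # Returning False cancels the 1 s heartbeat.
    plugin.alive_timestamp = datetime.now()
    return True  # Returning True keeps the heartbeat timeout running.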
def wait_for_gui_process(self, retry_count=20, retry_duration_s=1):
    start = datetime.now()
    for i in xrange(retry_count):
        try:
            hub_execute(self.name, 'ping',
                        wait_func=lambda *args: refresh_gui(), timeout_s=5,
                        silent=True)
        except Exception:
            logger.debug('[wait_for_gui_process] failed (%d of %d)', i + 1,
                         retry_count, exc_info=True)
        else:
            logger.info('[wait_for_gui_process] success (%d of %d)', i + 1,
                        retry_count)
            self.alive_timestamp = datetime.now()
            return
        for j in xrange(10):
            time.sleep(retry_duration_s / 10.)
            refresh_gui()
    raise IOError('Timed out after %ss waiting for GUI process to connect '
                  'to hub.' % si_format((datetime.now() -
                                         start).total_seconds()))
def on_step_run(self):
    '''
    Handler called whenever a step is executed.

    Plugins that handle this signal must emit the ``on_step_complete``
    signal once they have completed the step.  The protocol controller
    will wait until all plugins have completed the current step before
    proceeding.
    '''
    app = get_app()
    if (app.realtime_mode or app.running) and self.gui_process is not None:
        step_options = self.get_step_options()
        if not step_options['video_enabled']:
            hub_execute(self.name, 'disable_video',
                        wait_func=lambda *args: refresh_gui(), timeout_s=5,
                        silent=True)
        else:
            hub_execute(self.name, 'enable_video',
                        wait_func=lambda *args: refresh_gui(), timeout_s=5,
                        silent=True)
    emit_signal('on_step_complete', [self.name, None])
def set_ui_settings(self, ui_settings, default_corners=False):
    '''
    Set DMF device UI settings from settings dictionary.

    Args
    ----
    ui_settings (dict) : DMF device UI plugin settings in format returned
        by `json_settings_as_python` method.

    .. versionchanged:: 2.7.2
        Do not execute `refresh_gui()` while waiting for response from
        `hub_execute()`.
    '''
    if self.alive_timestamp is None or self.gui_process is None:
        # Repeat until GUI process has started.
        raise IOError('GUI process not ready.')

    if 'video_config' in ui_settings:
        hub_execute(self.name, 'set_video_config',
                    video_config=ui_settings['video_config'], timeout_s=5)

    if 'surface_alphas' in ui_settings:
        hub_execute(self.name, 'set_surface_alphas',
                    surface_alphas=ui_settings['surface_alphas'],
                    timeout_s=5)

    if all((k in ui_settings) for k in ('df_canvas_corners',
                                        'df_frame_corners')):
        if default_corners:
            hub_execute(self.name, 'set_default_corners',
                        canvas=ui_settings['df_canvas_corners'],
                        frame=ui_settings['df_frame_corners'], timeout_s=5)
        else:
            hub_execute(self.name, 'set_corners',
                        df_canvas_corners=ui_settings['df_canvas_corners'],
                        df_frame_corners=ui_settings['df_frame_corners'],
                        timeout_s=5)
def set_ui_settings(self, ui_settings, default_corners=False):
    '''
    Set DMF device UI settings from settings dictionary.

    Args
    ----
    ui_settings (dict) : DMF device UI plugin settings in format returned
        by `json_settings_as_python` method.
    '''
    if self.alive_timestamp is None or self.gui_process is None:
        # Repeat until GUI process has started.
        raise IOError('GUI process not ready.')

    if 'video_config' in ui_settings:
        hub_execute(self.name, 'set_video_config',
                    video_config=ui_settings['video_config'],
                    wait_func=lambda *args: refresh_gui(), timeout_s=5)

    if 'surface_alphas' in ui_settings:
        hub_execute(self.name, 'set_surface_alphas',
                    surface_alphas=ui_settings['surface_alphas'],
                    wait_func=lambda *args: refresh_gui(), timeout_s=5)

    if all((k in ui_settings) for k in ('df_canvas_corners',
                                        'df_frame_corners')):
        if default_corners:
            hub_execute(self.name, 'set_default_corners',
                        canvas=ui_settings['df_canvas_corners'],
                        frame=ui_settings['df_frame_corners'],
                        wait_func=lambda *args: refresh_gui(), timeout_s=5)
        else:
            hub_execute(self.name, 'set_corners',
                        df_canvas_corners=ui_settings['df_canvas_corners'],
                        df_frame_corners=ui_settings['df_frame_corners'],
                        wait_func=lambda *args: refresh_gui(), timeout_s=5)
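# Illustrative (hypothetical) shape of the `ui_settings` dictionary expected
# by the two `set_ui_settings()` variants above, based on the keys they check.
# Representing the corner sets as 4x2 pandas frames of (x, y) points and the
# alpha values as a mapping are assumptions for this sketch; the real types
# come from `json_settings_as_python`.
import pandas as pd

example_ui_settings = {
    'video_config': None,  # video configuration object (or None)
    'surface_alphas': {'registration': 0.5, 'shapes': 1.0},  # made-up values
    'df_canvas_corners': pd.DataFrame([[0, 0], [1, 0], [1, 1], [0, 1]],
                                      columns=['x', 'y']),
    'df_frame_corners': pd.DataFrame([[0, 0], [640, 0], [640, 480],
                                      [0, 480]], columns=['x', 'y']),
}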
def get_ui_json_settings(self):
    '''
    Get current video settings from DMF device UI plugin.

    Returns
    -------
    (dict) : DMF device UI plugin settings in JSON-compatible format (i.e.,
        only basic Python data types).

    .. versionchanged:: 2.7.2
        Do not execute `refresh_gui()` while waiting for response from
        `hub_execute()`.
    '''
    video_settings = {}

    # Try to request video configuration.
    try:
        video_config = hub_execute(self.name, 'get_video_config',
                                   timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for video configuration.')
    else:
        if video_config is not None:
            video_settings['video_config'] = video_config.to_json()
        else:
            video_settings['video_config'] = ''

    # Try to request allocation to save in app options.
    try:
        data = hub_execute(self.name, 'get_corners', timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for device window size and '
                       'position request.')
    else:
        if data:
            # Get window allocation settings (i.e., width, height, x, y).
            # Replace `df_..._corners` with CSV string named `..._corners`
            # (no `df_` prefix).
            for k in ('df_canvas_corners', 'df_frame_corners'):
                if k in data:
                    data['allocation'][k[3:]] = data.pop(k).to_csv()
            video_settings.update(data['allocation'])

    # Try to request surface alphas.
    try:
        surface_alphas = hub_execute(self.name, 'get_surface_alphas',
                                     timeout_s=2)
    except IOError:
        logger.warning('Timed out waiting for surface alphas.')
    else:
        if surface_alphas is not None:
            video_settings['surface_alphas'] = surface_alphas.to_json()
        else:
            video_settings['surface_alphas'] = ''
    return video_settings
def check_dstat_status(self):
    '''
    1. Check to see if acquisition is finished.
    2. If (1), emit `on_step_complete` signal.
    '''
    try:
        completed_timestamp = hub_execute('dstat-interface',
                                          'acquisition_complete',
                                          experiment_id=
                                          self.dstat_experiment_id,
                                          timeout_s=5.)
        if completed_timestamp is not None:
            # ## Acquisition is complete ##
            app = get_app()

            # Increment the number of completed DStat experiments for
            # current step.
            step_i = app.protocol.current_step_number
            count_i = 1 + self.dstat_experiment_count_by_step.get(step_i, 0)
            self.dstat_experiment_count_by_step[step_i] = count_i

            # ### Save results data and plot ###
            output_directory = (path(app.experiment_log.get_log_path())
                                .abspath())
            output_namebase = str(app.protocol.current_step_number)
            step_label = self.get_step_label()
            if step_label is not None:
                # Replace characters that are not allowed in a filename
                # with underscore.
                output_namebase = re.sub(r'[:/\\\?{}]', '_', step_label)

            # Save results to a text file in the experiment log directory.
            output_txt_path = get_unique_path(output_directory
                                              .joinpath(output_namebase +
                                                        '.txt'))
            logger.info('Save results to: %s', output_txt_path)
            dstat_params = hub_execute('dstat-interface', 'get_params')
            hub_execute('dstat-interface', 'save_text',
                        save_data_path=output_txt_path)

            data_i = hub_execute('dstat-interface', 'get_experiment_data',
                                 experiment_id=self.dstat_experiment_id)
            metadata_i = self.get_step_metadata()

            # Compute (approximate) `utc_timestamp` for each DStat
            # measurement.
            max_time_s = data_i.time_s.max()
            metadata_i['utc_timestamp'] = \
                (completed_timestamp - data_i.time_s
                 .map(lambda t: timedelta(seconds=max_time_s - t)))
            # Step label from step label plugin.
            metadata_i['step_label'] = step_label
            # Compute UTC start time from local experiment start time.
            metadata_i['experiment_start'] = \
                (dt.datetime.fromtimestamp(app.experiment_log.start_time())
                 + (dt.datetime.utcnow() - dt.datetime.now()))
            # Compute experiment length (in minutes) from UTC start time.
            metadata_i['experiment_length_min'] = \
                (completed_timestamp -
                 metadata_i['experiment_start']).total_seconds() / 60.
            # Record synchronous detection parameters from DStat (if set).
            if dstat_params['sync_true']:
                metadata_i['target_hz'] = float(dstat_params['sync_freq'])
            else:
                metadata_i['target_hz'] = None
            metadata_i['sample_frequency_hz'] = \
                float(dstat_params['adc_rate_hz'])

            # Cast metadata `unicode` fields as `str` to enable HDF export.
            for k, v in metadata_i.iteritems():
                if isinstance(v, types.StringTypes):
                    metadata_i[k] = str(v)

            data_md_i = data_i.copy()
            for i, (k, v) in enumerate(metadata_i.iteritems()):
                try:
                    data_md_i.insert(i, k, v)
                except Exception, e:
                    logger.info('Skipping metadata field %s: %s.\n%s',
                                k, v, e)

            # Set order for known columns.  Unknown columns are ordered
            # last, alphabetically.
            column_order = ['instrument_id', 'experiment_id',
                            'experiment_uuid', 'experiment_start',
                            'experiment_length_min', 'utc_timestamp',
                            'device_id', 'batch_id', 'sample_id',
                            'step_label', 'step_number', 'attempt_number',
                            'temperature_celsius', 'relative_humidity',
                            'target_hz', 'sample_frequency_hz', 'time_s',
                            'current_amps']
            column_index = dict([(k, i)
                                 for i, k in enumerate(column_order)])
            ordered_columns = sorted(data_md_i.columns,
                                     key=lambda k:
                                     (column_index.get(k,
                                                       len(column_order)),
                                      k))
            data_md_i = data_md_i[ordered_columns]

            namebase_i = ('e[{}]-d[{}]-s[{}]'
                          .format(metadata_i['experiment_uuid'][:8],
                                  metadata_i.get('device_id'),
                                  metadata_i.get('sample_id')))

            if self.dstat_experiment_data is None:
                self.dstat_experiment_data = data_md_i
            else:
                combined = pd.concat([self.dstat_experiment_data,
                                      data_md_i])
                self.dstat_experiment_data = \
                    combined.reset_index(drop=True)

            # Append DStat experiment data to CSV file.
            csv_output_path = self.data_dir().joinpath(namebase_i + '.csv')
            # Only include header if the file does not exist or is empty.
            include_header = not (csv_output_path.isfile() and
                                  (csv_output_path.size > 0))
            with csv_output_path.open('a') as output:
                data_md_i.to_csv(output, index=False,
                                 header=include_header)

            df_dstat_summary = self.dstat_summary_frame(numeric=True)
            # Write DStat summary table to CSV file.
            csv_summary_path = self.data_dir().joinpath('dstat-summary.csv')
            with csv_summary_path.open('w') as output:
                df_dstat_summary.to_csv(output)

            # Turn light back on after photomultiplier tube (PMT)
            # measurement.
            self.dropbot_dx_remote.light_enabled = True
            # Notify step complete.
            emit_signal('on_step_complete', [self.name, None])
            self.dstat_timeout_id = None
            return False
        else:
            # Acquisition is still in progress.  Assumed completion of this
            # excerpt: return `True` so the 250 ms `gobject` timeout fires
            # again and polls until the DStat reports completion.
            return True
    except Exception:
        # Assumed handling (not shown in the original excerpt): log the
        # failure and keep polling rather than aborting the step.
        logger.debug('[check_dstat_status] status request failed.',
                     exc_info=True)
        return True
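# The column ordering above uses a small, reusable pattern: sort known
# columns by their position in `column_order` and push unknown columns to the
# end, alphabetically.  A self-contained sketch of the same idea (the helper
# name is illustrative, not part of the plugin):
def order_columns(columns, column_order):
    '''Return `columns` sorted by `column_order`, unknown columns last (A-Z).'''
    column_index = dict((k, i) for i, k in enumerate(column_order))
    return sorted(columns,
                  key=lambda k: (column_index.get(k, len(column_order)), k))

# Example:
#     order_columns(['time_s', 'zz_extra', 'experiment_id', 'aa_extra'],
#                   ['experiment_id', 'time_s'])
#     -> ['experiment_id', 'time_s', 'aa_extra', 'zz_extra']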
def on_step_run(self):
    '''
    Handler called whenever a step is executed.

    Note that this signal is only emitted in realtime mode or if a
    protocol is running.

    Plugins that handle this signal must emit the `on_step_complete`
    signal once they have completed the step.  The protocol controller
    will wait until all plugins have completed the current step before
    proceeding.

    The `on_step_complete` signal is emitted with the following signature:

        emit_signal('on_step_complete', [plugin_name, return_value])

    where `plugin_name` is the name of the emitting plugin, and
    `return_value` can be one of:

     - `None`: Step completed successfully.
     - `'Repeat'`: Repeat the step.
     - `'Fail'`: Unrecoverable error (stop the protocol).
    '''
    app = get_app()
    logger.info('[DropBotDxAccessoriesPlugin] on_step_run(): step #%d',
                app.protocol.current_step_number)
    options = self.get_step_options()
    app_values = self.get_app_values()
    if self.connected():
        self.dropbot_dx_remote.light_enabled = not options['dstat_enabled']
        self.dropbot_dx_remote.magnet_engaged = options['magnet_engaged']
        try:
            if self.has_environment_data:
                env = self.get_environment_state().to_dict()
                logger.info('temp=%.1fC, Rel. humidity=%.1f%%' %
                            (env['temperature_celsius'],
                             100 * env['relative_humidity']))
                app.experiment_log.add_data({"environment state": env},
                                            self.name)
        except ValueError:
            self.has_environment_data = False
        if options['dstat_enabled']:
            # DStat is enabled for step.  Request acquisition.
            try:
                if 'dstat_params_file' in options:
                    # Load DStat parameters.
                    hub_execute('dstat-interface', 'load_params',
                                params_path=options['dstat_params_file'])
                if self.dstat_timeout_id is not None:
                    # Timer was already set, so cancel previous timer.
                    gobject.source_remove(self.dstat_timeout_id)
                # Delay before DStat measurement (e.g., to allow webcam
                # light to turn off).
                dstat_delay_s = app_values.get('dstat_delay_s', 0)
                time.sleep(max(0, dstat_delay_s))
                step_label = self.get_step_label()
                # Send Microdrop step label (if available) to provide name
                # for DStat experiment.
                metadata = self.metadata.copy()
                metadata['name'] = (step_label if step_label
                                    else str(app.protocol
                                             .current_step_number + 1))
                metadata['patient_id'] = metadata.get('sample_id', 'None')
                # Get target path for DStat database directory.
                dstat_database_path = (path(app.config['data_dir'])
                                       .realpath().joinpath('dstat-db'))
                self.dstat_experiment_id = \
                    hub_execute('dstat-interface', 'run_active_experiment',
                                metadata=metadata,
                                params={'db_path_entry':
                                        str(dstat_database_path),
                                        'db_enable_checkbutton': True})
                self._dstat_spinner = itertools.cycle(r'-\|/')
                print ''
                # Check every 250 ms to see if DStat acquisition has
                # completed.
                self.dstat_timeout_id = \
                    gobject.timeout_add(250, self.check_dstat_status)
            except Exception:
                print "Exception in user code:"
                print '-' * 60
                traceback.print_exc(file=sys.stdout)
                print '-' * 60
                # An error occurred while starting the DStat acquisition.
                emit_signal('on_step_complete', [self.name, 'Fail'])
        else:
            # DStat is not enabled, so step is complete.
            emit_signal('on_step_complete', [self.name, None])
    else:
        # DropBot-DX device is not connected, but allow protocol to
        # continue.
        #
        # N.B., A warning message is displayed once at the *start* of the
        # protocol if no DropBot-DX connection has been established, but
        # *not* on each step.
        emit_signal('on_step_complete', [self.name, None])
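# A minimal skeleton of the `on_step_complete` convention described in the
# docstring above: a hypothetical plugin handler that reports success,
# requests a repeat, or fails the protocol.  The `run_measurement` helper is
# illustrative only, not part of any plugin.
def on_step_run(self):
    try:
        ok = self.run_measurement()  # hypothetical per-step work
    except Exception:
        # Unrecoverable error: stop the protocol.
        emit_signal('on_step_complete', [self.name, 'Fail'])
    else:
        if ok:
            # Step completed successfully.
            emit_signal('on_step_complete', [self.name, None])
        else:
            # Ask the protocol controller to repeat this step.
            emit_signal('on_step_complete', [self.name, 'Repeat'])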