def _load_plugin(self, type_name, entrypoint: entrypoints.EntryPoint):
    # If the entrypoint was already loaded into cache and queued, do nothing
    if self._load_cache[type_name].get(entrypoint.name, None):
        return

    try:
        # Load the entrypoint (unless already cached), cache it, and put it on the instantiate queue
        msg.logMessage(
            f'Loading entrypoint {entrypoint.name} from module: {entrypoint.module_name}'
        )
        with load_timer() as elapsed:
            plugin_class = self._load_cache[type_name][entrypoint.name] = \
                self._load_cache[type_name].get(entrypoint.name, None) or entrypoint.load()
    except (Exception, SystemError) as ex:
        msg.logMessage(
            f"Unable to load {entrypoint.name} plugin from module: {entrypoint.module_name}",
            msg.ERROR)
        msg.logError(ex)
        msg.notifyMessage(
            repr(ex),
            title=f'An error occurred while starting the "{entrypoint.name}" plugin.',
            level=msg.CRITICAL)
    else:
        msg.logMessage(
            f"{int(elapsed() * 1000)} ms elapsed while loading {entrypoint.name}",
            level=msg.INFO)
        self._instantiate_queue.put((type_name, entrypoint, plugin_class))

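# `load_timer` is used throughout these loaders but defined elsewhere in Xi-CAM; the
# stand-in below is a minimal sketch of the contract the call sites rely on: a context
# manager yielding a callable, so `elapsed()` can be read inside or after the block.
from contextlib import contextmanager
from timeit import default_timer

@contextmanager
def load_timer():
    start = default_timer()
    yield lambda: default_timer() - start  # seconds since entering the block

with load_timer() as elapsed:
    sum(range(1_000_000))  # stand-in workload
print(f"{int(elapsed() * 1000)} ms elapsed")
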
def instanciatePlugin(self, plugin_info, element):
    """
    The default behavior is that each plugin is instantiated at load time; the class is thrown away.
    Add the isSingleton = False attribute to your plugin class to prevent this behavior!
    """
    msg.logMessage(f"Instantiating {plugin_info.name} plugin object.", level=msg.INFO)
    with load_timer() as elapsed:
        try:
            if getattr(element, "isSingleton", True):
                plugin_info.plugin_object = element()
            else:
                plugin_info.plugin_object = element
        except (Exception, SystemError) as ex:
            exc_info = sys.exc_info()
            msg.logMessage("Unable to instantiate plugin: %s" % plugin_info.path, msg.ERROR)
            msg.logError(ex)
            msg.notifyMessage(
                repr(ex),
                title=f'An error occurred while starting the "{plugin_info.name}" plugin.',
                level=msg.CRITICAL)
            plugin_info.error = exc_info
    msg.logMessage(
        f"{int(elapsed() * 1000)} ms elapsed while instantiating {plugin_info.name}",
        level=msg.INFO)
    self.notify()

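# A hypothetical plugin illustrating the opt-out described in the docstring above:
# with isSingleton = False, the manager stores the class itself as plugin_object
# instead of calling it at load time.
class DeferredPlugin:
    isSingleton = False  # keep the class; instantiate later, on demand
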
def __init__(self, **kwargs):
    super(QRunEngine, self).__init__()
    self.RE = RunEngine(context_managers=[], during_task=DuringTask(), **kwargs)
    self.RE.subscribe(self.sigDocumentYield.emit)
    # TODO: pull from settings plugin
    from suitcase.mongo_normalized import Serializer
    # TODO: create single databroker db
    # python-dotenv stores name-value pairs in .env (add to .gitignore)
    username = os.getenv("USER_MONGO")
    pw = os.getenv("PASSWD_MONGO")
    try:
        self.RE.subscribe(
            Serializer(
                f"mongodb://{username}:{pw}@localhost:27017/mds?authsource=mds",
                f"mongodb://{username}:{pw}@localhost:27017/fs?authsource=fs"
            ))
    except OperationFailure as err:
        msg.notifyMessage("Could not connect to local mongo database.",
                          title="xicam.Acquire Error",
                          level=msg.ERROR)
        msg.logError(err)

    self.queue = PriorityQueue()
    self.process_queue()

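# A minimal sketch of sourcing USER_MONGO/PASSWD_MONGO with python-dotenv, as the
# comment above suggests (the .env file location is an assumption):
import os
from dotenv import load_dotenv

load_dotenv()  # reads name-value pairs from a .env file into the environment
username = os.getenv("USER_MONGO")
pw = os.getenv("PASSWD_MONGO")
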
def _load_plugin(self, load_task: PluginTask):
    entrypoint = load_task.entry_point

    try:
        # Load the entrypoint (unless already cached), cache it, and put it on the instantiate queue
        msg.logMessage(
            f"Loading entrypoint {entrypoint.name} from module: {entrypoint.module_name}"
        )
        with load_timer() as elapsed:
            load_task.plugin_class = entrypoint.load()
    except (Exception, SystemError) as ex:
        msg.logMessage(
            f"Unable to load {entrypoint.name} plugin from module: {entrypoint.module_name}",
            level=msg.ERROR)
        msg.logError(ex)
        msg.notifyMessage(
            repr(ex),
            title=f'An error occurred while starting the "{entrypoint.name}" plugin.',
            level=msg.CRITICAL)
        load_task.status = Status.FailedLoad
    else:
        msg.logMessage(
            f"{int(elapsed() * 1000)} ms elapsed while loading {entrypoint.name}",
            level=msg.INFO)
        self._instantiate_queue.put(load_task)
        load_task.status = Status.InstantiateQueue

def instanciatePlugin(self, plugin_info, element, category_name):
    """
    The default behavior is that each plugin is instantiated at load time; the class is thrown away.
    Add the isSingleton = False attribute to your plugin class to prevent this behavior!
    """
    msg.logMessage(f"Instantiating {plugin_info.name} plugin object.", level=msg.INFO)
    with load_timer() as elapsed:
        try:
            if list(filter(lambda plugin: plugin.name == plugin_info.name,
                           self.category_mapping[category_name])):
                msg.logMessage(f'A plugin named "{plugin_info.name}" has already been loaded.')
                return
            if getattr(element, "isSingleton", True):
                plugin_info.plugin_object = element()
            else:
                plugin_info.plugin_object = element
        except (Exception, SystemError) as ex:
            exc_info = sys.exc_info()
            msg.logMessage("Unable to instantiate plugin: %s" % plugin_info.path, msg.ERROR)
            msg.logError(ex)
            msg.notifyMessage(
                repr(ex),
                title=f'An error occurred while starting the "{plugin_info.name}" plugin.',
                level=msg.CRITICAL
            )
            plugin_info.error = exc_info
        else:
            plugin_info.categories.append(category_name)
            self.category_mapping[category_name].append(plugin_info)
    msg.logMessage(f"{int(elapsed() * 1000)} ms elapsed while instantiating {plugin_info.name}",
                   level=msg.INFO)
    self.notify()

def ingest(cls, paths):
    updated_doc = dict()
    # TODO -- update for multiple paths (pending dbheader interface)
    if len(paths) > 1:
        paths = [paths[0]]
        message = 'Opening multiple already-processed data sources is not yet supported. '
        message += f'Opening the first image, {paths[0]}...'
        notifyMessage(message, level=WARNING)
    print(f'PATHS: {paths}')

    for name, doc in cls._createDocument(paths):
        if name == 'start':
            updated_doc[name] = doc
            # TODO -- should 'sample_name' and 'paths' be something different?
            doc['sample_name'] = cls.title(paths)
            doc['paths'] = paths
        elif name == 'descriptor':
            if updated_doc.get('descriptors'):
                updated_doc['descriptors'].append(doc)
            else:
                updated_doc['descriptors'] = [doc]
        elif name == 'event':
            if updated_doc.get('events'):
                updated_doc['events'].append(doc)
            else:
                updated_doc['events'] = [doc]
        elif name == 'stop':
            updated_doc[name] = doc

    return updated_doc

def diffusion_coefficient(
        relaxation_rates: np.ndarray,
        labels: np.ndarray,
        g2: np.ndarray,
        tau: np.ndarray,
        fit_curve: np.ndarray,
        geometry: AzimuthalIntegrator = None,
        transmission_mode: str = 'transmission',
        incidence_angle: float = None,
):
    # TODO: what should we do when we only get one relaxation rate (i.e. one ROI / non-segmented ROI)?
    if geometry is None:
        msg.notifyMessage('Calibration is required to compute diffusion coefficients.')
        return np.array([0]), np.array([0]), relaxation_rates, g2, tau, fit_curve
    else:
        qs = np.asarray(
            average_q_from_labels(labels, geometry, transmission_mode, incidence_angle))
        x = qs ** 2
        # diffusion_values = relaxation_rates / x
        model = models.Linear1D()
        fitting_algorithm = fitting.LinearLSQFitter()
        fit = fitting_algorithm(model, x, relaxation_rates)
        return fit(x), x, relaxation_rates, g2, tau, fit_curve

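# Standalone illustration of the fit above: astropy's Linear1D model fitted against
# q², so the fitted slope approximates the diffusion coefficient D in Γ = D·q².
# The relaxation-rate data here is synthetic.
import numpy as np
from astropy.modeling import models, fitting

x = np.array([0.01, 0.04, 0.09, 0.16])  # q² values
relaxation_rates = 2.5 * x + 0.003      # synthetic Γ values
fit = fitting.LinearLSQFitter()(models.Linear1D(), x, relaxation_rates)
print(fit.slope.value)                  # ~2.5, the estimated D
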
def _create_catalog_item(self, ensemble_item, catalog,
                         projectors: List[Callable[[BlueskyRun], List[Intent]]]):
    catalog_item = TreeItem(ensemble_item)
    catalog_name = display_name(catalog)
    catalog_item.setData(catalog_name, Qt.DisplayRole)
    catalog_item.setData(catalog, self.object_role)
    catalog_item.setData(WorkspaceDataType.Catalog, self.data_type_role)

    # We want to notify the user if all projections failed
    _any_projection_succeeded = False
    for projector in projectors:
        try:
            intents = projector(catalog)
            for intent in intents:
                self._create_intent_item(catalog_item, intent)
        except (AttributeError, ProjectionNotFound) as e:
            logMessage(e, level=WARNING)
        else:
            _any_projection_succeeded = True
            ensemble_item.appendChild(catalog_item)
            break

    if not _any_projection_succeeded:
        notifyMessage(
            "Data file was opened, but could not be interpreted in this GUI plugin."
        )

def checkDataShape(self, data):
    """Checks the shape of the data and gets the first frame if able to."""
    if data.shape[0] > 1:
        msg.notifyMessage("Looks like you did not open a single data frame. "
                          "Automated calibration only works with single frame data.")
        return None
    else:
        return data[0]

def accept(self):
    intersection = set(self.get_metadata().keys()).intersection(self.reserved)
    if intersection:
        msg.notifyMessage(f'The field name "{list(intersection)[0]}" is reserved and cannot be used.')
    else:
        super(MetadataDialog, self).accept()
        QSettings().setValue(self._qsettings_key, self.parameter.saveState())

def add_roi(self, roi):
    view = self._get_view()
    if view:
        view.getView().addItem(roi)
        self.workflow.insert_operation(self.index, roi.process)
        # Remove the roi process from the workflow when the roi is removed
        # TODO -- should this be in BetterROI?
        roi.sigRemoveRequested.connect(lambda roi: self.workflow.remove_operation(roi.process))
    else:
        msg.notifyMessage("Please open an image before creating an ROI.",
                          level=msg.WARNING)

def decorator(cls):
    if not isinstance(type_name, str):
        message = f"A plugin type must be specified for plugin named: {plugin_name or cls.__name__}."
        msg.notifyMessage(message)
    else:
        plugin = manager.collect_plugin(plugin_name or cls.__name__, cls, type_name, replace=replace)
        # If the manager didn't drop the plugin (live plugins can't always be loaded, e.g. if not Qt-safe yet),
        # then indicate that the plugin is a live plugin
        if plugin:
            plugin._live = True
    return cls

def hide_column(self, header, logicalIndex):
    '''
    Hide a column, adding the column to the list of hidden columns in QSettings
    '''
    hidden_columns = QSettings().value("catalog.columns.hidden") or set()
    if len(hidden_columns) == header.count() - 1:
        msg.notifyMessage(
            "Only one column remains visible; cannot hide all of them.")
        return
    hidden_columns.add(self._current_column_name(header, logicalIndex))
    QSettings().setValue("catalog.columns.hidden", hidden_columns)
    header.setSectionHidden(logicalIndex, True)

def instanciateLatePlugins(self):
    if qt_is_safe:
        for plugin_info in self.getPluginsOfCategory('GUIPlugin'):
            if callable(plugin_info.plugin_object):
                try:
                    # Force late singleton-ing of GUIPlugins
                    plugin_info.plugin_object = plugin_info.plugin_object()
                except Exception as ex:
                    msg.notifyMessage(
                        f'The "{plugin_info.name}" plugin could not be loaded. {repr(ex)}',
                        level=msg.CRITICAL)
                    msg.logError(ex)

def run_workflow(self):
    # A Workflow has an execute() and an execute_all() to run itself
    # Extract data from the loaded catalog
    if not self.split_widget.catalog_view.catalog:
        notifyMessage("A catalog is not yet loaded, please load one first")
        return

    image_data = self.split_widget.catalog_view.catalog.primary.to_dask()['img'].compute()  # primary.image.read()

    # execute() takes the named inputs of your OperationPlugin as kwargs,
    # and a callback_slot is called when execution finishes
    self.my_workflow.execute(input_image=image_data, callback_slot=self.show_fft)

def _instantiate_plugin(self):
    if not self._instantiate_queue.empty():
        type_name, entrypoint, plugin_class = self._instantiate_queue.get()

        # If this plugin was already instantiated earlier, skip it; mark done
        if self.type_mapping[type_name].get(entrypoint.name, None) is None:

            # Inject the entrypoint name into the class
            plugin_class._name = entrypoint.name
            success = False

            # ... and instantiate it (as long as it's supposed to be a singleton)
            try:
                if getattr(plugin_class, "is_singleton", False):
                    msg.logMessage(f"Instantiating {entrypoint.name} plugin object.", level=msg.INFO)
                    with load_timer() as elapsed:
                        self.type_mapping[type_name][entrypoint.name] = plugin_class()
                    msg.logMessage(
                        f"{int(elapsed() * 1000)} ms elapsed while instantiating {entrypoint.name}",
                        level=msg.INFO
                    )
                else:
                    self.type_mapping[type_name][entrypoint.name] = plugin_class
                success = True
            except (Exception, SystemError) as ex:
                msg.logMessage(
                    f"Unable to instantiate {entrypoint.name} plugin from module: {entrypoint.module_name}",
                    msg.ERROR
                )
                msg.logError(ex)
                msg.notifyMessage(repr(ex), title=f'An error occurred while starting the "{entrypoint.name}" plugin.')

            if success:
                msg.logMessage(f"Successfully collected {entrypoint.name} plugin.", level=msg.INFO)
                msg.showProgress(self._progress_count(), maxval=self._entrypoint_count())
                self._notify(Filters.UPDATE)

        # Mark it as completed
        self._instantiate_queue.task_done()

    # If this was the last plugin
    if self._load_queue.empty() and self._instantiate_queue.empty() and self.state in [State.INSTANTIATING, State.READY]:
        self.state = State.READY
        msg.logMessage("Plugin collection completed!")
        msg.hideProgress()
        self._notify(Filters.COMPLETE)

    if self.state != State.READY:  # if we haven't reached the last task, but there's nothing queued
        threads.invoke_as_event(self._instantiate_plugin)  # return to the event loop, but come back soon

def two_time_correlation(
        images: np.ndarray,
        image_item: pg.ImageItem = None,
        rois: Iterable[pg.ROI] = None,
        autoset_num_bufs: bool = True,
        num_bufs: int = 2,
        num_levels: int = 1,
        geometry: AzimuthalIntegrator = None) -> Tuple[np.ndarray, np.ndarray]:
    # TODO -- make composite parameter item widget to allow default (all frames) or enter value
    num_frames = len(images)
    # Auto-select-buffers will override the number of levels to be 1,
    # which requires that the number of buffers == number of frames
    # (unless odd; then use number of frames - 1)
    if autoset_num_bufs:
        num_levels = 1
        num_frames = num_frames if num_frames % 2 == 0 else num_frames - 1
        num_bufs = num_frames

    labels = get_label_array(images, rois=rois, image_item=image_item)
    if labels.max() == 0:
        msg.notifyMessage(
            "Please add an ROI over which to calculate two-time correlation.")
        raise ValueError(
            "Please add an ROI over which to calculate two-time correlation.")

    corr = two_time_corr(labels.astype(np.int_),
                         np.asarray(images),
                         num_frames,
                         num_bufs,
                         num_levels)
    g2 = corr.g2
    lag_steps = corr.lag_steps

    # Calculate avg qs from the label array for the first dimension of the returned g2
    # (so the slice selector shows qs for indexing)
    qs = None
    if geometry is not None:
        qs = np.asarray(average_q_from_labels(labels, geometry))
        # qs = np.asarray([f"q={q:.3f}" for q in qs])
        # FIXME: why can't we return a python list for the catalog?
        # File "xi-cam/xicam/core/execution/workflow.py", line 886, in project_intents
        #     kwargs[intent_kwarg_name] = getattr(run_catalog, operation_id).to_dask()[output_name]
        # ...
        # File "site-packages/xarray/core/dataarray.py", line 126, in _infer_coords_and_dims
        #     raise ValueError(
        # ValueError: different number of dimensions on data and dims: 2 vs 1

    # Rotate image plane 90 degrees
    g2 = np.rot90(g2, axes=(-2, -1))
    num_labels = g2.shape[0]  # first dimension represents labels
    if qs is None:
        qs = np.array(list(range(1, num_labels + 1)))
    return g2, lag_steps, qs

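# A self-contained sketch of the scikit-beam call above, with a synthetic image stack
# and one square ROI label; the positional argument order mirrors the call in
# two_time_correlation: (labels, images, num_frames, num_bufs, num_levels).
import numpy as np
from skbeam.core.correlation import two_time_corr

images = np.random.poisson(10, size=(32, 16, 16)).astype(float)
labels = np.zeros((16, 16), dtype=np.int_)
labels[4:12, 4:12] = 1  # one ROI
result = two_time_corr(labels, images, 32, 32, 1)
print(result.g2.shape)  # e.g. (num_rois, num_frames, num_frames)
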
def open(self, header):
    if self.currentGUIPlugin is None:
        msg.notifyMessage(
            "Please select a GUI plugin from the top before trying to open an image."
        )
        return
    if isinstance(header, Catalog):
        self.currentGUIPlugin.appendCatalog(header)
    elif isinstance(header, CatalogEntry):
        self.currentGUIPlugin.appendCatalog(header())
    elif isinstance(header, NonDBHeader):
        self.currentGUIPlugin.appendHeader(header)
    else:
        raise TypeError(f"Cannot open {header}.")

def run_workflow(self):
    """
    <SLOT> Run our workflow.

    The Workflow class has an execute() and an execute_all() to run itself.
    """
    # Extract data from the loaded catalog (assumes the button is enabled)
    if not self.center_widget.catalog():
        notifyMessage('No catalog loaded, please open one')
        return

    # primary and 'img' should not be hard coded here
    # # to_dask gives lazy objects
    # image_data = self.catalog_view.catalog.primary.to_dask()['img'].compute()
    image_data = self.center_widget.catalog().primary.read()['img']

    # We can pass in input data, and a function to call when it's done
    self.my_workflow.execute(input_image=image_data, callback_slot=self.show_fft)

def run_workflow(self, **kwargs):
    mixed_kwargs = self.kwargs.copy()
    if self.kwargs_callable is not None:
        try:
            called_kwargs = self.kwargs_callable(self)
        except RuntimeError as e:
            # NOTE: we do not want to raise an exception here (we are in a connected Qt slot)
            # Grab the user-oriented message from the kwargs callable exception
            msg.notifyMessage(str(e), title="Run Workflow Error", level=msg.ERROR)
            msg.logError(e)
        else:
            mixed_kwargs.update(called_kwargs)
    mixed_kwargs.update(kwargs)
    if self.execute_iterative:
        self.workflow.execute_all(**mixed_kwargs)
    else:
        self.workflow.execute(**mixed_kwargs)

def __init__(self, **opts):
    super(TupleGroupParameter, self).__init__(**opts)

    name = opts.get('name')
    # Add spacing for non-single-character names
    if len(name) > 1:
        name += " "

    values = opts.get('value')
    defaults = opts.get('default')

    # Map an integer to its corresponding unicode subscript literal
    def subscript(n: int):
        subscripts = "\u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089"
        return "".join([subscripts[ord(c) - ord('0')] for c in str(n)])

    # Create child param dicts (so Parameter.create is used in addChild)
    # where each element in the tuple is marked with a subscript of its index position,
    # e.g. gains = (1, 4, 8) -> gains 0, gains 1, gains 2 (note that the numbers are subscript when shown)
    if values is None:
        if defaults is None:
            # FIXME: better exception
            message = f'TupleGroupParameter named "{name}" must have default values provided'
            msg.notifyMessage(message, level=msg.ERROR)
            raise Exception(message)
        else:
            values = defaults

    for i in range(len(values)):
        value = values[i]
        # defaults may be omitted when explicit values are provided
        default_value = defaults[i] if defaults is not None else None
        param_type = type(values[i]).__name__
        child_name = f"{name}{subscript(i)}"
        child = {
            "name": child_name,
            "type": param_type,
            "default": default_value,
            "value": value,
        }
        self.addChild(child)

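# The subscript helper in isolation: each digit maps to its Unicode subscript glyph,
# so a tuple parameter named "gains" yields children named gains₀, gains₁, gains₂, ...
def subscript(n: int) -> str:
    subscripts = "\u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089"
    return "".join(subscripts[ord(c) - ord('0')] for c in str(n))

print([f"gains{subscript(i)}" for i in range(3)])  # ['gains₀', 'gains₁', 'gains₂']
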
def one_time_correlation(
        images: np.ndarray,
        rois: Iterable[pg.ROI] = None,
        image_item: pg.ImageItem = None,
        num_bufs: int = 16,
        num_levels: int = 8,
) -> Tuple[da.array, da.array, da.array, np.ndarray]:
    if images.ndim < 3:
        raise ValueError(
            f"Cannot compute correlation on data with {images.ndim} dimensions."
        )

    labels = get_label_array(images, rois=rois, image_item=image_item)
    if labels.max() == 0:
        msg.notifyMessage(
            "Please add an ROI over which to calculate one-time correlation.")
        raise ValueError(
            "Please add an ROI over which to calculate one-time correlation.")

    # Trim the image based on labels, and resolve to memory
    si, se = np.where(np.flipud(labels))
    trimmed_images = np.asarray(images[:, si.min():si.max() + 1,
                                       se.min():se.max() + 1])
    trimmed_labels = np.asarray(
        np.flipud(labels)[si.min():si.max() + 1, se.min():se.max() + 1])
    # trimmed_images[trimmed_images <= 0] = np.NaN  # may be necessary to mask values
    trimmed_images -= np.min(trimmed_images, axis=0)

    g2, tau = corr.multi_tau_auto_corr(num_levels, num_bufs,
                                       trimmed_labels.astype(np.uint8),
                                       trimmed_images)
    g2 = g2[1:].squeeze()  # FIXME: is it required to trim the 0th value off the tau and g2 arrays?
    return g2.T, tau[1:], images, labels

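# A self-contained sketch of the multi-tau call above on synthetic data; the argument
# order is (num_levels, num_bufs, labels, images), and num_bufs must be even.
import numpy as np
from skbeam.core.correlation import multi_tau_auto_corr

images = np.random.poisson(10, size=(64, 8, 8)).astype(float)
labels = np.ones((8, 8), dtype=np.uint8)  # a single ROI covering the full frame
g2, tau = multi_tau_auto_corr(2, 16, labels, images)
print(g2.shape, tau.shape)  # one g2 column per label
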
def process(self, processor: CorrelationParameterTree, widget, **kwargs):
    if processor:
        # Pass in a single frame for the data shape
        roiFuture = self.roiworkflow.execute(
            data=self.correlationView.currentWidget().image[0],
            image=self.correlationView.currentWidget().imageItem)
        roiResult = roiFuture.result()
        label = roiResult[-1]["roi"]
        if label is None:
            msg.notifyMessage("Please define an ROI using the toolbar before running correlation.")
            return

        workflow = processor.workflow
        # FIXME -- don't grab first match
        technique = [technique for technique in self.schema()['techniques']
                     if technique['technique'] == 'scattering'][0]
        stream, field = technique['data_mapping']['data_image']
        # TODO: the compute() takes a long time...; do we need to do this here? If so, show a progress bar...
        # Trim the data frames
        catalog = self.currentCatalog()
        data = [getattr(catalog, stream).to_dask()[field][0].where(
            DataArray(label, dims=["dim_1", "dim_2"]), drop=True).compute()]
        # Trim the dark images
        msg.notifyMessage("Skipping dark correction...")
        darks = [None] * len(data)
        dark_stream, dark_field = technique['data_mapping']['dark_image']
        if dark_stream in catalog:
            darks = [getattr(catalog, dark_stream).to_dask()[dark_field][0].where(
                DataArray(label, dims=["dim_1", "dim_2"]), drop=True).compute()]
        else:
            msg.notifyMessage(f'No dark stream named "{dark_stream}" for current catalog. No dark correction.')

        label = label.compress(np.any(label, axis=0), axis=1).compress(np.any(label, axis=1), axis=0)
        labels = [label] * len(data)  # TODO: update for multiple ROIs
        numLevels = [1] * len(data)

        numBufs = []
        for i in range(len(data)):
            shape = data[i].shape[0]
            # multi_tau_corr requires num_bufs to be even
            if shape % 2:
                shape += 1
            numBufs.append(shape)

        if kwargs.get('finished_slot'):
            finishedSlot = kwargs['finished_slot']
        else:
            finishedSlot = self.updateDerivedDataModel

        # workflow_pickle = pickle.dumps(workflow)
        workflow.execute_all(None,
                             # data=data,
                             images=data,
                             darks=darks,
                             labels=labels,
                             finished_slot=partial(finishedSlot, workflow=workflow))

def show_message(self, catalog: BlueskyRun):
    notifyMessage(f'Added catalog {catalog}')

def show_message(self, catalog: BlueskyRun):
    """ <SLOT> """
    notifyMessage('print catalog: {}'.format(catalog))

def show_message(self):
    """ <SLOT> """
    # self designates what widget to show above
    notifyMessage('Add Another 1.')

def exceptionCallback(self, ex):
    msg.notifyMessage("Reconstruction failed;\n see log for error")
    msg.showMessage("Reconstruction failed; see log for error")
    msg.logError(ex)
    msg.showReady()

def load_marked_plugin(self, candidate_infofile, candidate_filepath, plugin_info):
    msg.logMessage(
        f'Loading {plugin_info.name} plugin in {"main" if threads.is_main_thread() else "background"} thread.',
        level=msg.INFO,
    )
    # Make sure to attribute a unique module name to the one that is about to be loaded
    plugin_module_name_template = (
        NormalizePluginNameForModuleName("yapsy_loaded_plugin_" + plugin_info.name) + "_%d"  # why?
    )
    # Make a uniquely numbered module name; again, why?
    for plugin_name_suffix in range(len(sys.modules)):
        plugin_module_name = plugin_module_name_template % plugin_name_suffix
        if plugin_module_name not in sys.modules:
            break

    try:
        # Use importlib to correctly load the plugin as a module
        from importlib._bootstrap_external import _POPULATE

        submodule_search_locations = (
            os.path.dirname(plugin_info.path) if plugin_info.path.endswith("__init__.py") else _POPULATE
        )
        spec = importlib.util.spec_from_file_location(
            plugin_info.name, plugin_info.path, submodule_search_locations=submodule_search_locations
        )
        candidate_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(candidate_module)
    except Exception as ex:
        exc_info = sys.exc_info()
        msg.logMessage("Unable to import plugin: %s" % plugin_info.path, msg.ERROR)
        msg.logError(ex)
        msg.notifyMessage(repr(ex), title=f'The "{plugin_info.name}" plugin could not be loaded.', level=msg.CRITICAL)
        plugin_info.error = exc_info
        self.processed_plugins.append(plugin_info)
        return
    self.processed_plugins.append(plugin_info)

    if "__init__" in os.path.basename(plugin_info.name):  # is this necessary?
        print("Yes, it is?")
        sys.path.remove(plugin_info.path)

    # Now try to find and initialise the first subclass of the correct plugin interface
    #### ADDED BY RP
    dirlist = dir(candidate_module)
    if hasattr(candidate_module, "__plugin_exports__"):
        dirlist = candidate_module.__plugin_exports__
    ####

    with load_timer() as elapsed:  # cm for load timing
        # Try an explicitly defined element first
        element_name = plugin_info.details["Core"].get("Object", None)
        success = False
        if element_name:
            element = getattr(candidate_module, element_name)
            success = self.load_element(element, candidate_infofile, plugin_info)
        if not success:
            for element in (getattr(candidate_module, name) for name in dirlist):  # add filtering?
                success = self.load_element(element, candidate_infofile, plugin_info)
                if success:
                    break

    if success:
        msg.logMessage(f"{int(elapsed() * 1000)} ms elapsed while loading {plugin_info.name}", level=msg.INFO)
    else:
        msg.logMessage(f"No plugin found in indicated module: {candidate_filepath}", msg.ERROR)

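# The importlib pattern above, in isolation: build a spec from an explicit file path,
# create a module from the spec, and execute it. This sketch writes a throwaway module
# to a temp directory so it is self-contained.
import importlib.util
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "demo_plugin.py")
with open(path, "w") as f:
    f.write("GREETING = 'hello from a file-loaded module'\n")

spec = importlib.util.spec_from_file_location("demo_plugin", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
print(module.GREETING)
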
def report_error(self, value, **_):
    text = bytes(value).decode()
    title = "Camera Initialization Error"
    notifyMessage(text, title=title, level=ERROR)

def _instantiate_plugin(self, instantiate_task_request: PluginTask = None):
    """
    Instantiate a single plugin by request or from the queue. This is typically invoked by an event,
    and will re-post an event to the event queue to repeat until the task queue is emptied.
    """
    instantiate_task = None
    if instantiate_task_request or not self._instantiate_queue.empty():
        if instantiate_task_request:
            instantiate_task = instantiate_task_request
        else:
            instantiate_task = self._instantiate_queue.get()

        entrypoint = instantiate_task.entry_point
        type_name = instantiate_task.type_name
        plugin_class = instantiate_task.plugin_class

        # If this plugin was already instantiated earlier, skip it; mark done;
        # also skips if the group isn't active
        if self.type_mapping.get(type_name, {entrypoint.name: True}).get(entrypoint.name, None) is None:
            instantiate_task.status = Status.Instantiating

            # Inject the entrypoint name into the class
            plugin_class._name = entrypoint.name

            # ... and instantiate it (as long as it's supposed to be a singleton)
            plugin_object = plugin_class
            try:
                if getattr(plugin_class, "is_singleton", False):
                    msg.logMessage(f"Instantiating {entrypoint.name} plugin object.", level=msg.INFO)
                    with load_timer() as elapsed:
                        self.type_mapping[type_name][entrypoint.name] = plugin_object = plugin_class()
                    msg.logMessage(
                        f"{int(elapsed() * 1000)} ms elapsed while instantiating {entrypoint.name}",
                        level=msg.INFO
                    )
                else:
                    self.type_mapping[type_name][entrypoint.name] = plugin_class
            except (Exception, SystemError) as ex:
                msg.logMessage(
                    f"Unable to instantiate {entrypoint.name} plugin from module: {entrypoint.module_name}",
                    msg.ERROR
                )
                msg.logError(ex)
                msg.notifyMessage(repr(ex), title=f'An error occurred while starting the "{entrypoint.name}" plugin.')
                instantiate_task.status = Status.FailedInstantiate
            else:
                # Inject useful info into the plugin
                plugin_object._entrypoint_name = entrypoint.name
                plugin_object._plugin_type = type_name
                msg.logMessage(f"Successfully collected {entrypoint.name} plugin.", level=msg.INFO)
                self._notify(Filters.UPDATE)
                msg.showProgress(self._progress_count(), maxval=self._task_count())

        # Mark it as completed
        if instantiate_task_request is None:
            self._instantiate_queue.task_done()
        instantiate_task.status = Status.Success

    # If this was the last plugin
    if self._load_queue.empty() and self._instantiate_queue.empty():
        msg.logMessage("Plugin collection completed!")
        msg.hideProgress()
        self._notify(Filters.COMPLETE)
        self.instantiating = False
        self._tasks.clear()
    elif instantiate_task_request is None:
        # If we haven't reached the last task, but there's nothing queued,
        threads.invoke_as_event(self._instantiate_plugin)  # return to the event loop, but come back soon
