def _handle_failure( exc, exc_info ):
    # Failure callback for an export job.
    # NOTE: 'self' is a free variable from the enclosing scope -- this is a
    #       nested callback, not a method (no 'self' in the signature).
    # Clear the busy flag and ask the shell to refresh applet state.
    self.applet.busy = False
    self.applet.appletStateUpdateRequested.emit()
    msg = "Exception raised during export. See traceback above.\n"
    log_exception( logger, msg, exc_info=exc_info )
    # Drive the progress bar to 100 so the shell doesn't hang at a partial value,
    # then let the user try again.
    self.applet.progressSignal.emit(100)
    self._drawer.exportButton.setEnabled(True)
def addFiles(self, roleIndex, startingLane=None):
    """
    The user clicked the "Add File" button.
    Ask him to choose a file (or several) and add them to both
    the GUI table and the top-level operator inputs.

    :param roleIndex: Which dataset role the new files belong to.
    :param startingLane: Lane to start inserting at (None appends).
    """
    # Find the directory of the most recently opened image file
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    # BUGFIX: test for None *before* converting to str.  Previously the value
    # was stringified first, so a missing preference became the truthy string
    # "None" and the home-directory fallback below was unreachable.
    if mostRecentImageFile is not None:
        defaultDirectory = os.path.split(str(mostRecentImageFile))[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    # Launch the "Open File" dialog
    fileNames = self.getImageFileNamesToOpen(self, defaultDirectory)

    # If the user didn't cancel
    if len(fileNames) > 0:
        PreferencesManager().set('DataSelection', 'recent image', fileNames[0])
        try:
            self.addFileNames(fileNames, roleIndex, startingLane)
        except Exception as ex:
            log_exception( logger )
            QMessageBox.critical(self, "Error loading file", str(ex))
def handleFailedStackLoad(self, files, originalNumLanes, exc, exc_info):
    """
    Report a failed image-stack load to the log and the user, then roll
    the top-level operator back to its previous lane count.
    """
    msg = (
        "Failed to load stack due to the following error:\n{}".format( exc )
        + "\nAttempted stack files were:\n"
        + "\n".join(files)
    )
    log_exception( logger, msg, exc_info )
    QMessageBox.critical(self, "Failed to load image stack", msg)
    # Undo the partial lane expansion.
    self.topLevelOperator.DatasetGroup.resize(originalNumLanes)
def saveProject(self, force_all_save=False):
    """
    Update the project file with the state of the current workflow settings.
    Must not be called if the project file was opened in read-only mode.

    :param force_all_save: Serialize every applet even if it isn't dirty.
    :raises ProjectManager.SaveError: if any serializer fails.
    """
    logger.debug("Save Project triggered")
    # Identity comparison with None per PEP 8 (was '!= None').
    assert self.currentProjectFile is not None
    assert self.currentProjectPath is not None
    assert not self.currentProjectIsReadOnly, "Can't save a read-only project"

    # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
    # the progress manager treats these tasks as a group instead of several sequential jobs.
    for aplt in self._applets:
        for ser in aplt.dataSerializers:
            if ser.isDirty():
                aplt.progressSignal.emit(0)
    try:
        # Applet serializable items are given the whole file (root group) for now
        for aplt in self._applets:
            for serializer in aplt.dataSerializers:
                assert serializer.base_initialized, "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                if force_all_save or serializer.isDirty() or serializer.shouldSerialize(self.currentProjectFile):
                    serializer.serializeToHdf5(self.currentProjectFile, self.currentProjectPath)

        # save the current workflow as standard workflow
        if "workflowName" in self.currentProjectFile:
            del self.currentProjectFile["workflowName"]
        self.currentProjectFile.create_dataset("workflowName", data = self.workflow.workflowName)
    # BUGFIX: 'except Exception, err' is Python-2-only syntax; the 'as' form
    # is accepted by both Python 2.6+ and Python 3 (matches L12/L22/L23).
    except Exception as err:
        log_exception( logger, "Project Save Action failed due to the exception shown above." )
        raise ProjectManager.SaveError( str(err) )
def saveProjectSnapshot(self, snapshotPath):
    """
    Copy the project file as it is, then serialize any dirty state into the copy.
    Original serializers and project file should not be touched.

    :param snapshotPath: Filesystem path for the new snapshot project file.
    :raises ProjectManager.SaveError: if any serializer fails.
    """
    with h5py.File(snapshotPath, 'w') as snapshotFile:
        # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
        # the progress manager treats these tasks as a group instead of several sequential jobs.
        for aplt in self._applets:
            for ser in aplt.dataSerializers:
                if ser.isDirty():
                    aplt.progressSignal.emit(0)

        # Start by copying the current project state into the file
        # This should be faster than serializing everything from scratch
        for key in self.currentProjectFile.keys():
            snapshotFile.copy(self.currentProjectFile[key], key)

        try:
            # Applet serializable items are given the whole file (root group) for now
            for aplt in self._applets:
                for serializer in aplt.dataSerializers:
                    assert serializer.base_initialized, "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                    if serializer.isDirty() or serializer.shouldSerialize(self.currentProjectFile):
                        # Use a COPY of the serializer, so the original serializer doesn't forget it's dirty state
                        serializerCopy = copy.copy(serializer)
                        serializerCopy.serializeToHdf5(snapshotFile, snapshotPath)
        # BUGFIX: 'except Exception, err' is Python-2-only syntax.
        except Exception as err:
            log_exception( logger, "Project Save Snapshot Action failed due to the exception printed above." )
            raise ProjectManager.SaveError(str(err))
        finally:
            # BUGFIX: the original ended with a bare 'finally:' (no body -- a
            # syntax error).  Restored the cleanup the py3 variant of this
            # method performs: flush to disk and complete the progress bars.
            snapshotFile.flush()
            for applet in self._applets:
                applet.progressSignal.emit(100)
def _applyNormalizeDisplayToTempOps(self):
    """Push the display-normalization choice (and data range) into every temp operator."""
    norm_choice = {"True": True, "False": False, "Default": None}[str(self.normalizeDisplayComboBox.currentText())]
    new_drange = ( self.rangeMinSpinBox.value(), self.rangeMaxSpinBox.value() )
    drange_unset = (new_drange[0] == self.rangeMinSpinBox.minimum()
                    or new_drange[1] == self.rangeMaxSpinBox.minimum())
    if norm_choice is False and drange_unset:
        # no drange given, autonormalization cannot be switched off !
        QMessageBox.warning(None, "Warning", "Normalization cannot be switched off without specifying the data range !")
        self.normalizeDisplayComboBox.setCurrentIndex(1)
        return

    # Snapshot every lane's current info so a failure can be rolled back.
    saved_infos = {}
    for lane, op in self.tempOps.items():
        saved_infos[lane] = copy.copy( op.Dataset.value )
    try:
        for lane, op in self.tempOps.items():
            updated = copy.copy( op.Dataset.value )
            updated.normalizeDisplay = norm_choice
            op.Dataset.setValue( updated )
        self._error_fields.discard('Normalize Display')
        return True
    except Exception as exc:
        # Revert everything back to the previous state
        for lane, op in self.tempOps.items():
            op.Dataset.setValue( saved_infos[lane] )
        msg = "Could not apply normalization settings due to an exception:\n"
        msg += "{}".format( exc )
        log_exception( logger, msg )
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Normalize Display')
        return False
def _applyInternalPathToTempOps(self, index):
    """Apply the selected internal HDF5 dataset name to every lane's temp operator."""
    if index == -1:
        return
    chosen_path = str( self.internalDatasetNameComboBox.currentText() )

    # Snapshot current settings so we can roll back on failure.
    previous = {}
    for lane, op in self.tempOps.items():
        previous[lane] = copy.copy( op.Dataset.value )

    # Attempt to apply to all temp operators
    try:
        for lane, op in self.tempOps.items():
            info = copy.copy( op.Dataset.value )
            components = PathComponents(info.filePath)
            if components.internalPath != chosen_path:
                components.internalPath = chosen_path
                info.filePath = components.totalPath()
                op.Dataset.setValue( info )
        self._error_fields.discard('Internal Dataset Name')
        return True
    except Exception as exc:
        # Roll every lane back to its saved settings.
        for lane, op in self.tempOps.items():
            op.Dataset.setValue( previous[lane] )
        msg = "Could not set new internal path settings due to an exception:\n"
        msg += "{}".format( exc )
        log_exception( logger, msg )
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Internal Dataset Name')
        return False
def _applyDisplayModeToTempOps(self, index):
    """Apply the chosen channel display mode to every lane's temp operator."""
    if index == -1:
        return
    chosen_mode = str( self.displayModeComboBox.itemData( index ).toString() )

    # Snapshot each lane's info for rollback.
    backups = {}
    for lane, op in self.tempOps.items():
        backups[lane] = copy.copy( op.Dataset.value )

    # Attempt to apply to all temp operators
    try:
        for lane, op in self.tempOps.items():
            info = copy.copy( op.Dataset.value )
            if info.display_mode != chosen_mode:
                info.display_mode = chosen_mode
                op.Dataset.setValue( info )
        self._error_fields.discard('Channel Display')
        return True
    except Exception as exc:
        # Restore the saved settings on every lane.
        for lane, op in self.tempOps.items():
            op.Dataset.setValue( backups[lane] )
        msg = "Could not set new channel display settings due to an exception:\n"
        msg += "{}".format( exc )
        log_exception( logger, msg )
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Channel Display')
        return False
    finally:
        # Always resync the combo with the (possibly reverted) operator state.
        self._updateDisplayModeCombo()
def cleanUp(self):
    """
    Should be called when the Projectmanager is canceled. Closes the project file.
    """
    try:
        self._closeCurrentProject()
    # FIX: 'except Exception,e' is Python-2-only syntax, and 'raise e'
    # discards the original traceback under Python 2 -- a bare 'raise'
    # re-raises with the traceback intact.
    except Exception:
        log_exception( logger )
        raise
def _applyStorageComboToTempOps(self, index):
    """
    Apply the storage-location choice (project file / absolute link / relative
    link) from the combo box to every lane's temp operator, rolling back on failure.
    """
    if index == -1:
        return
    # itemData returns a QVariant; toInt() yields (value, ok) -- PyQt4-style API.
    newStorageLocation, goodcast = self.storageComboBox.itemData( index ).toInt()
    assert goodcast
    # Save a copy of our settings
    oldInfos = {}
    for laneIndex, op in self.tempOps.items():
        oldInfos[laneIndex] = copy.copy( op.Dataset.value )
    # Attempt to apply to all temp operators
    try:
        for laneIndex, op in self.tempOps.items():
            info = copy.copy( op.Dataset.value )
            # Classify the lane's *current* storage location.
            if info.location == DatasetInfo.Location.ProjectInternal:
                thisLaneStorage = StorageLocation.ProjectFile
            elif info.location == DatasetInfo.Location.FileSystem:
                # Determine if the path is relative or absolute
                if isUrl(info.filePath) or os.path.isabs(info.filePath):
                    thisLaneStorage = StorageLocation.AbsoluteLink
                else:
                    thisLaneStorage = StorageLocation.RelativeLink
            # NOTE(review): if info.location were neither ProjectInternal nor
            # FileSystem, 'thisLaneStorage' would be unbound here -- presumably
            # those are the only Location values; confirm against DatasetInfo.
            if thisLaneStorage != newStorageLocation:
                if newStorageLocation == StorageLocation.ProjectFile:
                    info.location = DatasetInfo.Location.ProjectInternal
                else:
                    # Switching to a filesystem link: rewrite the path as
                    # relative or absolute to match the requested link type.
                    info.location = DatasetInfo.Location.FileSystem
                    cwd = op.WorkingDirectory.value
                    absPath, relPath = getPathVariants( info.filePath, cwd )
                    if relPath is not None and newStorageLocation == StorageLocation.RelativeLink:
                        info.filePath = relPath
                    elif newStorageLocation == StorageLocation.AbsoluteLink:
                        info.filePath = absPath
                    else:
                        assert False, "Unknown storage location setting."
                op.Dataset.setValue( info )
        self._error_fields.discard('Storage Location')
        return True
    except Exception as e:
        # Revert everything back to the previous state
        for laneIndex, op in self.tempOps.items():
            op.Dataset.setValue( oldInfos[laneIndex] )
        msg = "Could not set new storage location settings due to an exception:\n"
        msg += "{}".format( e )
        log_exception( logger, msg )
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Storage Location')
        return False
    finally:
        # Always resync the combo with the (possibly reverted) operator state.
        self._updateStorageCombo()
def cleanUp(self):
    """
    Should be called when the Projectmanager is canceled. Closes the project file.
    """
    try:
        self._closeCurrentProject()
    # FIX: 'except Exception, e' is Python-2-only syntax, and 'raise e'
    # discards the original traceback under Python 2 -- a bare 'raise'
    # re-raises with the traceback intact.
    except Exception:
        log_exception(logger)
        raise
def saveProject(self, force_all_save=False):
    """
    Update the project file with the state of the current workflow settings.
    Must not be called if the project file was opened in read-only mode.

    :param force_all_save: Serialize every applet even if it isn't dirty.
    :raises ProjectManager.SaveError: if any serializer fails.
    """
    logger.debug("Save Project triggered")
    # Identity comparison with None per PEP 8 (was '!= None').
    assert self.currentProjectFile is not None
    assert self.currentProjectPath is not None
    assert not self.currentProjectIsReadOnly, "Can't save a read-only project"

    # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
    # the progress manager treats these tasks as a group instead of several sequential jobs.
    for aplt in self._applets:
        for ser in aplt.dataSerializers:
            if ser.isDirty():
                aplt.progressSignal(0)
    try:
        # Applet serializable items are given the whole file (root group) for now
        file_changed = False
        for aplt in self._applets:
            for serializer in aplt.dataSerializers:
                assert (
                    serializer.base_initialized
                ), "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                if force_all_save or serializer.isDirty() or serializer.shouldSerialize(self.currentProjectFile):
                    serializer.serializeToHdf5(self.currentProjectFile, self.currentProjectPath)
                    file_changed = True

        # Only bump the project-format version if something was actually written.
        if file_changed:
            Project(self.currentProjectFile).updateVersion()

        # save the current workflow as standard workflow
        if "workflowName" in self.currentProjectFile:
            del self.currentProjectFile["workflowName"]
        self.currentProjectFile.create_dataset(
            "workflowName", data=self.workflow.workflowName.encode("utf-8"))
    except Exception as err:
        log_exception(
            logger, "Project Save Action failed due to the exception shown above.")
        # Chain the cause explicitly so the serializer failure stays visible.
        raise ProjectManager.SaveError(str(err)) from err
    finally:
        # save current time
        if "time" in self.currentProjectFile:
            del self.currentProjectFile["time"]
        self.currentProjectFile.create_dataset(
            "time", data=time.ctime().encode("utf-8"))
        # Flush any changes we made to disk, but don't close the file.
        self.currentProjectFile.flush()
        for applet in self._applets:
            applet.progressSignal(100)
def _addNewCrop(self):
    """
    Add a new crop to the crop list GUI control and sync it to the operator.
    (Docstring moved before the first statement -- previously it followed a
    statement and was not a real docstring.)
    """
    QApplication.setOverrideCursor(Qt.WaitCursor)
    try:
        color = self.getNextCropColor()
        crop = Crop(self.getNextCropName(), self.get_roi_4d(), color, pmapColor=self.getNextPmapColor())
        crop.nameChanged.connect(self._updateCropShortcuts)
        crop.nameChanged.connect(self.onCropNameChanged)
        crop.colorChanged.connect(self.onCropColorChanged)
        crop.pmapColorChanged.connect(self.onPmapColorChanged)

        newRow = self._cropControlUi.cropListModel.rowCount()
        self._cropControlUi.cropListModel.insertRow(newRow, crop)

        if self._allowDeleteLastCropOnly:
            # make previous crop unremovable
            if newRow > 0:
                self._cropControlUi.cropListModel.makeRowPermanent(newRow - 1)

        newColorIndex = self._cropControlUi.cropListModel.index(newRow, 0)
        self.onCropListDataChanged( newColorIndex, newColorIndex )  # Make sure crop layer colortable is in sync with the new color

        # Update operator with new name
        operator_names = self._croppingSlots.cropNames.value
        if len(operator_names) < self._cropControlUi.cropListModel.rowCount():
            operator_names.append(crop.name)
            try:
                self._croppingSlots.cropNames.setValue(operator_names, check_changed=False)
            except:
                # I have no idea why this is, but sometimes PyQt "loses" exceptions here.
                # Print it out before it's too late!
                log_exception( logger, "Logged the above exception just in case PyQt loses it.")
                raise

        # Call the 'changed' callbacks immediately to initialize any listeners
        self.onCropNameChanged()
        self.onCropColorChanged()
        self.onPmapColorChanged()

        self._maxCropNumUsed += 1
        self._updateCropShortcuts()
        # (Removed unused local 'e' -- its value was never read.)
    finally:
        # BUGFIX: restore the cursor even if an exception escapes above;
        # previously a failure left the wait-cursor stuck on.
        QApplication.restoreOverrideCursor()
def _loadProject(self, hdf5File, projectFilePath, readOnly): """ Load the data from the given hdf5File (which should already be open). :param hdf5File: An already-open h5py.File, usually created via ``ProjectManager.createBlankProjectFile`` :param projectFilePath: The path to the file represented in the ``hdf5File`` parameter. :param readOnly: Set to True if the project file should NOT be modified. """ # We are about to create a LOT of tiny objects. # Temporarily disable garbage collection while we do this. gc.disable() assert self.currentProjectFile is None # Minor GUI nicety: Pre-activate the progress signals for all applets so # the progress manager treats these tasks as a group instead of several sequential jobs. for aplt in self._applets: aplt.progressSignal.emit(0) # Save this as the current project self.currentProjectFile = hdf5File self.currentProjectPath = projectFilePath self.currentProjectIsReadOnly = readOnly try: # Applet serializable items are given the whole file (root group) for aplt in self._applets: with Timer() as timer: for serializer in aplt.dataSerializers: assert serializer.base_initialized, "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction." serializer.ignoreDirty = True if serializer.caresOfHeadless: serializer.deserializeFromHdf5(self.currentProjectFile, projectFilePath, self._headless) else: serializer.deserializeFromHdf5(self.currentProjectFile, projectFilePath) serializer.ignoreDirty = False logger.debug('Deserializing applet "{}" took {} seconds'.format( aplt.name, timer.seconds() )) self.closed = False # Call the workflow's custom post-load initialization (if any) self.workflow.onProjectLoaded( self ) self.workflow.handleAppletStateUpdateRequested() except: msg = "Project could not be loaded due to the exception shown above.\n" msg += "Aborting Project Open Action" log_exception( logger, msg ) self._closeCurrentProject() raise finally: gc.enable() for aplt in self._applets: aplt.progressSignal.emit(100)
def exportSlots(self, laneViewList ):
    """
    Export every lane in laneViewList, reporting aggregate progress and
    keeping the applet/GUI disabled for the duration.
    """
    try:
        # Set the busy flag so the workflow knows not to allow
        # upstream changes or shell changes while we're exporting
        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested.emit()

        # Disable our own gui.  Posted as thunk events because this runs off
        # the GUI thread; 'setEnabledIfAlive' guards against widget deletion.
        QApplication.instance().postEvent( self, ThunkEvent( partial(self.setEnabledIfAlive, self.drawer, False) ) )
        QApplication.instance().postEvent( self, ThunkEvent( partial(self.setEnabledIfAlive, self, False) ) )

        # Start with 1% so the progress bar shows up
        self.progressSignal.emit(0)
        self.progressSignal.emit(1)

        def signalFileProgress(slotIndex, percent):
            # Map one lane's 0-100% into its slice of the overall bar.
            self.progressSignal.emit( (100*slotIndex + percent) / len(laneViewList) )

        for i, opLaneView in enumerate(laneViewList):
            logger.debug("Exporting result {}".format(i))

            # If the operator provides a progress signal, use it.
            slotProgressSignal = opLaneView.progressSignal
            slotProgressSignal.subscribe( partial(signalFileProgress, i) )

            try:
                opLaneView.run_export()
            except Exception as ex:
                # Report per-lane failure but keep exporting the remaining lanes.
                if opLaneView.ExportPath.ready():
                    msg = "Failed to generate export file: \n"
                    msg += opLaneView.ExportPath.value
                    msg += "\n{}".format( ex )
                else:
                    msg = "Failed to generate export file."
                    msg += "\n{}".format( ex )
                log_exception( logger, msg )
                self.showExportError(msg)

            # We're finished with this file.
            self.progressSignal.emit( 100*(i+1)/float(len(laneViewList)) )

        # Ensure the shell knows we're really done.
        self.progressSignal.emit(100)
    except:
        # Cancel our progress.  (Second arg presumably flags cancellation --
        # TODO confirm against the progressSignal declaration.)
        self.progressSignal.emit(0, True)
        raise
    finally:
        # We're not busy any more.  Tell the workflow.
        self.parentApplet.busy = False
        self.parentApplet.appletStateUpdateRequested.emit()

        # Re-enable our own gui
        QApplication.instance().postEvent( self, ThunkEvent( partial(self.setEnabledIfAlive, self.drawer, True) ) )
        QApplication.instance().postEvent( self, ThunkEvent( partial(self.setEnabledIfAlive, self, True) ) )
def _addNewLabel(self):
    """
    Add a new label to the label list GUI control, select it, and enable labeling.
    (Docstring moved before the first statement -- previously it followed a
    statement and was not a real docstring.)
    """
    QApplication.setOverrideCursor(Qt.WaitCursor)
    try:
        label = Label(
            self.getNextLabelName(),
            self.getNextLabelColor(),
            pmapColor=self.getNextPmapColor(),
        )
        label.nameChanged.connect(self._updateLabelShortcuts)
        label.nameChanged.connect(self.onLabelNameChanged)
        label.colorChanged.connect(self.onLabelColorChanged)
        label.pmapColorChanged.connect(self.onPmapColorChanged)

        newRow = self._labelControlUi.labelListModel.rowCount()
        self._labelControlUi.labelListModel.insertRow( newRow, label )

        if self._allowDeleteLastLabelOnly:
            # make previous label unremovable
            if newRow > 0:
                self._labelControlUi.labelListModel.makeRowPermanent(newRow - 1)

        newColorIndex = self._labelControlUi.labelListModel.index(newRow, 0)
        self.onLabelListDataChanged(newColorIndex, newColorIndex)  # Make sure label layer colortable is in sync with the new color

        # Update operator with new name
        operator_names = self._labelingSlots.labelNames.value
        if len(operator_names) < self._labelControlUi.labelListModel.rowCount():
            operator_names.append( label.name )
            try:
                self._labelingSlots.labelNames.setValue( operator_names, check_changed=False )
            except:
                # I have no idea why this is, but sometimes PyQt "loses" exceptions here.
                # Print it out before it's too late!
                log_exception( logger, "Logged the above exception just in case PyQt loses it." )
                raise

        # Call the 'changed' callbacks immediately to initialize any listeners
        self.onLabelNameChanged()
        self.onLabelColorChanged()
        self.onPmapColorChanged()

        # Make the new label selected
        nlabels = self._labelControlUi.labelListModel.rowCount()
        selectedRow = nlabels-1
        self._labelControlUi.labelListModel.select(selectedRow)

        self._updateLabelShortcuts()

        e = self._labelControlUi.labelListModel.rowCount() > 0
        self._gui_enableLabeling(e)
    finally:
        # BUGFIX: restore the cursor even if an exception escapes above;
        # previously a failure left the wait-cursor stuck on.
        QApplication.restoreOverrideCursor()
def addFileNames(self, paths: List[Path], startingLaneNum: int, roleIndex: int):
    """Build DatasetInfos for the chosen paths and add them as new lanes."""
    # An empty/None list means the user cancelled the file dialog.
    if not paths:
        return
    try:
        infos = self._createDatasetInfos(roleIndex, paths)
        self.addLanes(infos, roleIndex=roleIndex, startingLaneNum=startingLaneNum)
    except DataSelectionGui.UserCancelledError:
        pass
    except Exception as ex:
        log_exception(logger)
        QMessageBox.critical(self, "Error loading file", str(ex))
def _track():
    # Closure: gathers tracking parameters from the drawer widgets and runs
    # the tracking operator.  NOTE: 'self' is a free variable from the
    # enclosing scope -- this is a nested callback, not a method.
    self.applet.busy = True
    self.applet.appletStateUpdateRequested.emit()
    app = self._drawer.appSpinBox.value()
    dis = self._drawer.disSpinBox.value()
    opp = self._drawer.oppSpinBox.value()
    noiserate = self._drawer.noiseRateSpinBox.value()
    noiseweight = self._drawer.noiseWeightSpinBox.value()
    epGap = self._drawer.epGapSpinBox.value()
    n_neighbors = self._drawer.nNeighborsSpinBox.value()
    with_div = self._drawer.withDivisionsBox.isChecked()
    cplex_timeout = None
    # An empty timeout box means "no timeout".
    if len(str(self._drawer.timeoutBox.text())):
        cplex_timeout = int(self._drawer.timeoutBox.text())

    from_t = self._drawer.from_time.value()
    to_t = self._drawer.to_time.value()
    from_x = self._drawer.from_x.value()
    to_x = self._drawer.to_x.value()
    from_y = self._drawer.from_y.value()
    to_y = self._drawer.to_y.value()
    from_z = self._drawer.from_z.value()
    to_z = self._drawer.to_z.value()
    from_size = self._drawer.from_size.value()
    to_size = self._drawer.to_size.value()

    try:
        self.mainOperator.track(time_range=range(from_t, to_t + 1),
                                x_range=(from_x, to_x + 1),
                                y_range=(from_y, to_y + 1),
                                z_range=(from_z, to_z + 1),
                                size_range=(from_size, to_size + 1),
                                x_scale=self._drawer.x_scale.value(),
                                y_scale=self._drawer.y_scale.value(),
                                z_scale=self._drawer.z_scale.value(),
                                app=app,
                                dis=dis,
                                noiserate=noiserate,
                                noiseweight=noiseweight,
                                opp=opp,
                                ep_gap=epGap,
                                n_neighbors=n_neighbors,
                                with_div=with_div,
                                cplex_timeout=cplex_timeout)
    # BUGFIX: bind the exception ('as ex') -- the message below references
    # 'ex', so the original raised a NameError while reporting the error.
    except Exception as ex:
        log_exception(
            logger, "Error during tracking. See above error traceback.")
        self._criticalMessage(
            "Error during tracking. See error log.\n\n"
            "Exception was:\n\n{})".format(ex))
        return
def _update_subvol_widget(self, node_uuid, dataname, typename):
    """
    Update the subvolume widget with the min/max extents of the given node and dataname.
    Note: The node and dataname do not necessarily have to match the currently
          selected node and dataname.  This enables the right-click behavior,
          which can be used to limit your data volume to the size of a
          different data volume.
    """
    error_msg = None
    try:
        if typename == "roi":
            node_service = DVIDNodeService(self._hostname, str(node_uuid))
            roi_blocks_zyx = numpy.array( node_service.get_roi(str(dataname)))
            # Block coordinates scaled to voxel coordinates.
            maxindex = tuple(DVID_BLOCK_WIDTH * (1 + numpy.max(roi_blocks_zyx, axis=0)))
            minindex = (0, 0, 0)  # Rois are always 3D
            axiskeys = "zyx"
            # If the current selection is a dataset, then include a channel dimension
            if self.get_selection().typename != "roi":
                axiskeys = "zyxc"
                # Channel is appended last to match the trailing 'c' in "zyxc".
                minindex = minindex + (0, )
                maxindex = maxindex + ( 1, )  # FIXME: This assumes that the selected data has only 1 channel...
        else:
            # Query the server
            raw_metadata = VoxelsAccessor.get_metadata( self._hostname, node_uuid, dataname)
            voxels_metadata = VoxelsMetadata(raw_metadata)
            maxindex = voxels_metadata.shape
            minindex = voxels_metadata.minindex
            axiskeys = voxels_metadata.axiskeys
            # If the current selection is a roi, then remove the channel dimension
            if self.get_selection().typename == "roi":
                axiskeys = "zyx"
                minindex = minindex[:-1]
                maxindex = maxindex[:-1]
    except (DVIDException, ErrMsg) as ex:
        error_msg = str(ex)
        log_exception(logger)
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        QMessageBox.critical(self, "DVID Error", error_msg)
        # Clear the widget so stale extents aren't shown.
        self._subvol_widget.initWithExtents("", (), (), ())
        return

    # Signature presumably (axiskeys, shape, start, stop); here shape equals
    # maxindex -- TODO confirm against initWithExtents.
    self._subvol_widget.initWithExtents(axiskeys, maxindex, minindex, maxindex)
def _addNewCrop(self):
    """
    Add a new crop to the crop list GUI control and sync it to the operator.
    (Docstring moved before the first statement -- previously it followed a
    statement and was not a real docstring.)
    """
    QApplication.setOverrideCursor(Qt.WaitCursor)
    try:
        color = self.getNextCropColor()
        crop = Crop(
            self.getNextCropName(),
            self.get_roi_4d(),
            color,
            pmapColor=self.getNextPmapColor(),
        )
        crop.nameChanged.connect(self._updateCropShortcuts)
        crop.nameChanged.connect(self.onCropNameChanged)
        crop.colorChanged.connect(self.onCropColorChanged)
        crop.pmapColorChanged.connect(self.onPmapColorChanged)

        newRow = self._cropControlUi.cropListModel.rowCount()
        self._cropControlUi.cropListModel.insertRow( newRow, crop )

        if self._allowDeleteLastCropOnly:
            # make previous crop unremovable
            if newRow > 0:
                self._cropControlUi.cropListModel.makeRowPermanent(newRow - 1)

        newColorIndex = self._cropControlUi.cropListModel.index(newRow, 0)
        self.onCropListDataChanged(newColorIndex, newColorIndex)  # Make sure crop layer colortable is in sync with the new color

        # Update operator with new name
        operator_names = self._croppingSlots.cropNames.value
        if len(operator_names) < self._cropControlUi.cropListModel.rowCount():
            operator_names.append( crop.name )
            try:
                self._croppingSlots.cropNames.setValue( operator_names, check_changed=False )
            except:
                # I have no idea why this is, but sometimes PyQt "loses" exceptions here.
                # Print it out before it's too late!
                log_exception( logger, "Logged the above exception just in case PyQt loses it." )
                raise

        # Call the 'changed' callbacks immediately to initialize any listeners
        self.onCropNameChanged()
        self.onCropColorChanged()
        self.onPmapColorChanged()

        self._maxCropNumUsed += 1
        self._updateCropShortcuts()
        # (Removed unused local 'e' -- its value was never read.)
    finally:
        # BUGFIX: restore the cursor even if an exception escapes above;
        # previously a failure left the wait-cursor stuck on.
        QApplication.restoreOverrideCursor()
def _track():
    # Closure: gathers tracking parameters from the drawer widgets and runs
    # the tracking operator.  NOTE: 'self' is a free variable from the
    # enclosing scope -- this is a nested callback, not a method.
    self.applet.busy = True
    self.applet.appletStateUpdateRequested.emit()
    app = self._drawer.appSpinBox.value()
    dis = self._drawer.disSpinBox.value()
    opp = self._drawer.oppSpinBox.value()
    noiserate = self._drawer.noiseRateSpinBox.value()
    noiseweight = self._drawer.noiseWeightSpinBox.value()
    epGap = self._drawer.epGapSpinBox.value()
    n_neighbors = self._drawer.nNeighborsSpinBox.value()
    with_div = self._drawer.withDivisionsBox.isChecked()
    cplex_timeout = None
    # An empty timeout box means "no timeout".
    if len(str(self._drawer.timeoutBox.text())):
        cplex_timeout = int(self._drawer.timeoutBox.text())

    from_t = self._drawer.from_time.value()
    to_t = self._drawer.to_time.value()
    from_x = self._drawer.from_x.value()
    to_x = self._drawer.to_x.value()
    from_y = self._drawer.from_y.value()
    to_y = self._drawer.to_y.value()
    from_z = self._drawer.from_z.value()
    to_z = self._drawer.to_z.value()
    from_size = self._drawer.from_size.value()
    to_size = self._drawer.to_size.value()

    try:
        self.mainOperator.track(
            time_range = range(from_t, to_t + 1),
            x_range = (from_x, to_x + 1),
            y_range = (from_y, to_y + 1),
            z_range = (from_z, to_z + 1),
            size_range = (from_size, to_size + 1),
            x_scale = self._drawer.x_scale.value(),
            y_scale = self._drawer.y_scale.value(),
            z_scale = self._drawer.z_scale.value(),
            app=app,
            dis=dis,
            noiserate = noiserate,
            noiseweight = noiseweight,
            opp=opp,
            ep_gap=epGap,
            n_neighbors=n_neighbors,
            with_div=with_div,
            cplex_timeout=cplex_timeout)
    # BUGFIX: bind the exception ('as ex') -- the message below references
    # 'ex', so the original raised a NameError while reporting the error.
    except Exception as ex:
        log_exception(logger, "Error during tracking. See above error traceback.")
        self._criticalMessage("Error during tracking. See error log.\n\n"
                              "Exception was:\n\n{})".format( ex ))
        return
def saveProjectSnapshot(self, snapshotPath):
    """
    Copy the project file as it is, then serialize any dirty state into the copy.
    Original serializers and project file should not be touched.

    :param snapshotPath: Filesystem path for the new snapshot project file.
    """
    with h5py.File(snapshotPath, "w") as snapshotFile:
        # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
        # the progress manager treats these tasks as a group instead of several sequential jobs.
        for aplt in self._applets:
            for ser in aplt.dataSerializers:
                if ser.isDirty():
                    aplt.progressSignal(0)

        # Start by copying the current project state into the file
        # This should be faster than serializing everything from scratch
        for key in list(self.currentProjectFile.keys()):
            snapshotFile.copy(self.currentProjectFile[key], key)

        try:
            # Applet serializable items are given the whole file (root group) for now
            for aplt in self._applets:
                for serializer in aplt.dataSerializers:
                    assert (
                        serializer.base_initialized
                    ), "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                    if serializer.isDirty() or serializer.shouldSerialize(
                            self.currentProjectFile):
                        # Use a COPY of the serializer, so the original serializer doesn't forget it's dirty state
                        serializerCopy = copy.copy(serializer)
                        serializerCopy.serializeToHdf5(
                            snapshotFile, snapshotPath)
        except Exception as err:
            log_exception(
                logger,
                "Project Save Snapshot Action failed due to the exception printed above."
            )
            raise ProjectManager.SaveError(str(err))
        finally:
            # save current time
            if "time" in snapshotFile:
                del snapshotFile["time"]
            snapshotFile.create_dataset("time", data=time.ctime().encode("utf-8"))
            # Flush any changes we made to disk, but don't close the file.
            snapshotFile.flush()
            for applet in self._applets:
                applet.progressSignal(100)
def _configureOpWithInfos(self, roleIndex, startingLane, endingLane, infos):
    """
    Attempt to configure the specified role and lanes of the
    top-level operator with the given DatasetInfos.

    Returns True if all lanes were configured successfully, or False if
    something went wrong (changes are rolled back before returning False).
    """
    opTop = self.topLevelOperator
    originalSize = len(opTop.DatasetGroup)

    # Resize the slot if necessary
    if len(opTop.DatasetGroup) < endingLane + 1:
        opTop.DatasetGroup.resize(endingLane + 1)

    # Configure each subslot.  (Dropped the redundant list() around range --
    # zip consumes any iterable.)
    for laneIndex, info in zip(range(startingLane, endingLane + 1), infos):
        try:
            self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue(info)
        except DatasetConstraintError as ex:
            return_val = [False]
            # Give the user a chance to fix the problem
            self.handleDatasetConstraintError(info, info.filePath, ex,
                                              roleIndex, laneIndex, return_val)
            if return_val[0]:
                # Successfully repaired graph.
                continue
            else:
                # Not successfully repaired.  Roll back the changes
                self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
                return False
        except OpDataSelection.InvalidDimensionalityError as ex:
            self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
            # BUGFIX: Python 3 exceptions have no '.message' attribute
            # (ex.message raised AttributeError here) -- use str(ex).
            QMessageBox.critical(self, "Dataset has different dimensionality",
                                 str(ex))
            return False
        except Exception:
            self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
            msg = "Wasn't able to load your dataset into the workflow. See error log for details."
            log_exception(logger, msg)
            QMessageBox.critical(self, "Dataset Load Error", msg)
            return False
    return True
def addFileNames(self, paths: List[Path], startingLaneNum: int, roleIndex: int):
    """Add one new lane per path; report per-path failures without aborting the rest."""
    # None/empty means the user cancelled the file dialog.
    if not paths:
        return
    for chosen in paths:
        try:
            resolved = self._get_dataset_full_path(chosen, roleIndex=roleIndex)
            info = self.instantiate_dataset_info(url=str(resolved), role=roleIndex)
            self.addLanes([info], roleIndex=roleIndex, startingLaneNum=startingLaneNum)
        except DataSelectionGui.UserCancelledError:
            pass
        except Exception as ex:
            log_exception(logger)
            QMessageBox.critical(self, "Error loading file", str(ex))
def _update_subvol_widget(self, node_uuid, dataname, typename):
    """
    Update the subvolume widget with the min/max extents of the given node and dataname.
    Note: The node and dataname do not necessarily have to match the currently
          selected node and dataname.  This enables the right-click behavior,
          which can be used to limit your data volume to the size of a
          different data volume.
    """
    error_msg = None
    try:
        if typename == "roi":
            node_service = DVIDNodeService(self._hostname, str(node_uuid))
            roi_blocks_xyz = numpy.array(node_service.get_roi(str(dataname)))
            # Block coordinates scaled to voxel coordinates.
            maxindex = tuple(DVID_BLOCK_WIDTH * (1 + numpy.max(roi_blocks_xyz, axis=0)))
            minindex = (0, 0, 0)  # Rois are always 3D
            axiskeys = "xyz"
            # If the current selection is a dataset, then include a channel dimension
            if self.get_selection().typename != "roi":
                axiskeys = "cxyz"
                # Channel is prepended to match the leading 'c' in "cxyz".
                minindex = (0,) + minindex
                maxindex = (1,) + maxindex  # FIXME: This assumes that the selected data has only 1 channel...
        else:
            # Query the server
            raw_metadata = VoxelsAccessor.get_metadata(self._hostname, node_uuid, dataname)
            voxels_metadata = VoxelsMetadata(raw_metadata)
            maxindex = voxels_metadata.shape
            minindex = voxels_metadata.minindex
            axiskeys = voxels_metadata.axiskeys
            # If the current selection is a roi, then remove the channel dimension
            if self.get_selection().typename == "roi":
                axiskeys = "xyz"
                minindex = minindex[1:]
                maxindex = maxindex[1:]
    except (DVIDException, ErrMsg) as ex:
        error_msg = str(ex)
        log_exception(logger)
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        QMessageBox.critical(self, "DVID Error", error_msg)
        # Clear the widget so stale extents aren't shown.
        self._subvol_widget.initWithExtents("", (), (), ())
        return

    # Signature presumably (axiskeys, shape, start, stop); here shape equals
    # maxindex -- TODO confirm against initWithExtents.
    self._subvol_widget.initWithExtents(axiskeys, maxindex, minindex, maxindex)
def main(argv):
    """Parse command-line arguments and run the workflow.

    Returns a shell-style exit code: 0 on success, 1 on any failure.
    """
    logger.info("Starting at {}".format( datetime.datetime.now() ))
    logger.info( "Launching with sys.argv: {}".format(sys.argv) )

    parser = getArgParser()
    ilastik.monkey_patches.extend_arg_parser(parser)
    parsed_args = parser.parse_args(argv[1:])
    ilastik.monkey_patches.apply_setting_dict( parsed_args.__dict__ )

    try:
        runWorkflow(parsed_args)
    except:
        log_exception( logger )
        return 1
    finally:
        logger.info("Finished at {}".format( datetime.datetime.now() ))
    # BUGFIX/clarity: the success return must NOT live inside the finally
    # block — a `return` in `finally` would swallow the `return 1` above
    # and report success even when the workflow failed.
    return 0
def main(argv):
    """Entry point: run the workflow described by ``argv``; return 0/1 exit code."""
    logger.info("Starting at {}".format(datetime.datetime.now()))
    logger.info("Launching with sys.argv: {}".format(sys.argv))

    parser = getArgParser()
    ilastik.monkey_patches.extend_arg_parser(parser)
    parsed_args = parser.parse_args(argv[1:])
    ilastik.monkey_patches.apply_setting_dict(parsed_args.__dict__)

    try:
        runWorkflow(parsed_args)
    except:
        log_exception(logger)
        return 1
    finally:
        logger.info("Finished at {}".format(datetime.datetime.now()))
    # BUGFIX/clarity: keep the success return OUTSIDE the finally block;
    # a `return` inside `finally` would override the error `return 1`.
    return 0
def _applyNormalizeDisplayToTempOps(self):
    """Apply the normalize-display combo selection to all temp operators.

    Refuses to disable normalization while no data range is specified.
    Rolls every lane back if any single lane fails.  Returns True on success.
    """
    # Map the combo text onto the tri-state normalization setting.
    new_norm = { "True": True, "False": False, "Default": None }[str(self.normalizeDisplayComboBox.currentText())]
    new_drange = (self.rangeMinSpinBox.value(), self.rangeMaxSpinBox.value())

    # The spinbox minimum doubles as the "unset" sentinel value.
    drange_unset = (new_drange[0] == self.rangeMinSpinBox.minimum()
                    or new_drange[1] == self.rangeMaxSpinBox.minimum())
    if new_norm is False and drange_unset:
        # no drange given, autonormalization cannot be switched off !
        QMessageBox.warning( None, "Warning", "Normalization cannot be switched off without specifying the data range !" )
        self.normalizeDisplayComboBox.setCurrentIndex(1)
        return

    # Snapshot current settings so a failure can be rolled back.
    oldInfos = {}
    for laneIndex, op in self.tempOps.items():
        oldInfos[laneIndex] = copy.copy(op.Dataset.value)

    try:
        for laneIndex, op in self.tempOps.items():
            info = copy.copy(op.Dataset.value)
            info.normalizeDisplay = new_norm
            op.Dataset.setValue(info)
        self._error_fields.discard('Normalize Display')
        return True
    except Exception as e:
        # Revert every lane to its snapshot.
        for laneIndex, op in self.tempOps.items():
            op.Dataset.setValue(oldInfos[laneIndex])
        msg = "Could not apply normalization settings due to an exception:\n"
        msg += "{}".format(e)
        log_exception(logger, msg)
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Normalize Display')
        return False
def _configureOpWithInfos(self, roleIndex, startingLane, endingLane, infos):
    """
    Attempt to configure the specified role and lanes of the
    top-level operator with the given DatasetInfos.

    Returns True if all lanes were configured successfully, or False
    if something went wrong (after rolling back the failed lane).
    """
    opTop = self.topLevelOperator
    originalSize = len(opTop.DatasetGroup)

    # Resize the slot if necessary
    if len( opTop.DatasetGroup ) < endingLane+1:
        opTop.DatasetGroup.resize( endingLane+1 )

    # Configure each subslot
    for laneIndex, info in zip(list(range(startingLane, endingLane+1)), infos):
        try:
            self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue( info )
        except DatasetConstraintError as ex:
            return_val = [False]
            # Give the user a chance to fix the problem
            self.handleDatasetConstraintError(info, info.filePath, ex, roleIndex, laneIndex, return_val)
            if return_val[0]:
                # Successfully repaired graph.
                continue
            else:
                # Not successfully repaired.  Roll back the changes
                self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
                return False
        except OpDataSelection.InvalidDimensionalityError as ex:
            self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
            # BUGFIX: `ex.message` was removed from exceptions in Python 3
            # (BaseException.message); str(ex) works on both 2 and 3.
            QMessageBox.critical( self, "Dataset has different dimensionality", str(ex) )
            return False
        except Exception as ex:
            self._opTopRemoveDset(originalSize, laneIndex, roleIndex)
            msg = "Wasn't able to load your dataset into the workflow. See error log for details."
            log_exception( logger, msg )
            QMessageBox.critical( self, "Dataset Load Error", msg )
            return False
    return True
def _applyChannelDescriptionToTempOps(self, index):
    """Apply the chosen channel description to every temp operator.

    Rolls all lanes back if any single lane fails.  Returns True on success.
    """
    if index == -1:
        return

    newChannelDescription = str(self.channelDisplayComboBox.itemData(index).toString())

    # Snapshot current settings so a failure can be rolled back.
    oldInfos = {lane: copy.copy(op.Dataset.value) for lane, op in self.tempOps.items()}

    try:
        for lane, op in self.tempOps.items():
            info = copy.copy(op.Dataset.value)
            needs_update = (info.axistags is None
                            or info.axistags.index('c') >= len(info.axistags)
                            or info.axistags['c'].description != newChannelDescription)
            if needs_update:
                if info.axistags is None:
                    info.axistags = op.Image.meta.original_axistags
                if info.axistags.index('c') < len(info.axistags):
                    info.axistags['c'].description = newChannelDescription
                op.Dataset.setValue(info)
        self._error_fields.discard('Channel Display')
        return True
    except Exception as e:
        # Revert every lane to its snapshot.
        for lane, op in self.tempOps.items():
            op.Dataset.setValue(oldInfos[lane])
        msg = "Could not set new channel display settings due to an exception:\n"
        msg += "{}".format(e)
        log_exception(logger, msg)
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Channel Display')
        return False
    finally:
        self._updateChannelDisplayCombo()
def _loadAnnotationFile(self, annotation_filepath):
    """
    Load the annotation file using the path stored in our member variable.

    On parse failure, logs the error, informs the user, and clears
    the annotation state and the filepath widget.
    """
    try:
        # Configure operator
        self.opSplitBodyCarving.AnnotationFilepath.setValue( annotation_filepath )

        # Requesting annotations triggers parse.
        self._annotations = self.opSplitBodyCarving.Annotations.value
        self._ravelerLabels = self.opSplitBodyCarving.AnnotationBodyIds.value

        # Update gui
        self._reloadInfoWidgets()
        self.annotationFilepathEdit.setText( decode_to_qstring(annotation_filepath) )
    except OpParseAnnotations.AnnotationParsingException as ex:
        if ex.original_exc is not None:
            # BUGFIX: sys.exc_info is a function; the original indexed the
            # function object itself (sys.exc_info[2]), which raises TypeError.
            log_exception( logger, exc_info=( type(ex.original_exc), ex.original_exc, sys.exc_info()[2]) )
        else:
            log_exception( logger )
        QMessageBox.critical(self, "Failed to parse", ex.msg + "\n\nSee console output for details." )
        self._annotations = None
        self._ravelerLabels = None
        self.annotationFilepathEdit.setText("")
    except:
        msg = "Wasn't able to parse your bookmark file. See console output for details."
        QMessageBox.critical(self, "Failed to parse", msg )
        log_exception( logger, msg )
        self._annotations = None
        self._ravelerLabels = None
        self.annotationFilepathEdit.setText("")
def _applyChannelDescriptionToTempOps(self, index):
    """Push the selected channel description onto all temp ops; roll back on error."""
    if index == -1:
        return

    newChannelDescription = str(self.channelDisplayComboBox.itemData(index).toString())

    # Save a copy of our settings for rollback.
    oldInfos = {}
    for laneIndex, op in self.tempOps.items():
        oldInfos[laneIndex] = copy.copy(op.Dataset.value)

    # Attempt to apply to all temp operators
    try:
        for laneIndex, op in self.tempOps.items():
            info = copy.copy(op.Dataset.value)
            if (info.axistags is None
                    or info.axistags.index('c') >= len(info.axistags)
                    or info.axistags['c'].description != newChannelDescription):
                if info.axistags is None:
                    info.axistags = op.Image.meta.original_axistags
                if info.axistags.index('c') < len(info.axistags):
                    info.axistags['c'].description = newChannelDescription
                op.Dataset.setValue(info)
        self._error_fields.discard('Channel Display')
        return True
    except Exception as e:
        # Revert everything back to the previous state
        for laneIndex, op in self.tempOps.items():
            op.Dataset.setValue(oldInfos[laneIndex])
        msg = "Could not set new channel display settings due to an exception:\n"
        msg += "{}".format(e)
        log_exception(logger, msg)
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Channel Display')
        return False
    finally:
        self._updateChannelDisplayCombo()
def _update_display(self):
    """Refresh the ROI widget from the current server selection, reporting errors."""
    super(DvidDataSelectionBrowser, self)._update_display()
    hostname, dset_uuid, dataname, node_uuid = self.get_selection()

    enable_contents = self._repos_info is not None and dataname != "" and node_uuid != ""
    self._roi_groupbox.setEnabled(enable_contents)

    if not dataname or not node_uuid:
        self._roi_widget.initWithExtents("", (), (), ())
        return

    error_msg = None
    try:
        # Query the server
        connection = httplib.HTTPConnection(hostname)
        raw_metadata = pydvid.voxels.get_metadata(connection, node_uuid, dataname)
        voxels_metadata = pydvid.voxels.VoxelsMetadata(raw_metadata)
    except socket.error as ex:
        error_msg = "Socket Error: {} (Error {})".format(ex.args[1], ex.args[0])
    except httplib.HTTPException as ex:
        error_msg = "HTTP Error: {}".format(ex.args[0])
    except pydvid.errors.DvidHttpError as ex:
        # DVID will return an error if the selected dataset
        # isn't a 'voxels' dataset and thus has no voxels metadata
        # In that case, show the error on the console, and don't let the user hit 'okay'.
        log_exception(logger, level=logging.WARN)
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        return
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._roi_widget.initWithExtents("", (), (), ())
        return

    self._roi_widget.initWithExtents(voxels_metadata.axiskeys,
                                     voxels_metadata.shape,
                                     voxels_metadata.minindex,
                                     voxels_metadata.shape)
def _update_display(self):
    """Sync the ROI group box and widget with the currently selected dataset/node."""
    super( DvidDataSelectionBrowser, self )._update_display()
    selection = self.get_selection()
    hostname, dset_uuid, dataname, node_uuid = selection

    # The ROI controls are only meaningful once a dataset and node are chosen.
    self._roi_groupbox.setEnabled(self._repos_info is not None
                                  and dataname != ""
                                  and node_uuid != "")

    if not dataname or not node_uuid:
        self._roi_widget.initWithExtents("", (), (), ())
        return

    error_msg = None
    try:
        # Query the server
        connection = httplib.HTTPConnection(hostname)
        raw_metadata = pydvid.voxels.get_metadata(connection, node_uuid, dataname)
        voxels_metadata = pydvid.voxels.VoxelsMetadata(raw_metadata)
    except socket.error as ex:
        error_msg = "Socket Error: {} (Error {})".format(ex.args[1], ex.args[0])
    except httplib.HTTPException as ex:
        error_msg = "HTTP Error: {}".format(ex.args[0])
    except pydvid.errors.DvidHttpError as ex:
        # DVID will return an error if the selected dataset
        # isn't a 'voxels' dataset and thus has no voxels metadata
        # In that case, show the error on the console, and don't let the user hit 'okay'.
        log_exception(logger, level=logging.WARN)
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        return
    else:
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    if error_msg:
        QMessageBox.critical(self, "DVID Error", error_msg)
        self._roi_widget.initWithExtents("", (), (), ())
        return

    self._roi_widget.initWithExtents(voxels_metadata.axiskeys,
                                     voxels_metadata.shape,
                                     voxels_metadata.minindex,
                                     voxels_metadata.shape)
def _applyTempOpSettingsRealOp(self):
    """
    Apply the settings from our temporary operators to the real operators.

    Returns True on success.  On failure, informs the user and rolls the
    real operators back to their previous settings.
    """
    # Save a copy of the current settings so a failure can be rolled back.
    originalInfos = {}
    for laneIndex in self._laneIndexes:
        realSlot = self._op.DatasetGroup[laneIndex][self._roleIndex]
        if realSlot.ready():
            originalInfos[laneIndex] = copy.copy( realSlot.value )
        else:
            originalInfos[laneIndex] = None

    try:
        for laneIndex, op in self.tempOps.items():
            info = copy.copy( op.Dataset.value )
            realSlot = self._op.DatasetGroup[laneIndex][self._roleIndex]
            realSlot.setValue( info )
        return True
    except Exception as ex:
        if isinstance(ex, DatasetConstraintError):
            msg = "Failed to apply your new settings to the workflow " \
                  "because they violate a constraint of the {} applet.\n\n".format( ex.appletName ) + \
                  ex.message
            log_exception( logger, msg, level=logging.INFO )
            QMessageBox.critical( self, "Unacceptable Settings", msg )
        else:
            msg = "Failed to apply dialog settings due to an exception:\n"
            msg += "{}".format( ex )
            log_exception( logger, msg )
            QMessageBox.critical(self, "Error", msg)

        # BUGFIX: this rollback was dead code — it was placed AFTER the
        # `return False`, so the real operators were never reverted.
        # Revert everything back to the previous state
        for laneIndex, info in originalInfos.items():
            realSlot = self._op.DatasetGroup[laneIndex][self._roleIndex]
            if realSlot is not None:
                realSlot.setValue( info )
        return False
def _applyNicknameToTempOps(self):
    """Apply the nickname field to every temp operator, rolling back on failure.

    Returns True on success, False on failure (after reverting all lanes).
    """
    newNickname = encode_from_qstring(self.nicknameEdit.text(), 'utf-8')
    if "<multiple>" in newNickname:
        return

    try:
        # Remove the event filter while this function executes because we don't
        # want to trigger additional calls to this very function.
        self.nicknameEdit.removeEventFilter(self)

        # Save a copy of our settings
        oldInfos = {}
        for laneIndex, op in self.tempOps.items():
            oldInfos[laneIndex] = copy.copy( op.Dataset.value )

        try:
            for laneIndex, op in self.tempOps.items():
                info = copy.copy( op.Dataset.value )
                info.nickname = newNickname
                op.Dataset.setValue( info )
            self._error_fields.discard('Nickname')
            return True
        except Exception as e:
            # Revert everything back to the previous state
            for laneIndex, op in self.tempOps.items():
                op.Dataset.setValue( oldInfos[laneIndex] )
            msg = "Could not set new nickname due to an exception:\n"
            msg += "{}".format( e )
            QMessageBox.warning(self, "Error", msg)
            log_exception( logger, msg )
            # BUGFIX: `self._error_fields += 'Nickname'` raises TypeError
            # (sets do not support +=); use .add() like the sibling handlers.
            self._error_fields.add('Nickname')
            return False
    finally:
        self.nicknameEdit.installEventFilter(self)
        self._updateNickname()
def saveProject(self, force_all_save=False):
    """
    Update the project file with the state of the current workflow settings.
    Must not be called if the project file was opened in read-only mode.

    Raises ProjectManager.SaveError if serialization fails.
    """
    logger.debug("Save Project triggered")
    assert self.currentProjectFile != None
    assert self.currentProjectPath != None
    assert not self.currentProjectIsReadOnly, "Can't save a read-only project"

    # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
    # the progress manager treats these tasks as a group instead of several sequential jobs.
    for aplt in self._applets:
        for ser in aplt.dataSerializers:
            if ser.isDirty():
                aplt.progressSignal.emit(0)

    try:
        # Applet serializable items are given the whole file (root group) for now
        for aplt in self._applets:
            for item in aplt.dataSerializers:
                assert item.base_initialized, "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                if force_all_save or item.isDirty() or item.shouldSerialize(self.currentProjectFile):
                    item.serializeToHdf5(self.currentProjectFile, self.currentProjectPath)

        # save the current workflow as standard workflow
        if "workflowName" in self.currentProjectFile:
            del self.currentProjectFile["workflowName"]
        self.currentProjectFile.create_dataset("workflowName", data=self.workflow.workflowName)
    # BUGFIX: `except Exception, err` is Python-2-only syntax; the rest of
    # this file uses the `as` form, which works on Python 2.6+ and 3.
    except Exception as err:
        log_exception( logger, "Project Save Action failed due to the exception shown above.")
        raise ProjectManager.SaveError(str(err))
def saveProjectSnapshot(self, snapshotPath):
    """
    Copy the project file as it is, then serialize any dirty state into the copy.
    Original serializers and project file should not be touched.

    Raises ProjectManager.SaveError if serialization fails.
    """
    with h5py.File(snapshotPath, 'w') as snapshotFile:
        # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
        # the progress manager treats these tasks as a group instead of several sequential jobs.
        for aplt in self._applets:
            for ser in aplt.dataSerializers:
                if ser.isDirty():
                    aplt.progressSignal.emit(0)

        # Start by copying the current project state into the file
        # This should be faster than serializing everything from scratch
        for key in self.currentProjectFile.keys():
            snapshotFile.copy(self.currentProjectFile[key], key)

        try:
            # Applet serializable items are given the whole file (root group) for now
            for aplt in self._applets:
                for item in aplt.dataSerializers:
                    assert item.base_initialized, "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                    if item.isDirty() or item.shouldSerialize(self.currentProjectFile):
                        # Use a COPY of the serializer, so the original serializer doesn't forget it's dirty state
                        itemCopy = copy.copy(item)
                        itemCopy.serializeToHdf5(snapshotFile, snapshotPath)
        # BUGFIX: `except Exception, err` is Python-2-only syntax; use `as`.
        except Exception as err:
            log_exception( logger, "Project Save Snapshot Action failed due to the exception printed above." )
            raise ProjectManager.SaveError(str(err))
        finally:
            # NOTE(review): the body of this finally clause is truncated in the
            # available source chunk — restore the original cleanup (presumably
            # progress-signal completion) from version control before shipping.
            pass
def _applyDisplayModeToTempOps(self, index):
    """Apply the selected display mode to all temp operators; roll back on failure."""
    if index == -1:
        return

    newDisplayMode = str(self.displayModeComboBox.itemData(index).toString())

    # Snapshot current settings so a failure can be rolled back.
    oldInfos = {}
    for laneIndex, op in self.tempOps.items():
        oldInfos[laneIndex] = copy.copy(op.Dataset.value)

    try:
        for laneIndex, op in self.tempOps.items():
            info = copy.copy(op.Dataset.value)
            if info.display_mode != newDisplayMode:
                info.display_mode = newDisplayMode
                op.Dataset.setValue(info)
        # NOTE(review): this handler reuses the 'Channel Display' error key
        # rather than a dedicated 'Display Mode' key — presumably intentional
        # (shared validation slot); verify before changing.
        self._error_fields.discard('Channel Display')
        return True
    except Exception as e:
        # Revert every lane to its snapshot.
        for laneIndex, op in self.tempOps.items():
            op.Dataset.setValue(oldInfos[laneIndex])
        msg = "Could not set new channel display settings due to an exception:\n"
        msg += "{}".format(e)
        log_exception(logger, msg)
        QMessageBox.warning(self, "Error", msg)
        self._error_fields.add('Channel Display')
        return False
    finally:
        self._updateDisplayModeCombo()
def addFiles(self, roleIndex, startingLane=None):
    """
    The user clicked the "Add File" button.
    Ask him to choose a file (or several) and add them
    to both the GUI table and the top-level operator inputs.
    """
    # Start the file dialog in the directory of the most recently opened image.
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    if mostRecentImageFile is not None:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    # Launch the "Open File" dialog
    fileNames = self.getImageFileNamesToOpen(self, defaultDirectory)

    # If the user didn't cancel
    if len(fileNames) > 0:
        # Remember this choice for next time.
        PreferencesManager().set('DataSelection', 'recent image', fileNames[0])
        try:
            self.addFileNames(fileNames, roleIndex, startingLane)
        except Exception as ex:
            log_exception( logger )
            QMessageBox.critical(self, "Error loading file", str(ex))
def _loadAnnotationFile(self, annotation_filepath):
    """
    Load the annotation file using the path stored in our member variable.

    Clears the annotation state and the filepath widget on any parse failure.
    """
    try:
        # Configure operator
        self.opSplitBodyCarving.AnnotationFilepath.setValue(annotation_filepath)

        # Requesting annotations triggers parse.
        self._annotations = self.opSplitBodyCarving.Annotations.value
        self._ravelerLabels = self.opSplitBodyCarving.AnnotationBodyIds.value

        # Update gui
        self._reloadInfoWidgets()
        self.annotationFilepathEdit.setText(decode_to_qstring(annotation_filepath))
    except OpParseAnnotations.AnnotationParsingException as ex:
        if ex.original_exc is not None:
            # BUGFIX: sys.exc_info must be CALLED — indexing the function
            # object (sys.exc_info[2]) raises TypeError.
            log_exception(logger, exc_info=(type(ex.original_exc), ex.original_exc, sys.exc_info()[2]))
        else:
            log_exception(logger)
        QMessageBox.critical(
            self, "Failed to parse", ex.msg + "\n\nSee console output for details.")
        self._annotations = None
        self._ravelerLabels = None
        self.annotationFilepathEdit.setText("")
    except:
        msg = "Wasn't able to parse your bookmark file. See console output for details."
        QMessageBox.critical(self, "Failed to parse", msg)
        log_exception(logger, msg)
        self._annotations = None
        self._ravelerLabels = None
        self.annotationFilepathEdit.setText("")
def run_export(self, role_path_dict ):
    """
    Run the export for each dataset listed in role_path_dict,
    which must be a dict of {role_index : path_list}.

    For each dataset:
           1. Append a lane to the workflow
           2. Configure the new lane's DataSelection inputs with the new file (or files, if there is more than one role).
           3. Export the results from the new lane
           4. Remove the lane from the workflow.

    By appending/removing the batch lane for EACH dataset we process,
    we trigger the workflow's usual prepareForNewLane() and connectLane() logic,
    which ensures that we get a fresh new lane that's ready to process data.

    After each lane is processed, the given post-processing callback will be executed.
    signature: lane_postprocessing_callback(batch_lane_index)
    """
    self.progressSignal.emit(0)
    try:
        assert isinstance(role_path_dict, OrderedDict)
        template_infos = self._get_template_dataset_infos()

        # Invert dict from [role][batch_index] -> path to a list-of-tuples, indexed by batch_index:
        # [ (role-1-path, role-2-path, ...),
        #   (role-1-path, role-2-path,...) ]
        # BUGFIX: materialize the zip — on Python 3 zip() is lazy, so the
        # len() call in emit_progress() below would raise TypeError.
        paths_by_batch_index = list( zip( *role_path_dict.values() ) )

        # Call customization hook
        self.dataExportApplet.prepare_for_entire_export()

        batch_lane_index = len(self.dataSelectionApplet.topLevelOperator)
        for batch_dataset_index, role_input_paths in enumerate(paths_by_batch_index):
            # Add a lane to the end of the workflow for batch processing
            # (Expanding OpDataSelection by one has the effect of expanding the whole workflow.)
            self.dataSelectionApplet.topLevelOperator.addLane( batch_lane_index )
            try:
                # The above setup can take a long time for a big workflow.
                # If the user has ALREADY cancelled, quit now instead of waiting for the first request to begin.
                Request.raise_if_cancelled()

                def emit_progress(dataset_percent):
                    overall_progress = (batch_dataset_index + dataset_percent/100.0)/len(paths_by_batch_index)
                    self.progressSignal.emit(100*overall_progress)

                # Now use the new lane to export the batch results for the current file.
                self._run_export_with_empty_batch_lane( role_input_paths,
                                                        batch_lane_index,
                                                        template_infos,
                                                        emit_progress )
            finally:
                # Remove the batch lane.  See docstring above for explanation.
                try:
                    self.dataSelectionApplet.topLevelOperator.removeLane( batch_lane_index, batch_lane_index )
                except Request.CancellationException:
                    # If you see this, something went wrong in a graph setup operation.
                    log_exception(logger)
                    raise RuntimeError("Encountered an unexpected CancellationException while removing the batch lane.")
                assert len(self.dataSelectionApplet.topLevelOperator.DatasetGroup) == batch_lane_index

        # Call customization hook
        self.dataExportApplet.post_process_entire_export()
    finally:
        self.progressSignal.emit(100)
def handle_batch_processing_failure(self, exc, exc_info):
    """Log a batch-processing error, restore GUI state, then alert the user."""
    message = "Error encountered during batch processing:\n{}".format(exc)
    log_exception(logger, message, exc_info)
    # Tear down batch-processing state before showing the modal dialog.
    self.handle_batch_processing_finished()
    self.handle_batch_processing_complete()
    QMessageBox.critical(self, "Batch Processing Error", message)
def handle_batch_processing_failure(self, exc, exc_info):
    """Report a batch-processing failure: log it, reset state, notify the user."""
    msg = "Error encountered during batch processing:\n{}".format(exc)
    log_exception(logger, msg, exc_info)
    self.handle_batch_processing_finished()
    self.handle_batch_processing_complete()
    QMessageBox.critical(self, "Batch Processing Error", msg)
def handleFeatureComputationFailure(self, exc, exc_info):
    """Log a failed feature computation and show it to the user."""
    msg = "Feature computation failed due to the following error:\n{}".format(exc)
    log_exception(logger, msg, exc_info)
    QMessageBox.critical(self, "Feature computation failed", msg)
def _applyRangeToTempOps(self):
    """Validate and apply the data-range spinbox values to all temp operators.

    Returns True on success, False on validation failure or exception
    (after reverting all lanes).
    """
    new_drange = (self.rangeMinSpinBox.value(), self.rangeMaxSpinBox.value())
    # The spinbox minimum doubles as the "unset" sentinel value.
    if new_drange[0] == self.rangeMinSpinBox.minimum() \
    or new_drange[1] == self.rangeMaxSpinBox.minimum():
        new_drange = None

    def get_dtype_info(dtype):
        # Integer dtypes answer to iinfo; floats to finfo.
        try:
            return numpy.iinfo(dtype)
        except ValueError:
            return numpy.finfo(dtype)

    try:
        # Remove the event filter while this function executes because we don't
        # want to trigger additional calls to this very function.
        self.rangeMinSpinBox.removeEventFilter(self)
        self.rangeMaxSpinBox.removeEventFilter(self)

        if new_drange is not None:
            if new_drange[0] >= new_drange[1]:
                QMessageBox.warning(self, "Error", "Can't apply data range values: Data range MAX must be greater than MIN.")
                self._error_fields.add('Data Range')
                return False

            # Make sure the new bounds don't exceed the dtype range
            for laneIndex, op in self.tempOps.items():
                dtype_info = get_dtype_info(op.Image.meta.dtype)
                if new_drange[0] < dtype_info.min or new_drange[1] > dtype_info.max:
                    QMessageBox.warning(self, "Error",
                        "Can't apply data range values:\n"
                        "Range {} is outside the allowed range for the data type of lane {}.\n"
                        "(Full range of {} is [{}, {}].)".format( new_drange, laneIndex, dtype_info.dtype.name, dtype_info.min, dtype_info.max ) )
                    self._error_fields.add('Data Range')
                    return False

        # Snapshot current settings so a failure can be rolled back.
        oldInfos = {}
        for laneIndex, op in self.tempOps.items():
            oldInfos[laneIndex] = copy.copy(op.Dataset.value)

        try:
            for laneIndex, op in self.tempOps.items():
                info = copy.copy(op.Dataset.value)
                lane_dtype = get_dtype_info(op.Image.meta.dtype).dtype.type
                info.drange = None
                if new_drange is not None:
                    # Store the range using the lane's own dtype.
                    info.drange = (lane_dtype(new_drange[0]), lane_dtype(new_drange[1]))
                op.Dataset.setValue(info)
            self._error_fields.discard('Data Range')
            return True
        except Exception as e:
            # Revert everything back to the previous state
            for laneIndex, op in self.tempOps.items():
                op.Dataset.setValue(oldInfos[laneIndex])
            msg = "Could not apply data range settings due to an exception:\n"
            msg += "{}".format(e)
            log_exception(logger, msg)
            QMessageBox.warning(self, "Error", msg)
            self._error_fields.add('Data Range')
            return False
    finally:
        self.rangeMinSpinBox.installEventFilter(self)
        self.rangeMaxSpinBox.installEventFilter(self)
        # Either way, show the current data range
        self._updateRange()
def export_failed(_, exc_info):
    """
    Default callback for Request failure
    """
    # Nothing to recover here — just record the traceback.
    log_exception(logger, "Export failed", exc_info)
def _applyAxesToTempOps(self):
    """Validate the axis-order text field and apply it to all temp operators.

    Returns True on success, False on validation failure or exception
    (after reverting all lanes).
    """
    newAxisOrder = str(self.axesEdit.text())

    # Determine the expected number of axes from the first lane.
    # BUGFIX: dict.values() is a non-indexable view on Python 3;
    # next(iter(...)) works on both Python 2 and 3.
    firstOp = next(iter(self.tempOps.values()))
    shape = firstOp.Image.meta.shape
    original_shape = firstOp.Image.meta.original_shape
    if original_shape is not None:
        numaxes = len(original_shape)
    else:
        numaxes = len(shape)

    try:
        # Remove the event filter while this function executes because we don't
        # want to trigger additional calls to this very function.
        self.axesEdit.removeEventFilter(self)

        if numaxes != len( newAxisOrder ):
            QMessageBox.warning(self, "Error", "Can't use those axes: wrong number.")
            self._error_fields.add('Axis Order')
            return False

        for c in newAxisOrder:
            if c not in 'txyzc':
                QMessageBox.warning(self, "Error", "Can't use those axes: Don't understand axis '{}'.".format(c))
                self._error_fields.add('Axis Order')
                return False

        if len(set(newAxisOrder)) != len(newAxisOrder):
            QMessageBox.warning(self, "Error", "Axis order has repeated axes.")
            # CONSISTENCY FIX: record the error field like every other
            # validation branch in this method does.
            self._error_fields.add('Axis Order')
            return False

        # Save a copy of our settings
        oldInfos = {}
        for laneIndex, op in self.tempOps.items():
            oldInfos[laneIndex] = copy.copy( op.Dataset.value )

        try:
            for laneIndex, op in self.tempOps.items():
                info = copy.copy( op.Dataset.value )
                # Use new order, but keep the data from the old axis tags
                # (for all axes that were kept)
                self.axistagsEditorWidget.change_axis_order(newAxisOrder)
                newTags = self.axistagsEditorWidget.axistags
                info.axistags = newTags
                op.Dataset.setValue( info )
            self._error_fields.discard('Axis Order')
            return True
        except Exception as e:
            # Revert everything back to the previous state
            for laneIndex, op in self.tempOps.items():
                op.Dataset.setValue( oldInfos[laneIndex] )
            msg = "Could not apply axis settings due to an exception:\n"
            msg += "{}".format( e )
            log_exception( logger, msg )
            QMessageBox.warning(self, "Error", msg)
            self._error_fields.add('Axis Order')
            return False
    finally:
        self.axesEdit.installEventFilter(self)
        # Either way, show the axes
        self._updateAxes()
        self._updateDisplayModeCombo()
def _handle_failure( exc, exc_info ):
    """Log the export failure, finish the progress bar, and re-enable the button."""
    msg = "Exception raised during export. See traceback above.\n"
    log_exception( logger, msg, exc_info )
    # Close out the progress bar so the GUI is not stuck mid-export,
    # and let the user try again.
    self.applet.progressSignal.emit(100)
    self._drawer.exportTifButton.setEnabled(True)
def handleFeatureComputationFailure(self, exc, exc_info):
    """Report a feature-computation failure to the log and the user."""
    message = "Feature computation failed due to the following error:\n{}".format(exc)
    log_exception(logger, message, exc_info)
    QMessageBox.critical(self, "Feature computation failed", message)
def _track():
    """Collect tracking parameters from the drawer widgets and run the tracker."""
    self.applet.busy = True
    self.applet.appletStateUpdateRequested.emit()

    # --- Basic tracking parameters ---
    maxDist = self._drawer.maxDistBox.value()
    maxObj = self._drawer.maxObjectsBox.value()
    divThreshold = self._drawer.divThreshBox.value()

    # --- Spatial / temporal / size ranges ---
    from_t = self._drawer.from_time.value()
    to_t = self._drawer.to_time.value()
    from_x = self._drawer.from_x.value()
    to_x = self._drawer.to_x.value()
    from_y = self._drawer.from_y.value()
    to_y = self._drawer.to_y.value()
    from_z = self._drawer.from_z.value()
    to_z = self._drawer.to_z.value()
    from_size = self._drawer.from_size.value()
    to_size = self._drawer.to_size.value()

    self.time_range = range(from_t, to_t + 1)
    avgSize = [self._drawer.avgSizeBox.value()]

    # An empty timeout box means "no timeout".
    cplex_timeout = None
    if len(str(self._drawer.timeoutBox.text())):
        cplex_timeout = int(self._drawer.timeoutBox.text())

    # --- Feature toggles and weights ---
    withTracklets = self._drawer.trackletsBox.isChecked()
    sizeDependent = self._drawer.sizeDepBox.isChecked()
    hardPrior = self._drawer.hardPriorBox.isChecked()
    classifierPrior = self._drawer.classifierPriorBox.isChecked()
    divWeight = self._drawer.divWeightBox.value()
    transWeight = self._drawer.transWeightBox.value()
    withDivisions = self._drawer.divisionsBox.isChecked()
    withOpticalCorrection = self._drawer.opticalBox.isChecked()
    withMergerResolution = self._drawer.mergerResolutionBox.isChecked()
    borderAwareWidth = self._drawer.bordWidthBox.value()
    withArmaCoordinates = True
    appearanceCost = self._drawer.appearanceBox.value()
    disappearanceCost = self._drawer.disappearanceBox.value()
    motionModelWeight = self._drawer.motionModelWeightBox.value()
    solver = self._drawer.solverComboBox.currentText()

    # A flat z-range means the data is 2D.
    ndim = 2 if to_z - from_z == 0 else 3

    try:
        self.mainOperator.track(
            time_range=self.time_range,
            x_range=(from_x, to_x + 1),
            y_range=(from_y, to_y + 1),
            z_range=(from_z, to_z + 1),
            size_range=(from_size, to_size + 1),
            x_scale=self._drawer.x_scale.value(),
            y_scale=self._drawer.y_scale.value(),
            z_scale=self._drawer.z_scale.value(),
            maxDist=maxDist,
            maxObj=maxObj,
            divThreshold=divThreshold,
            avgSize=avgSize,
            withTracklets=withTracklets,
            sizeDependent=sizeDependent,
            detWeight=10.0,
            divWeight=divWeight,
            transWeight=transWeight,
            withDivisions=withDivisions,
            withOpticalCorrection=withOpticalCorrection,
            withClassifierPrior=classifierPrior,
            ndim=ndim,
            withMergerResolution=withMergerResolution,
            borderAwareWidth=borderAwareWidth,
            withArmaCoordinates=withArmaCoordinates,
            cplex_timeout=cplex_timeout,
            appearance_cost=appearanceCost,
            disappearance_cost=disappearanceCost,
            motionModelWeight=motionModelWeight,
            force_build_hypotheses_graph=False,
            max_nearest_neighbors=self._drawer.maxNearestNeighborsSpinBox.value(),
            numFramesPerSplit=self._drawer.numFramesPerSplitSpinBox.value(),
            solverName=solver,
        )
    except Exception as ex:
        log_exception(logger, "Error during tracking. See above error traceback.")
        self._criticalMessage("Error during tracking. See error log.\n\n"
                              "Exception was:\n\n{})".format( ex ))
        return
    # NOTE(review): `hardPrior` is read but never passed to track() — and the
    # success path after this try/except appears truncated in this source
    # chunk; confirm both against version control.
def handleFailedObjectCountExport(self, exception, exception_info):
    """Log and display an object-count export failure."""
    msg = "Failed to export object counts:\n{}".format(exception)
    log_exception(logger, msg, exception_info)
    QMessageBox.critical(self, "Failed to export counts", msg)