def _importProject(self, importedFilePath, newProjectFile, newProjectFilePath):
    """
    Load the data from a project and save it to a different project file.

    importedFilePath - The path to a (not open) .ilp file to import data from
    newProjectFile - An hdf5 handle to a new .ilp to load data into (must be open already)
    newProjectFilePath - The path to the new .ilp we're loading.
    """
    importedFile = h5py.File(importedFilePath, "r")
    try:
        self.workflow = self._workflowClass(
            self._shell, self._headless, self._workflow_cmdline_args, self._project_creation_args
        )
        self.currentProjectFile = newProjectFile
        self.currentProjectPath = newProjectFilePath
        self.currentProjectIsReadOnly = False
        try:
            serializers = [serializer for aplt in self._applets for serializer in aplt.dataSerializers]
            newProject = Project(newProjectFile)
            newProject.populateFrom(importedFile, [s.topGroupName for s in serializers])

            for serializer in serializers:
                # Suppress dirty-tracking while loading so the imported state
                # isn't immediately marked as needing a save.
                serializer.ignoreDirty = True
                try:
                    serializer.deserializeFromHdf5(self.currentProjectFile, newProjectFilePath, self._headless)
                finally:
                    # BUG FIX: previously the flag stayed True if
                    # deserializeFromHdf5 raised; always restore it.
                    serializer.ignoreDirty = False

            self.closed = False
            newProject.updateWorkflowName(self.workflow.workflowName)
            newProject.updateVersion()
            newProject.flush()

            self.workflow.onProjectLoaded(self)
            self.workflow.handleAppletStateUpdateRequested()
        except BaseException:
            # Catch everything (incl. KeyboardInterrupt) so the half-loaded
            # project is torn down; the exception is always re-raised.
            self._closeCurrentProject()
            raise
    finally:
        # BUG FIX: the imported file handle was previously leaked.
        importedFile.close()
def importAsLocalDataset(
    self, project_file: h5py.File, progress_signal: Callable[[int], None] = lambda x: None
) -> str:
    """
    Ensure this dataset is stored inside *project_file* under the project's
    local-data group, dumping it there if it isn't already present.

    :param project_file: open project HDF5 file to store the data in
    :param progress_signal: callback receiving integer progress updates
    :return: the inner HDF5 path of the dataset within *project_file*
    """
    group_name = Project(project_file).local_data_group.name
    inner_path = f"{group_name}/{self.legacy_datasetId}"
    # Skip the (potentially expensive) dump when the data already exists.
    if project_file.get(inner_path) is None:
        self.dumpToHdf5(h5_file=project_file, inner_path=inner_path, progress_signal=progress_signal)
    return inner_path
def saveProject(self, force_all_save=False):
    """
    Update the project file with the state of the current workflow settings.

    Must not be called if the project file was opened in read-only mode.

    :param force_all_save: If True, serialize every applet even if it
        reports itself as clean.
    :raises ProjectManager.SaveError: if any serializer fails; the original
        exception is chained as the cause.
    """
    logger.debug("Save Project triggered")
    # Idiom fix: identity comparison with None (was `!= None`).
    assert self.currentProjectFile is not None
    assert self.currentProjectPath is not None
    assert not self.currentProjectIsReadOnly, "Can't save a read-only project"

    # Minor GUI nicety: Pre-activate the progress signals for dirty applets so
    # the progress manager treats these tasks as a group instead of several sequential jobs.
    for aplt in self._applets:
        for ser in aplt.dataSerializers:
            if ser.isDirty():
                aplt.progressSignal(0)
    try:
        # Applet serializable items are given the whole file (root group) for now
        file_changed = False
        for aplt in self._applets:
            for serializer in aplt.dataSerializers:
                assert (
                    serializer.base_initialized
                ), "AppletSerializer subclasses must call AppletSerializer.__init__ upon construction."
                if force_all_save or serializer.isDirty() or serializer.shouldSerialize(self.currentProjectFile):
                    serializer.serializeToHdf5(self.currentProjectFile, self.currentProjectPath)
                    file_changed = True

        # Only bump the stored format version when something was written.
        if file_changed:
            Project(self.currentProjectFile).updateVersion()

        # save the current workflow as standard workflow
        if "workflowName" in self.currentProjectFile:
            del self.currentProjectFile["workflowName"]
        self.currentProjectFile.create_dataset("workflowName", data=self.workflow.workflowName.encode("utf-8"))

    except Exception as err:
        log_exception(logger, "Project Save Action failed due to the exception shown above.")
        # IMPROVEMENT: chain the original exception so the root cause is
        # preserved in tracebacks (same exception type for callers).
        raise ProjectManager.SaveError(str(err)) from err
    finally:
        # save current time
        if "time" in self.currentProjectFile:
            del self.currentProjectFile["time"]
        self.currentProjectFile.create_dataset("time", data=time.ctime().encode("utf-8"))

        # Flush any changes we made to disk, but don't close the file.
        self.currentProjectFile.flush()

        for applet in self._applets:
            applet.progressSignal(100)
def _deserialize(self, mygroup, slot):
    """
    Load all stored blocks from the HDF5 group *mygroup* into *slot*'s input.

    Groups are sorted by the numeric index embedded in their names; each
    block is written back into the slot at the slicing recorded in its
    ``blockSlice`` attribute.
    """
    logger.debug("Deserializing BlockSlot: {}".format(self.name))
    num = len(mygroup)
    if len(self.inslot) < num:
        self.inslot.resize(num)

    # Annoyingly, some applets store their groups with names like, img0,img1,img2,..,img9,img10,img11
    # which means that sorted() needs a special key to avoid sorting img10 before img2
    # We have to find the index and sort according to its numerical value.
    index_capture = re.compile(r"[^0-9]*(\d*).*")

    def extract_index(s):
        return int(index_capture.match(s).groups()[0])

    for index, t in enumerate(sorted(list(mygroup.items()), key=lambda k_v: extract_index(k_v[0]))):
        groupName, labelGroup = t
        for blockData in list(labelGroup.values()):
            slicing = stringToSlicing(blockData.attrs["blockSlice"])

            # If it is suppose to be a masked array,
            # deserialize the pieces and rebuild the masked array.
            #
            # BUG FIX: the assertion previously called attrs.get() with NO
            # default while its own message claimed `attrs.get(..., False)`.
            # A file saved without the attribute yielded None, making the
            # check fail for unmasked slots. Use the documented default and
            # normalize both sides through bool().
            stored_has_mask = mygroup.attrs.get("meta.has_mask", False)
            assert bool(slot[index].meta.has_mask) == bool(stored_has_mask), (
                "The slot and stored data have different values for"
                + " `has_mask`. They are"
                + " `bool(slot[index].meta.has_mask)`="
                + repr(bool(slot[index].meta.has_mask))
                + " and"
                + ' `mygroup.attrs.get("meta.has_mask", False)`='
                + repr(mygroup.attrs.get("meta.has_mask", False))
                + ". Please fix this to proceed with deserialization."
            )
            if slot[index].meta.has_mask:
                blockArray = numpy.ma.masked_array(
                    blockData["data"][()],
                    mask=blockData["mask"][()],
                    fill_value=blockData["fill_value"][()],
                    shrink=False,
                )
            else:
                blockArray = blockData[...]

            blockArray, slicing = self.reshape_datablock_and_slicing_for_input(
                blockArray, slicing, self.inslot[index], Project(mygroup.file)
            )
            self.inslot[index][slicing] = blockArray
def deserialize(self, group):
    """
    Deserialize from *group*, then flag this serializer as dirty whenever the
    loaded data requires conversion, so the converted form gets re-saved.
    """
    super().deserialize(group)

    project = Project(group.file)
    needs_conversion = self.deserialization_requires_data_conversion(project)
    if not needs_conversion:
        return

    # Force a subsequent save to write out the converted data.
    self.ignoreDirty = False
    self.dirty = True