Example #1
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        preproc = topGroup

        for opPre in self._o.innerOperators:
            mst = opPre._prepData[0]

            if mst is not None:

                #The values to be saved for sigma and filter are the
                #values of the last valid preprocess
                #!These may differ from the current settings!

                deleteIfPresent(preproc, "sigma")
                deleteIfPresent(preproc, "filter")
                deleteIfPresent(preproc, "watershed_source")
                deleteIfPresent(preproc, "invert_watershed_source")
                deleteIfPresent(preproc, "graph")

                preproc.create_dataset("sigma", data=opPre.initialSigma)
                preproc.create_dataset("filter", data=opPre.initialFilter)
                ws_source = str(opPre.WatershedSource.value)
                assert isinstance(
                    ws_source, str
                ), "WatershedSource was {}, but it should be a string.".format(
                    ws_source)
                preproc.create_dataset("watershed_source", data=ws_source)
                preproc.create_dataset("invert_watershed_source",
                                       data=opPre.InvertWatershedSource.value)

                preprocgraph = getOrCreateGroup(preproc, "graph")
                mst.saveH5G(preprocgraph)

            opPre._unsavedData = False
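All of the examples on this page call two small helpers, deleteIfPresent and getOrCreateGroup, whose definitions are not shown here. As a minimal sketch, assuming plain h5py groups and only the behavior implied by the calls above, they could look like this:

    def deleteIfPresent(group, name):
        """Delete the child dataset/group `name` from `group`, if it exists."""
        if name in group:
            del group[name]

    def getOrCreateGroup(group, name):
        """Return the child group `name`, creating it first if necessary."""
        if name in group:
            return group[name]
        return group.create_group(name)

With plain h5py, getOrCreateGroup is essentially group.require_group(name) and deleteIfPresent a guarded del group[name]; the delete-then-recreate pattern in the examples keeps serialization idempotent, so re-saving a project simply overwrites the previous entries.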
Example #2
    def serialize(self, group):
        if not self.shouldSerialize(group):
            return
        deleteIfPresent(group, self.name)
        group = getOrCreateGroup(group, self.name)
        mainOperator = self.slot.getRealOperator()
        innerops = mainOperator.innerOperators
        for i, op in enumerate(innerops):
            gr = getOrCreateGroup(group, str(i))
            for cropKey in op.Annotations.value.keys():
                crop_gr = getOrCreateGroup(gr, str(cropKey))

                labels_gr = getOrCreateGroup(crop_gr, str("labels"))
                for t in op.Annotations.value[cropKey]["labels"].keys():
                    t_gr = getOrCreateGroup(labels_gr, str(t))
                    for oid in op.Annotations.value[cropKey]["labels"][t].keys(
                    ):
                        l = op.Annotations.value[cropKey]["labels"][t][oid]
                        dset = list(l)
                        if len(dset) > 0:
                            t_gr.create_dataset(name=str(oid), data=dset)

                divisions_gr = getOrCreateGroup(crop_gr, str("divisions"))
                dset = []
                for trackid in op.Annotations.value[cropKey]["divisions"].keys(
                ):
                    (children, t_parent
                     ) = op.Annotations.value[cropKey]["divisions"][trackid]
                    dset.append([trackid, children[0], children[1], t_parent])
                if len(dset) > 0:
                    divisions_gr.create_dataset(name=str(i), data=dset)
        self.dirty = False
 def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
     preproc = topGroup
     
     for opPre in self._o.innerOperators:
         mst = opPre._prepData[0]
         
         if mst is not None:
             
             #The values to be saved for sigma and filter are the
             #values of the last valid preprocess
             #!These may differ from the current settings!
             
             deleteIfPresent(preproc, "sigma")
             deleteIfPresent(preproc, "filter")
             deleteIfPresent(preproc, "watershed_source")
             deleteIfPresent(preproc, "invert_watershed_source")
             deleteIfPresent(preproc, "graph")
             
             preproc.create_dataset("sigma",data= opPre.initialSigma)
             preproc.create_dataset("filter",data= opPre.initialFilter)
             ws_source = str(opPre.WatershedSource.value)
             assert isinstance( ws_source, str ), "WatershedSource was {}, but it should be a string.".format( ws_source )
             preproc.create_dataset("watershed_source", data=ws_source)                 
             preproc.create_dataset("invert_watershed_source", data=opPre.InvertWatershedSource.value)
             
             preprocgraph = getOrCreateGroup(preproc, "graph")
             mst.saveH5G(preprocgraph)
         
         opPre._unsavedData = False
    def _serializeLabels(self, topGroup):
        with Tracer(traceLogger):
            # Delete all labels from the file
            deleteIfPresent(topGroup, 'LabelSets')
            labelSetDir = topGroup.create_group('LabelSets')

            numImages = len(self.mainOperator.NonzeroLabelBlocks)
            for imageIndex in range(numImages):
                # Create a group for this image
                labelGroupName = 'labels{:03d}'.format(imageIndex)
                labelGroup = labelSetDir.create_group(labelGroupName)

                # Get a list of slicings that contain labels
                nonZeroBlocks = self.mainOperator.NonzeroLabelBlocks[
                    imageIndex].value
                for blockIndex, slicing in enumerate(nonZeroBlocks):
                    # Read the block from the label output
                    block = self.mainOperator.LabelImages[imageIndex][
                        slicing].wait()

                    # Store the block as a new dataset
                    blockName = 'block{:04d}'.format(blockIndex)
                    labelGroup.create_dataset(blockName, data=block)

                    # Add the slice this block came from as an attribute of the dataset
                    labelGroup[blockName].attrs[
                        'blockSlice'] = self.slicingToString(slicing)

            self._dirtyFlags[Section.Labels] = False
    def _serializeClassifiers(self, topGroup):
        with Tracer(traceLogger):
            deleteIfPresent(topGroup, 'Classifiers')
            self._dirtyFlags[Section.Classifiers] = False
    
            if not self.mainOperator.Classifiers.ready():
                return

            
            classifiers = self.mainOperator.Classifiers
            topGroup.require_group("Classifiers")
            for i in range(len(classifiers)):
                classifier_forests = classifiers[i].value
                # Classifier can be None if there isn't any training data yet.
                if classifier_forests is None:
                    return
                for forest in classifier_forests:
                    if forest is None:
                        return
    
                # Due to non-shared hdf5 dlls, vigra can't write directly to our open hdf5 group.
                # Instead, we'll use vigra to write the classifier to a temporary file.
                tmpDir = tempfile.mkdtemp()
                cachePath = os.path.join(tmpDir, 'tmp_classifier_cache.h5').replace('\\', '/')
                for j, forest in enumerate(classifier_forests):
                    forest.writeHDF5( cachePath, 'ClassifierForests/Forest{:04d}'.format(j) )
                
                # Open the temp file and copy to our project group
                with h5py.File(cachePath, 'r') as cacheFile:
                    grouppath = "Classifiers/Classifier%d"%i
                    topGroup.copy(cacheFile['ClassifierForests'], grouppath)
                
                os.remove(cachePath)
                os.removedirs(tmpDir)
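The classifier serializer above works around non-shared HDF5 libraries by letting vigra write to a scratch file and then copying the result into the open project file with h5py's Group.copy(). A stripped-down sketch of just that copy pattern (the file name, group names and the numpy stand-in data are illustrative assumptions, not taken from the code above):

    import os
    import tempfile

    import h5py
    import numpy

    with h5py.File("project.ilp", "a") as project:
        topGroup = project.require_group("PixelClassification")
        topGroup.require_group("Classifiers")

        tmpDir = tempfile.mkdtemp()
        cachePath = os.path.join(tmpDir, "tmp_classifier_cache.h5").replace("\\", "/")

        # Stand-in for vigra's forest.writeHDF5(): write something into the scratch file.
        with h5py.File(cachePath, "w") as cache:
            cache.create_dataset("ClassifierForests/Forest0000", data=numpy.arange(10))

        # Copy the whole group from the scratch file into the open project group.
        with h5py.File(cachePath, "r") as cache:
            topGroup.copy(cache["ClassifierForests"], "Classifiers/Classifier0")

        os.remove(cachePath)
        os.removedirs(tmpDir)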
 def _serializeLabels(self, topGroup):
     with Tracer(traceLogger):
         # Delete all labels from the file
         deleteIfPresent(topGroup, 'LabelSets')
         labelSetDir = topGroup.create_group('LabelSets')
 
         numImages = len(self.mainOperator.NonzeroLabelBlocks)
         for imageIndex in range(numImages):
             # Create a group for this image
             labelGroupName = 'labels{:03d}'.format(imageIndex)
             labelGroup = labelSetDir.create_group(labelGroupName)
             
             # Get a list of slicings that contain labels
             nonZeroBlocks = self.mainOperator.NonzeroLabelBlocks[imageIndex].value
             for blockIndex, slicing in enumerate(nonZeroBlocks):
                 # Read the block from the label output
                 block = self.mainOperator.LabelImages[imageIndex][slicing].wait()
                 
                 # Store the block as a new dataset
                 blockName = 'block{:04d}'.format(blockIndex)
                 labelGroup.create_dataset(blockName, data=block)
                 
                 # Add the slice this block came from as an attribute of the dataset
                 labelGroup[blockName].attrs['blockSlice'] = self.slicingToString(slicing)
 
         self._dirtyFlags[Section.Labels] = False
Example #7
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        getOrCreateGroup(topGroup, "local_data")
        deleteIfPresent(topGroup, "Role Names")
        role_names = [name.encode("utf-8") for name in self.topLevelOperator.DatasetRoles.value]
        topGroup.create_dataset("Role Names", data=role_names)

        # Access the info group
        infoDir = getOrCreateGroup(topGroup, "infos")

        # Delete all infos
        infoDir.clear()

        # Rebuild the list of infos
        roleNames = self.topLevelOperator.DatasetRoles.value
        internal_datasets_to_keep = set()
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            laneGroupName = "lane{:04d}".format(laneIndex)
            laneGroup = infoDir.create_group(laneGroupName)
            for roleIndex, slot in enumerate(multislot):
                infoGroup = laneGroup.create_group(roleNames[roleIndex])
                if slot.ready():
                    datasetInfo = slot.value
                    if isinstance(datasetInfo, ProjectInternalDatasetInfo):
                        internal_datasets_to_keep.add(hdf5File[datasetInfo.inner_path])
                    for k, v in datasetInfo.to_json_data().items():
                        if v is not None:
                            infoGroup.create_dataset(k, data=v)
        if self.local_data_path.as_posix() in hdf5File:
            for dataset in hdf5File[self.local_data_path.as_posix()].values():
                if dataset not in internal_datasets_to_keep:
                    del hdf5File[dataset.name]
        self._dirty = False
Example #8
    def serialize(self, group):
        if not self.shouldSerialize(group):
            return
        deleteIfPresent(group, self.name)
        group = getOrCreateGroup(group, self.name)
        mainOperator = self.slot.getRealOperator()
        innerops = mainOperator.innerOperators
        for i, op in enumerate(innerops):
            gr = getOrCreateGroup(group, str(i))
            labels_gr = getOrCreateGroup(gr, str("labels"))
            if "labels" in op.Annotations.value.keys():
                for t in op.Annotations.value["labels"].keys():
                    t_gr = getOrCreateGroup(labels_gr, str(t))
                    for oid in op.Annotations.value["labels"][t].keys():
                        l = op.Annotations.value["labels"][t][oid]
                        dset = list(l)
                        if len(dset) > 0:
                            t_gr.create_dataset(name=str(oid), data=dset)

            divisions_gr = getOrCreateGroup(gr, str("divisions"))
            dset = []
            if "divisions" in op.Annotations.value.keys():
                for trackid in op.Annotations.value["divisions"].keys():
                    (children, t_parent) = op.Annotations.value["divisions"][trackid]
                    dset.append([trackid, children[0], children[1], t_parent])
            if len(dset) > 0:
                divisions_gr.create_dataset(name=str(i), data=dset)
        self.dirty = False
Example #9
    def serialize(self, group):
        if not self.shouldSerialize(group):
            return
        deleteIfPresent(group, self.name)
        group = getOrCreateGroup(group, self.name)
        mainOperator = self.slot.getRealOperator()

        for i in range(len(mainOperator)):
            subgroup = getOrCreateGroup(group, "{:04}".format(i))

            cleanBlockRois = self.blockslot[i].value
            for roi in cleanBlockRois:
                region_features_arr = self.slot[i](*roi).wait()
                assert region_features_arr.shape == (1, )
                region_features = region_features_arr[0]
                roi_grp = subgroup.create_group(name=str(roi))
                logger.debug('Saving region features into group: "{}"'.format(
                    roi_grp.name))
                for key, val in region_features.items():
                    plugin_group = getOrCreateGroup(roi_grp, key)
                    for featname, featval in val.items():
                        plugin_group.create_dataset(name=featname,
                                                    data=featval)

        self.dirty = False
Example #10
 def serialize(self, group):
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         dset = []
         for trackid in op.divisions.keys():
             (children, t_parent) = op.divisions[trackid]
             dset.append([trackid, children[0], children[1], t_parent])
         if len(dset) > 0:
             group.create_dataset(name=str(i), data=dset)
     self.dirty = False
Example #11
 def serialize(self, group):
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         dset = []
         for trackid in op.divisions.keys():
             (children, t_parent) = op.divisions[trackid]
             dset.append([trackid, children[0], children[1], t_parent])
         if len(dset) > 0:
             group.create_dataset(name=str(i), data=dset)
     self.dirty = False
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        split_settings_grp = getOrCreateGroup(topGroup, "split_settings")

        for laneIndex in range(len( self._topLevelOperator )):
            lane_grp = getOrCreateGroup(split_settings_grp, "{}".format( laneIndex ))
            opLaneView = self._topLevelOperator.getLane(laneIndex)
            if opLaneView.AnnotationFilepath.ready():
                annotation_filepath = opLaneView.AnnotationFilepath.value
                deleteIfPresent( lane_grp, "annotation_filepath" )
                lane_grp.create_dataset("annotation_filepath", data=annotation_filepath)

        # Now save the regular carving data.
        super( SplitBodyCarvingSerializer, self )._serializeToHdf5( topGroup, hdf5File, projectFilePath )
 def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
     with Tracer(traceLogger):
         # Can't store anything without both scales and features
         if not self.topLevelOperator.Scales.ready() \
         or not self.topLevelOperator.FeatureIds.ready():
             return
     
         # Delete previous entries if they exist
         deleteIfPresent(topGroup, 'Scales')
         deleteIfPresent(topGroup, 'FeatureIds')
         deleteIfPresent(topGroup, 'SelectionMatrix')
         deleteIfPresent(topGroup, 'FeatureListFilename')
         
         # Store the new values (as numpy arrays)
         
         topGroup.create_dataset('Scales', data=self.topLevelOperator.Scales.value)
         
         topGroup.create_dataset('FeatureIds', data=self.topLevelOperator.FeatureIds.value)
         
         if self.topLevelOperator.SelectionMatrix.ready():
             topGroup.create_dataset('SelectionMatrix', data=self.topLevelOperator.SelectionMatrix.value)
             
         if self.topLevelOperator.FeatureListFilename.ready():
             fname = str(self.topLevelOperator.FeatureListFilename.value) 
             if fname:
                 topGroup.create_dataset('FeatureListFilename', data=fname)
             
         self._dirty = False
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        split_settings_grp = getOrCreateGroup(topGroup, "split_settings")

        for laneIndex in range(len( self._topLevelOperator )):
            lane_grp = getOrCreateGroup(split_settings_grp, "{}".format( laneIndex ))
            opLaneView = self._topLevelOperator.getLane(laneIndex)
            if opLaneView.AnnotationFilepath.ready():
                annotation_filepath = opLaneView.AnnotationFilepath.value
                deleteIfPresent( lane_grp, "annotation_filepath" )
                lane_grp.create_dataset("annotation_filepath", data=annotation_filepath)

        # Now save the regular carving data.
        super( SplitBodyCarvingSerializer, self )._serializeToHdf5( topGroup, hdf5File, projectFilePath )
        self.__dirty = False
 def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
     # Can't store anything without both scales and features
     if not self.topLevelOperator.Scales.ready() \
     or not self.topLevelOperator.FeatureIds.ready():
         return
 
     # Delete previous entries if they exist
     deleteIfPresent(topGroup, 'Scales')
     deleteIfPresent(topGroup, 'FeatureIds')
     deleteIfPresent(topGroup, 'SelectionMatrix')
     deleteIfPresent(topGroup, 'FeatureListFilename')
     
     # Store the new values (as numpy arrays)
     
     topGroup.create_dataset('Scales', data=self.topLevelOperator.Scales.value)
     
     feature_ids = list(map(lambda s: s.encode('utf-8'), self.topLevelOperator.FeatureIds.value))
     topGroup.create_dataset('FeatureIds', data=feature_ids)
     
     if self.topLevelOperator.SelectionMatrix.ready():
         topGroup.create_dataset('SelectionMatrix', data=self.topLevelOperator.SelectionMatrix.value)
         
     if self.topLevelOperator.FeatureListFilename.ready():
         fnames = []
         for slot in self.topLevelOperator.FeatureListFilename:
             fnames.append(slot.value)
         if fnames:
              fnames = list(map(lambda s: s.encode('utf-8'), fnames))
             topGroup.create_dataset('FeatureListFilename', data=fnames)
         
     self._dirty = False
 def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
     # Can't store anything without both scales and features
     if not self.topLevelOperator.Scales.ready() \
     or not self.topLevelOperator.FeatureIds.ready():
         return
 
     # Delete previous entries if they exist
     deleteIfPresent(topGroup, 'Scales')
     deleteIfPresent(topGroup, 'FeatureIds')
     deleteIfPresent(topGroup, 'SelectionMatrix')
     deleteIfPresent(topGroup, 'FeatureListFilename')
     
     # Store the new values (as numpy arrays)
     
     topGroup.create_dataset('Scales', data=self.topLevelOperator.Scales.value)
     
     topGroup.create_dataset('FeatureIds', data=self.topLevelOperator.FeatureIds.value)
     
     if self.topLevelOperator.SelectionMatrix.ready():
         topGroup.create_dataset('SelectionMatrix', data=self.topLevelOperator.SelectionMatrix.value)
         
     if self.topLevelOperator.FeatureListFilename.ready():
         fname = str(self.topLevelOperator.FeatureListFilename.value) 
         if fname:
             topGroup.create_dataset('FeatureListFilename', data=fname)
         
     self._dirty = False
 def serialize(self, group):
     if not self.shouldSerialize(group):
         return
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     for i, op in enumerate(mainOperator.innerOperators):            
         ts = op._processedTimeSteps
         if len(ts) > 0:
             subgroup = getOrCreateGroup(group, str(i))
             subgroup.create_dataset(name='timesteps', data=list(ts))
             
             src = op._mem_h5
             subgroup.copy(src['/TranslationVectors'], subgroup, name='data')                
     self.dirty = False
Example #18
 def serialize(self, group):
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         gr = getOrCreateGroup(group, str(i))
         for t in op.labels.keys():
             t_gr = getOrCreateGroup(gr, str(t))
             for oid in op.labels[t].keys():
                 l = op.labels[t][oid]
                 dset = list(l)
                 if len(dset) > 0:
                     t_gr.create_dataset(name=str(oid), data=dset)
     self.dirty = False
Example #19
 def serialize(self, group):
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         gr = getOrCreateGroup(group, str(i))
         for t in op.labels.keys():
             t_gr = getOrCreateGroup(gr, str(t))
             for oid in op.labels[t].keys():
                 l = op.labels[t][oid]
                 dset = list(l)
                 if len(dset) > 0:
                     t_gr.create_dataset(name=str(oid), data=dset)
     self.dirty = False
Example #20
 def serialize(self, group):
     if not self.shouldSerialize(group):
         return
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         gr = getOrCreateGroup(group, str(i))
         for t in list(op.disappearances.keys()):
             t_gr = getOrCreateGroup(gr, str(t))
             for oid in list(op.disappearances[t].keys()):
                 oid_gr = getOrCreateGroup(t_gr, str(oid))
                 for track in list(op.disappearances[t][oid].keys()):
                     app = op.disappearances[t][oid][track]
                     if app:
                         oid_gr.create_dataset(name=str(track), data=app)
     self.dirty = False
Example #21
 def serialize(self, group):
     if not self.shouldSerialize(group):
         return
     deleteIfPresent(group, self.name)
     group = getOrCreateGroup(group, self.name)
     mainOperator = self.slot.getRealOperator()
     innerops = mainOperator.innerOperators
     for i, op in enumerate(innerops):
         gr = getOrCreateGroup(group, str(i))
         for t in list(op.disappearances.keys()):
             t_gr = getOrCreateGroup(gr, str(t))
             for oid in list(op.disappearances[t].keys()):
                 oid_gr = getOrCreateGroup(t_gr, str(oid))
                 for track in list(op.disappearances[t][oid].keys()):
                     app = op.disappearances[t][oid][track]
                     if app:
                         oid_gr.create_dataset(name=str(track), data=app)
     self.dirty = False
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        # Can't store anything without both scales and features
        if not self.topLevelOperator.Scales.ready() \
                or not self.topLevelOperator.FeatureIds.ready():
            return

        # Delete previous entries if they exist
        deleteIfPresent(topGroup, 'Scales')
        deleteIfPresent(topGroup, 'FeatureIds')
        deleteIfPresent(topGroup, 'SelectionMatrix')
        deleteIfPresent(topGroup, 'FeatureListFilename')
        deleteIfPresent(topGroup, 'ComputeIn2d')

        # Store the new values (as numpy arrays)

        topGroup.create_dataset('Scales',
                                data=self.topLevelOperator.Scales.value)

        feature_ids = list(
            map(lambda s: s.encode('utf-8'),
                self.topLevelOperator.FeatureIds.value))
        topGroup.create_dataset('FeatureIds', data=feature_ids)

        if self.topLevelOperator.SelectionMatrix.ready():
            topGroup.create_dataset(
                'SelectionMatrix',
                data=self.topLevelOperator.SelectionMatrix.value)

        if self.topLevelOperator.FeatureListFilename.ready():
            fnames = []
            for slot in self.topLevelOperator.FeatureListFilename:
                fnames.append(slot.value)
            if fnames:
                fnames = list(map(lambda s: s.encode('utf-8'), fnames))
                topGroup.create_dataset('FeatureListFilename', data=fnames)

        if self.topLevelOperator.ComputeIn2d.ready():
            topGroup.create_dataset(
                'ComputeIn2d', data=self.topLevelOperator.ComputeIn2d.value)

        self._dirty = False
    def serialize(self, group):
        if not self.shouldSerialize(group):
            return
        deleteIfPresent(group, self.name)
        group = getOrCreateGroup(group, self.name)
        mainOperator = self.slot.getRealOperator()

        for i in range(len(mainOperator)):
            subgroup = getOrCreateGroup(group, str(i))

            cleanBlockRois = self.blockslot[i].value
            for roi in cleanBlockRois:
                region_features_arr = self.outslot[i]( *roi ).wait()
                assert region_features_arr.shape == (1,)
                region_features = region_features_arr[0]
                roi_grp = subgroup.create_group(name=str(roi))
                logger.debug('Saving region features into group: "{}"'.format( roi_grp.name ))
                for key, val in region_features.items():
                    roi_grp.create_dataset(name=key, data=val)

        self.dirty = False
    def _serializeClassifiers(self, topGroup):
        with Tracer(traceLogger):
            deleteIfPresent(topGroup, 'Classifiers')
            self._dirtyFlags[Section.Classifiers] = False

            if not self.mainOperator.Classifiers.ready():
                return

            classifiers = self.mainOperator.Classifiers
            topGroup.require_group("Classifiers")
            for i in range(len(classifiers)):
                classifier_forests = classifiers[i].value
                # Classifier can be None if there isn't any training data yet.
                if classifier_forests is None:
                    return
                for forest in classifier_forests:
                    if forest is None:
                        return

                # Due to non-shared hdf5 dlls, vigra can't write directly to our open hdf5 group.
                # Instead, we'll use vigra to write the classifier to a temporary file.
                tmpDir = tempfile.mkdtemp()
                cachePath = os.path.join(tmpDir,
                                         'tmp_classifier_cache.h5').replace(
                                             '\\', '/')
                for j, forest in enumerate(classifier_forests):
                    forest.writeHDF5(
                        cachePath, 'ClassifierForests/Forest{:04d}'.format(j))

                # Open the temp file and copy to our project group
                with h5py.File(cachePath, 'r') as cacheFile:
                    grouppath = "Classifiers/Classifier%d" % i
                    topGroup.copy(cacheFile['ClassifierForests'], grouppath)

                os.remove(cachePath)
                os.removedirs(tmpDir)
Example #25
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        preproc = topGroup

        for opPre in self._o.innerOperators:
            mst = opPre._prepData[0]

            if mst is not None:

                # The values to be saved for sigma and filter are the
                # values of the last valid preprocess
                #!These may differ from the current settings!

                deleteIfPresent(preproc, "sigma")
                deleteIfPresent(preproc, "filter")
                deleteIfPresent(preproc, "graph")

                preproc.create_dataset("sigma", data=opPre.initialSigma)
                preproc.create_dataset("filter", data=opPre.initialFilter)

                preprocgraph = getOrCreateGroup(preproc, "graph")
                mst.saveH5G(preprocgraph)

            opPre._unsavedData = False
Example #26
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        preproc = topGroup

        for opPre in self._o.innerOperators:
            mst = opPre._prepData[0]

            if mst is not None:

                # The values to be saved for sigma and filter are the
                # values of the last valid preprocess
                #!These may differ from the current settings!

                deleteIfPresent(preproc, "sigma")
                deleteIfPresent(preproc, "filter")
                deleteIfPresent(preproc, "graph")

                preproc.create_dataset("sigma", data=opPre.initialSigma)
                preproc.create_dataset("filter", data=opPre.initialFilter)

                preprocgraph = getOrCreateGroup(preproc, "graph")
                mst.saveH5G(preprocgraph)

            opPre._unsavedData = False
    def _serializePredictions(self, topGroup, startProgress, endProgress):
        """
        Called when the currently stored predictions are dirty.
        If prediction storage is currently enabled, store them to the file.
        Otherwise, just delete them.
        (Avoid inconsistent project states, e.g. don't allow old predictions to be stored with a new classifier.)
        """
        with Tracer(traceLogger):
            # If the predictions are missing, then maybe the user wants them stored (even if they aren't dirty)
            if self._dirtyFlags[Section.Predictions] or 'Predictions' not in topGroup.keys():

                deleteIfPresent(topGroup, 'Predictions')
                
                # Disconnect the precomputed prediction inputs.
                for i,slot in enumerate( self.mainOperator.PredictionsFromDisk ):
                    slot.disconnect()

                if self.predictionStorageEnabled:
                    predictionDir = topGroup.create_group('Predictions')

                    failedToSave = False
                    try:                    
                        numImages = len(self.mainOperator.PredictionProbabilities)
        
                        if numImages > 0:
                            increment = (endProgress - startProgress) / float(numImages)
        
                        for imageIndex in range(numImages):
                            # Have we been cancelled?
                            if not self.predictionStorageEnabled:
                                break
        
                            datasetName = 'predictions{:04d}'.format(imageIndex)
        
                            progress = [startProgress]
        
                            # Use a big dataset writer to do this in chunks
                            opWriter = OpH5WriterBigDataset(graph=self.mainOperator.graph)
                            opWriter.hdf5File.setValue( predictionDir )
                            opWriter.hdf5Path.setValue( datasetName )
                            opWriter.Image.connect( self.mainOperator.PredictionProbabilities[imageIndex] )
                            
                            # Create the request
                            self._predictionStorageRequest = opWriter.WriteImage[...]
        
                            def handleProgress(percent):
                                # Stop sending progress if we were cancelled
                                if self.predictionStorageEnabled:
                                    progress[0] = startProgress + percent * (increment / 100.0)
                                    self.progressSignal.emit( progress[0] )
                            opWriter.progressSignal.subscribe( handleProgress )
        
                            finishedEvent = threading.Event()
                            def handleFinish(request):
                                finishedEvent.set()
        
                            def handleCancel(request):
                                self._predictionStorageRequest = None
                                finishedEvent.set()
        
                            # Trigger the write and wait for it to complete or cancel.
                            self._predictionStorageRequest.notify(handleFinish)
                            self._predictionStorageRequest.onCancel(handleCancel)
                            finishedEvent.wait()
                    except:
                        failedToSave = True
                        raise
                    finally:
                        # If we were cancelled, delete the predictions we just started
                        if not self.predictionStorageEnabled or failedToSave:
                            deleteIfPresent(predictionDir, datasetName)
                            self._predictionsPresent = False
                            startProgress = progress[0]
                        else:
                            # Re-load the operator with the prediction groups we just saved
                            self._deserializePredictions(topGroup)
Example #28
         shell.openProjectFile(projectFilename)
     
     workflowKwargs={'hintoverlayFile' : options.hintoverlayFile,
                     'pmapoverlayFile' : options.pmapoverlayFile }
     startShellGui( functools.partial(CarvingWorkflow, **workflowKwargs), loadProject)
     
 elif len(args) == 2:
     
     carvingGraphFilename = os.path.abspath(args[0]).replace("\\","/")
     projectFilename = args[1]
     
     projectFile = h5py.File(projectFilename, "a")
     
     preproc = getOrCreateGroup(projectFile,"preprocessing")
     
     deleteIfPresent(preproc, "sigma")
     deleteIfPresent(preproc, "filter")
     deleteIfPresent(preproc, "StorageVersion")
     deleteIfPresent(preproc, "graph")
     deleteIfPresent(preproc, "graphfile")
     
     preproc.create_dataset("sigma",data= 1.6)
     preproc.create_dataset("filter",data= 0)
     preproc.create_dataset("graphfile",data = carvingGraphFilename)
     preproc.create_dataset("StorageVersion",data = 0.1)
     
     preproc = getOrCreateGroup(projectFile,"preprocessing")
     dataSelect = getOrCreateGroup(projectFile,"Input Data")
     dataInfo = getOrCreateGroup(dataSelect,"infos")
     dataThisInfo = getOrCreateGroup(dataInfo,"info0000")
     deleteIfPresent(dataThisInfo,"filePath")
Example #29
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        obj = getOrCreateGroup(topGroup, "objects")
        for imageIndex, opCarving in enumerate(self._o.innerOperators):
            mst = opCarving._mst
            for name in opCarving._dirtyObjects:
                print("[CarvingSerializer] serializing %s" % name)

                if name in obj and name in mst.object_seeds_fg_voxels:
                    # group already exists
                    print("  -> changed")
                elif name not in mst.object_seeds_fg_voxels:
                    print("  -> deleted")
                else:
                    print("  -> added")

                g = getOrCreateGroup(obj, name)
                deleteIfPresent(g, "fg_voxels")
                deleteIfPresent(g, "bg_voxels")
                deleteIfPresent(g, "sv")
                deleteIfPresent(g, "bg_prio")
                deleteIfPresent(g, "no_bias_below")

                if not name in mst.object_seeds_fg_voxels:
                    # this object was deleted
                    deleteIfPresent(obj, name)
                    continue

                v = mst.object_seeds_fg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("fg_voxels", data=v)
                v = mst.object_seeds_bg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("bg_voxels", data=v)
                g.create_dataset("sv", data=mst.object_lut[name])

                d1 = numpy.asarray(mst.bg_priority[name], dtype=numpy.float32)
                d2 = numpy.asarray(mst.no_bias_below[name], dtype=numpy.int32)
                g.create_dataset("bg_prio", data=d1)
                g.create_dataset("no_bias_below", data=d2)

            opCarving._dirtyObjects = set()

            # save current seeds
            deleteIfPresent(topGroup, "fg_voxels")
            deleteIfPresent(topGroup, "bg_voxels")

            fg_voxels, bg_voxels = opCarving.get_label_voxels()
            if fg_voxels is None:
                return

            if fg_voxels[0].shape[0] > 0:
                v = [fg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("fg_voxels", data=v)

            if bg_voxels[0].shape[0] > 0:
                v = [bg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("bg_voxels", data=v)

            print("saved seeds")
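The voxel-packing idiom used throughout the carving serializers (three 1-D coordinate arrays combined into one (N, 3) dataset) is easier to see in isolation. A numpy-only sketch with made-up coordinates:

    import numpy

    # Three 1-D arrays of x, y and z seed coordinates, as stored per object.
    fg_voxels = (numpy.array([1, 2, 3]), numpy.array([4, 5, 6]), numpy.array([7, 8, 9]))

    # The form used above: turn each array into an (N, 1) column, then join the columns.
    v = [fg_voxels[i][:, numpy.newaxis] for i in range(3)]
    v = numpy.concatenate(v, axis=1)          # shape (N, 3), one row per voxel

    # Equivalent, and arguably clearer:
    assert (v == numpy.column_stack(fg_voxels)).all()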
Example #30
    def _serialize(self, group, name, slot):
        """Called when the currently stored predictions are dirty. If
        prediction storage is currently enabled, store them to the
        file. Otherwise, just delete them.

        (Avoid inconsistent project states, e.g. don't allow old
        predictions to be stored with a new classifier.)

        """
        predictionDir = group.create_group(self.name)

        # Disconnect the operators that might be using the old data.
        self.deserialize(group)
        
        failedToSave = False
        opWriter = None
        try:
            num = len(slot)
            if num > 0:
                increment = 100 / float(num)

            progress = 0
            for imageIndex in range(num):
                # Have we been cancelled?
                if not self.predictionStorageEnabled:
                    break

                datasetName = self.subname.format(imageIndex)

                # Use a big dataset writer to do this in chunks
                opWriter = OpH5WriterBigDataset(graph=self.operator.graph, parent = self.operator.parent)
                opWriter.hdf5File.setValue(predictionDir)
                opWriter.hdf5Path.setValue(datasetName)
                opWriter.Image.connect(slot[imageIndex])

                def handleProgress(percent):
                    # Stop sending progress if we were cancelled
                    if self.predictionStorageEnabled:
                        curprogress = progress + percent * (increment / 100.0)
                        self.progressSignal.emit(curprogress)
                opWriter.progressSignal.subscribe(handleProgress)

                # Create the request
                self._predictionStorageRequest = opWriter.WriteImage[...]

                # Must use a threading event here because if we wait on the 
                # request from within a "real" thread, it refuses to be cancelled.
                finishedEvent = threading.Event()
                def handleFinish(result):
                    finishedEvent.set()

                def handleCancel():
                    logger.info("Full volume prediction save CANCELLED.")
                    self._predictionStorageRequest = None
                    finishedEvent.set()

                # Trigger the write and wait for it to complete or cancel.
                self._predictionStorageRequest.notify_finished(handleFinish)
                self._predictionStorageRequest.notify_cancelled(handleCancel)
                self._predictionStorageRequest.submit() # Can't call wait().  See note above.
                finishedEvent.wait()
                progress += increment
                opWriter.cleanUp()
                opWriter = None
        except:
            failedToSave = True
            raise
        finally:
            if opWriter is not None:
                opWriter.cleanUp()

            # If we were cancelled, delete the predictions we just started
            if not self.predictionStorageEnabled or failedToSave:
                deleteIfPresent(group, name)
Example #31
    def _serialize(self, group, name, slot):
        """Called when the currently stored predictions are dirty. If
        prediction storage is currently enabled, store them to the
        file. Otherwise, just delete them.

        (Avoid inconsistent project states, e.g. don't allow old
        predictions to be stored with a new classifier.)

        """
        predictionDir = group.create_group(self.name)

        # Disconnect the operators that might be using the old data.
        self.deserialize(group)

        failedToSave = False
        opWriter = None
        try:
            num = len(slot)
            if num > 0:
                increment = 100 / float(num)

            progress = 0
            for imageIndex in range(num):
                # Have we been cancelled?
                if not self.predictionStorageEnabled:
                    break

                datasetName = self.subname.format(imageIndex)

                # Use a big dataset writer to do this in chunks
                opWriter = OpH5WriterBigDataset(graph=self.operator.graph,
                                                parent=self.operator.parent)
                opWriter.hdf5File.setValue(predictionDir)
                opWriter.hdf5Path.setValue(datasetName)
                opWriter.Image.connect(slot[imageIndex])

                def handleProgress(percent):
                    # Stop sending progress if we were cancelled
                    if self.predictionStorageEnabled:
                        curprogress = progress + percent * (increment / 100.0)
                        self.progressSignal(curprogress)

                opWriter.progressSignal.subscribe(handleProgress)

                # Create the request
                self._predictionStorageRequest = opWriter.WriteImage[...]

                # Must use a threading event here because if we wait on the
                # request from within a "real" thread, it refuses to be cancelled.
                finishedEvent = threading.Event()

                def handleFinish(result):
                    finishedEvent.set()

                def handleCancel():
                    logger.info("Full volume prediction save CANCELLED.")
                    self._predictionStorageRequest = None
                    finishedEvent.set()

                # Trigger the write and wait for it to complete or cancel.
                self._predictionStorageRequest.notify_finished(handleFinish)
                self._predictionStorageRequest.notify_cancelled(handleCancel)
                self._predictionStorageRequest.submit(
                )  # Can't call wait().  See note above.
                finishedEvent.wait()
                progress += increment
                opWriter.cleanUp()
                opWriter = None
        except:
            failedToSave = True
            raise
        finally:
            if opWriter is not None:
                opWriter.cleanUp()

            # If we were cancelled, delete the predictions we just started
            if not self.predictionStorageEnabled or failedToSave:
                deleteIfPresent(group, name)
Example #32
    def test_deleteIfPresent_2(self):
        self.assertTrue("b" not in self.tmpFile)

        deleteIfPresent(self.tmpFile, "b")

        self.assertTrue("b" not in self.tmpFile)
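Both deleteIfPresent tests refer to a self.tmpFile fixture that is not shown on this page. A hypothetical setUp consistent with them (the file handling and group names are assumptions) might be:

    import os
    import tempfile
    import unittest

    import h5py

    class TestDeleteIfPresent(unittest.TestCase):
        def setUp(self):
            self.tmpDir = tempfile.mkdtemp()
            self.tmpFile = h5py.File(os.path.join(self.tmpDir, "test.h5"), "w")
            self.tmpFile.create_group("a")   # "a" exists; "b" is deliberately never created

        def tearDown(self):
            self.tmpFile.close()
            os.remove(os.path.join(self.tmpDir, "test.h5"))
            os.removedirs(self.tmpDir)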
Example #33
         shell.openProjectFile(projectFilename)
     
     workflowKwargs={'hintoverlayFile' : options.hintoverlayFile,
                     'pmapoverlayFile' : options.pmapoverlayFile }
     startShellGui( functools.partial(CarvingWorkflow, **workflowKwargs), loadProject)
     
 elif len(args) == 2:
     
     carvingGraphFilename = os.path.abspath(args[0]).replace("\\","/")
     projectFilename = args[1]
     
     projectFile = h5py.File(projectFilename, "a")
     
     preproc = getOrCreateGroup(projectFile,"preprocessing")
     
     deleteIfPresent(preproc, "sigma")
     deleteIfPresent(preproc, "filter")
     deleteIfPresent(preproc, "StorageVersion")
     deleteIfPresent(preproc, "graph")
     deleteIfPresent(preproc, "graphfile")
     
     preproc.create_dataset("sigma",data= 1.6)
     preproc.create_dataset("filter",data= 0)
     preproc.create_dataset("graphfile",data = carvingGraphFilename)
     preproc.create_dataset("StorageVersion",data = 0.1)
     
     preproc = getOrCreateGroup(projectFile,"preprocessing")
     dataSelect = getOrCreateGroup(projectFile,"Input Data")
     dataInfo = getOrCreateGroup(dataSelect,"infos")
     dataThisInfo = getOrCreateGroup(dataInfo,"info0000")
     deleteIfPresent(dataThisInfo,"filePath")
Example #34
    def test_deleteIfPresent_1(self):
        self.assertTrue("a" in self.tmpFile)

        deleteIfPresent(self.tmpFile, "a")

        self.assertTrue("a" not in self.tmpFile)
Example #35
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        obj = getOrCreateGroup(topGroup, "objects")
        for imageIndex, opCarving in enumerate(self._o.innerOperators):
            mst = opCarving._mst

            if mst is None:
                # Nothing to save
                return
            # Populate a list of objects to save:
            objects_to_save = set(list(mst.object_names.keys()))
            objects_already_saved = set(list(topGroup["objects"]))
            # 1.) all objects that are in mst.object_names that are not in saved
            objects_to_save = objects_to_save.difference(objects_already_saved)

            # 2.) add opCarving._dirtyObjects:
            objects_to_save = objects_to_save.union(opCarving._dirtyObjects)

            for name in objects_to_save:
                logger.info("[CarvingSerializer] serializing %s" % name)

                if name in obj and name in mst.object_seeds_fg_voxels:
                    # group already exists
                    logger.info("  -> changed")
                elif name not in mst.object_seeds_fg_voxels:
                    logger.info("  -> deleted")
                else:
                    logger.info("  -> added")

                if name not in mst.object_seeds_fg_voxels:
                    # this object was deleted
                    deleteIfPresent(obj, name)
                    continue

                g = getOrCreateGroup(obj, name)
                deleteIfPresent(g, "fg_voxels")
                deleteIfPresent(g, "bg_voxels")
                deleteIfPresent(g, "sv")
                deleteIfPresent(g, "bg_prio")
                deleteIfPresent(g, "no_bias_below")

                v = mst.object_seeds_fg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("fg_voxels", data=v)
                v = mst.object_seeds_bg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("bg_voxels", data=v)
                g.create_dataset("sv", data=mst.object_lut[name])

                d1 = numpy.asarray(mst.bg_priority[name], dtype=numpy.float32)
                d2 = numpy.asarray(mst.no_bias_below[name], dtype=numpy.int32)
                g.create_dataset("bg_prio", data=d1)
                g.create_dataset("no_bias_below", data=d2)

            opCarving._dirtyObjects = set()

            # save current seeds
            deleteIfPresent(topGroup, "fg_voxels")
            deleteIfPresent(topGroup, "bg_voxels")

            fg_voxels, bg_voxels = opCarving.get_label_voxels()
            if fg_voxels is None:
                return

            if fg_voxels[0].shape[0] > 0:
                v = [fg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("fg_voxels", data=v)

            if bg_voxels[0].shape[0] > 0:
                v = [bg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("bg_voxels", data=v)

            logger.info("saved seeds")
Example #36
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        obj = getOrCreateGroup(topGroup, "objects")
        for imageIndex, opCarving in enumerate( self._o.innerOperators ):
            mst = opCarving._mst

            # Populate a list of objects to save:
            objects_to_save = set(list(mst.object_names.keys()))
            objects_already_saved = set(list(topGroup["objects"]))
            # 1.) all objects that are in mst.object_names that are not in saved
            objects_to_save = objects_to_save.difference(objects_already_saved)

            # 2.) add opCarving._dirtyObjects:
            objects_to_save = objects_to_save.union(opCarving._dirtyObjects)

            for name in objects_to_save:
                logger.info( "[CarvingSerializer] serializing %s" % name )
               
                if name in obj and name in mst.object_seeds_fg_voxels: 
                    #group already exists
                    logger.info( "  -> changed" )
                elif name not in mst.object_seeds_fg_voxels:
                    logger.info( "  -> deleted" )
                else:
                    logger.info( "  -> added" )
                    
                g = getOrCreateGroup(obj, name)
                deleteIfPresent(g, "fg_voxels")
                deleteIfPresent(g, "bg_voxels")
                deleteIfPresent(g, "sv")
                deleteIfPresent(g, "bg_prio")
                deleteIfPresent(g, "no_bias_below")
                
                if not name in mst.object_seeds_fg_voxels:
                    #this object was deleted
                    deleteIfPresent(obj, name)
                    continue
               
                v = mst.object_seeds_fg_voxels[name]
                v = [v[i][:,numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("fg_voxels", data=v)
                v = mst.object_seeds_bg_voxels[name]
                v = [v[i][:,numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("bg_voxels", data=v)
                g.create_dataset("sv", data=mst.object_lut[name])
                
                d1 = numpy.asarray(mst.bg_priority[name], dtype=numpy.float32)
                d2 = numpy.asarray(mst.no_bias_below[name], dtype=numpy.int32)
                g.create_dataset("bg_prio", data=d1)
                g.create_dataset("no_bias_below", data=d2)
                
            opCarving._dirtyObjects = set()
        
            # save current seeds
            deleteIfPresent(topGroup, "fg_voxels")
            deleteIfPresent(topGroup, "bg_voxels")

            fg_voxels, bg_voxels = opCarving.get_label_voxels()
            if fg_voxels is None:
                return

            if fg_voxels[0].shape[0] > 0:
                v = [fg_voxels[i][:,numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("fg_voxels", data = v)

            if bg_voxels[0].shape[0] > 0:
                v = [bg_voxels[i][:,numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("bg_voxels", data = v)

            logger.info( "saved seeds" )
Example #37
    def test_deleteIfPresent_1(self):
        self.assertTrue("a" in self.tmpFile)

        deleteIfPresent(self.tmpFile, "a")

        self.assertTrue("a" not in self.tmpFile)
    def _serializePredictions(self, topGroup, startProgress, endProgress):
        """
        Called when the currently stored predictions are dirty.
        If prediction storage is currently enabled, store them to the file.
        Otherwise, just delete them.
        (Avoid inconsistent project states, e.g. don't allow old predictions to be stored with a new classifier.)
        """
        with Tracer(traceLogger):
            # If the predictions are missing, then maybe the user wants them stored (even if they aren't dirty)
            if self._dirtyFlags[Section.Predictions] or 'Predictions' not in topGroup.keys():

                deleteIfPresent(topGroup, 'Predictions')

                # Disconnect the precomputed prediction inputs.
                for i, slot in enumerate(
                        self.mainOperator.PredictionsFromDisk):
                    slot.disconnect()

                if self.predictionStorageEnabled:
                    predictionDir = topGroup.create_group('Predictions')

                    failedToSave = False
                    try:
                        numImages = len(
                            self.mainOperator.PredictionProbabilities)

                        if numImages > 0:
                            increment = (endProgress -
                                         startProgress) / float(numImages)

                        for imageIndex in range(numImages):
                            # Have we been cancelled?
                            if not self.predictionStorageEnabled:
                                break

                            datasetName = 'predictions{:04d}'.format(
                                imageIndex)

                            progress = [startProgress]

                            # Use a big dataset writer to do this in chunks
                            opWriter = OpH5WriterBigDataset(
                                graph=self.mainOperator.graph)
                            opWriter.hdf5File.setValue(predictionDir)
                            opWriter.hdf5Path.setValue(datasetName)
                            opWriter.Image.connect(
                                self.mainOperator.
                                PredictionProbabilities[imageIndex])

                            # Create the request
                            self._predictionStorageRequest = opWriter.WriteImage[
                                ...]

                            def handleProgress(percent):
                                # Stop sending progress if we were cancelled
                                if self.predictionStorageEnabled:
                                    progress[0] = startProgress + percent * (
                                        increment / 100.0)
                                    self.progressSignal.emit(progress[0])

                            opWriter.progressSignal.subscribe(handleProgress)

                            finishedEvent = threading.Event()

                            def handleFinish(request):
                                finishedEvent.set()

                            def handleCancel(request):
                                self._predictionStorageRequest = None
                                finishedEvent.set()

                            # Trigger the write and wait for it to complete or cancel.
                            self._predictionStorageRequest.notify(handleFinish)
                            self._predictionStorageRequest.onCancel(
                                handleCancel)
                            finishedEvent.wait()
                    except:
                        failedToSave = True
                        raise
                    finally:
                        # If we were cancelled, delete the predictions we just started
                        if not self.predictionStorageEnabled or failedToSave:
                            deleteIfPresent(predictionDir, datasetName)
                            self._predictionsPresent = False
                            startProgress = progress[0]
                        else:
                            # Re-load the operator with the prediction groups we just saved
                            self._deserializePredictions(topGroup)
Example #39
    def test_deleteIfPresent_2(self):
        self.assertTrue("b" not in self.tmpFile)

        deleteIfPresent(self.tmpFile, "b")

        self.assertTrue("b" not in self.tmpFile)
Example #40
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        # Write any missing local datasets to the local_data group
        localDataGroup = getOrCreateGroup(topGroup, 'local_data')
        wroteInternalData = False
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            for roleIndex, slot in enumerate( multislot ):
                if not slot.ready():
                    continue
                info = slot.value
                # If this dataset should be stored in the project, but it isn't there yet
                if  info.location == DatasetInfo.Location.ProjectInternal \
                and info.datasetId not in localDataGroup.keys():
                    # Obtain the data from the corresponding output and store it to the project.
                    dataSlot = self.topLevelOperator._NonTransposedImageGroup[laneIndex][roleIndex]

                    opWriter = OpH5WriterBigDataset(parent=self.topLevelOperator.parent,
                                                    graph=self.topLevelOperator.graph)
                    try:
                        # Compression slows down browsing a lot, and raw data tends to be
                        # noisy and doesn't compress very well anyway.
                        opWriter.CompressionEnabled.setValue(False)
                        opWriter.hdf5File.setValue(localDataGroup)
                        opWriter.hdf5Path.setValue(info.datasetId)
                        opWriter.Image.connect(dataSlot)

                        # Trigger the copy
                        success = opWriter.WriteImage.value
                        assert success
                    finally:
                        opWriter.cleanUp()
    
                    # Add axistags and drange attributes, in case someone uses this dataset outside ilastik
                    localDataGroup[info.datasetId].attrs['axistags'] = dataSlot.meta.axistags.toJSON()
                    if dataSlot.meta.drange is not None:
                        localDataGroup[info.datasetId].attrs['drange'] = dataSlot.meta.drange
    
                    # Make sure the dataSlot's axistags are updated with the dataset as we just wrote it
                    # (The top-level operator may use an OpReorderAxes, which changed the axisorder)
                    info.axistags = dataSlot.meta.axistags
    
                    wroteInternalData = True

        # Construct a list of all the local dataset ids we want to keep
        localDatasetIds = set()
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            for roleIndex, slot in enumerate(multislot):
                if slot.ready() and slot.value.location == DatasetInfo.Location.ProjectInternal:
                    localDatasetIds.add( slot.value.datasetId )
        
        # Delete any datasets in the project that aren't needed any more
        for datasetName in localDataGroup.keys():
            if datasetName not in localDatasetIds:
                del localDataGroup[datasetName]

        if wroteInternalData:
            # We can only re-configure the operator if we're not saving a snapshot
            # We know we're saving a snapshot if the project file isn't the one we deserialized with.
            if self._projectFilePath is None or self._projectFilePath == projectFilePath:
                # Force the operator to setupOutputs() again so it gets data from the project, not external files
                firstInfo = self.topLevelOperator.DatasetGroup[0][0].value
                self.topLevelOperator.DatasetGroup[0][0].setValue(firstInfo, check_changed=False)

        deleteIfPresent(topGroup, 'Role Names')
        topGroup.create_dataset('Role Names', data=self.topLevelOperator.DatasetRoles.value)

        # Access the info group
        infoDir = getOrCreateGroup(topGroup, 'infos')
        
        # Delete all infos
        for infoName in infoDir.keys():
            del infoDir[infoName]
                
        # Rebuild the list of infos
        roleNames = self.topLevelOperator.DatasetRoles.value
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            laneGroupName = 'lane{:04d}'.format(laneIndex)
            laneGroup = infoDir.create_group( laneGroupName )
            
            for roleIndex, slot in enumerate(multislot):
                infoGroup = laneGroup.create_group( roleNames[roleIndex] )
                if slot.ready():
                    datasetInfo = slot.value
                    locationString = self.LocationStrings[datasetInfo.location]
                    infoGroup.create_dataset('location', data=locationString)
                    infoGroup.create_dataset('filePath', data=datasetInfo.filePath)
                    infoGroup.create_dataset('datasetId', data=datasetInfo.datasetId)
                    infoGroup.create_dataset('allowLabels', data=datasetInfo.allowLabels)
                    infoGroup.create_dataset('nickname', data=datasetInfo.nickname)
                    infoGroup.create_dataset('fromstack', data=datasetInfo.fromstack)
                    if datasetInfo.drange is not None:
                        infoGroup.create_dataset('drange', data=datasetInfo.drange)

                    # Pull the axistags from the NonTransposedImage, 
                    #  which is what the image looks like before 'forceAxisOrder' is applied, 
                    #  and before 'c' is automatically appended
                    axistags = self.topLevelOperator._NonTransposedImageGroup[laneIndex][roleIndex].meta.axistags
                    infoGroup.create_dataset('axistags', data=axistags.toJSON())
                    axisorder = "".join(tag.key for tag in axistags)
                    infoGroup.create_dataset('axisorder', data=axisorder)
                    if datasetInfo.subvolume_roi is not None:
                        infoGroup.create_dataset('subvolume_roi', data=datasetInfo.subvolume_roi)

        self._dirty = False
Example #41
0
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        # Write any missing local datasets to the local_data group
        localDataGroup = getOrCreateGroup(topGroup, 'local_data')
        wroteInternalData = False
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            for roleIndex, slot in enumerate( multislot ):
                if not slot.ready():
                    continue
                info = slot.value
                # If this dataset should be stored in the project, but it isn't there yet
                if  info.location == DatasetInfo.Location.ProjectInternal \
                and info.datasetId not in localDataGroup.keys():
                    # Obtain the data from the corresponding output and store it to the project.
                    dataSlot = self.topLevelOperator._NonTransposedImageGroup[laneIndex][roleIndex]

                    opWriter = OpH5WriterBigDataset(parent=self.topLevelOperator.parent,
                                                    graph=self.topLevelOperator.graph)
                    try:
                        # Compression slows down browsing a lot, and raw data tends to be
                        # noisy and doesn't compress very well anyway.
                        opWriter.CompressionEnabled.setValue(False)
                        opWriter.hdf5File.setValue(localDataGroup)
                        opWriter.hdf5Path.setValue(info.datasetId)
                        opWriter.Image.connect(dataSlot)

                        # Trigger the copy
                        success = opWriter.WriteImage.value
                        assert success
                    finally:
                        opWriter.cleanUp()
    
                    # Add axistags and drange attributes, in case someone uses this dataset outside ilastik
                    localDataGroup[info.datasetId].attrs['axistags'] = dataSlot.meta.axistags.toJSON()
                    if dataSlot.meta.drange is not None:
                        localDataGroup[info.datasetId].attrs['drange'] = dataSlot.meta.drange
    
                    # Make sure the dataSlot's axistags are updated with the dataset as we just wrote it
                    # (The top-level operator may use an OpReorderAxes, which changed the axisorder)
                    info.axistags = dataSlot.meta.axistags
    
                    wroteInternalData = True

        # Construct a list of all the local dataset ids we want to keep
        localDatasetIds = set()
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            for roleIndex, slot in enumerate(multislot):
                if slot.ready() and slot.value.location == DatasetInfo.Location.ProjectInternal:
                    localDatasetIds.add( slot.value.datasetId )
        
        # Delete any datasets in the project that aren't needed any more
        for datasetName in localDataGroup.keys():
            if datasetName not in localDatasetIds:
                del localDataGroup[datasetName]

        if wroteInternalData:
            # We can only re-configure the operator if we're not saving a snapshot
            # We know we're saving a snapshot if the project file isn't the one we deserialized with.
            if self._projectFilePath is None or self._projectFilePath == projectFilePath:
                # Force the operator to setupOutputs() again so it gets data from the project, not external files
                firstInfo = self.topLevelOperator.DatasetGroup[0][0].value
                self.topLevelOperator.DatasetGroup[0][0].setValue(firstInfo, check_changed=False)

        deleteIfPresent(topGroup, 'Role Names')
        topGroup.create_dataset('Role Names', data=self.topLevelOperator.DatasetRoles.value)

        # Access the info group
        infoDir = getOrCreateGroup(topGroup, 'infos')
        
        # Delete all infos
        for infoName in infoDir.keys():
            del infoDir[infoName]
                
        # Rebuild the list of infos
        roleNames = self.topLevelOperator.DatasetRoles.value
        for laneIndex, multislot in enumerate(self.topLevelOperator.DatasetGroup):
            laneGroupName = 'lane{:04d}'.format(laneIndex)
            laneGroup = infoDir.create_group( laneGroupName )
            
            for roleIndex, slot in enumerate(multislot):
                infoGroup = laneGroup.create_group( roleNames[roleIndex] )
                if slot.ready():
                    datasetInfo = slot.value
                    locationString = self.LocationStrings[datasetInfo.location]
                    infoGroup.create_dataset('location', data=locationString)
                    infoGroup.create_dataset('filePath', data=datasetInfo.filePath)
                    infoGroup.create_dataset('datasetId', data=datasetInfo.datasetId)
                    infoGroup.create_dataset('allowLabels', data=datasetInfo.allowLabels)
                    infoGroup.create_dataset('nickname', data=datasetInfo.nickname)
                    if datasetInfo.drange is not None:
                        infoGroup.create_dataset('drange', data=datasetInfo.drange)
                    if datasetInfo.axistags is not None:
                        infoGroup.create_dataset('axistags', data=datasetInfo.axistags.toJSON())
                        axisorder = "".join(tag.key for tag in datasetInfo.axistags)
                        infoGroup.create_dataset('axisorder', data=axisorder)

        self._dirty = False
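Both versions of this serializer write one subgroup per lane under an 'infos' group, with one subgroup per role holding small scalar datasets such as 'location', 'filePath' and 'nickname'. A rough sketch of reading that layout back with h5py is below; the project filename and the top-level group name ('Input Data') are assumptions for illustration, not something these examples specify.

import h5py

with h5py.File("MyProject.ilp", "r") as project:
    infoDir = project["Input Data/infos"]
    for laneName, laneGroup in infoDir.items():        # e.g. 'lane0000'
        for roleName, infoGroup in laneGroup.items():  # e.g. 'Raw Data'
            if "location" not in infoGroup:
                continue  # the slot was not ready when the project was saved
            location = infoGroup["location"][()]
            filePath = infoGroup["filePath"][()]
            print(laneName, roleName, location, filePath)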
Example #42
0
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        obj = getOrCreateGroup(topGroup, "objects")
        for imageIndex, opCarving in enumerate(self._o.innerOperators):
            mst = opCarving._mst
            for name in opCarving._dirtyObjects:
                print "[CarvingSerializer] serializing %s" % name

                if name in obj and name in mst.object_seeds_fg_voxels:
                    #group already exists
                    print "  -> changed"
                elif name not in mst.object_seeds_fg_voxels:
                    print "  -> deleted"
                else:
                    print "  -> added"

                g = getOrCreateGroup(obj, name)
                deleteIfPresent(g, "fg_voxels")
                deleteIfPresent(g, "bg_voxels")
                deleteIfPresent(g, "sv")
                deleteIfPresent(g, "bg_prio")
                deleteIfPresent(g, "no_bias_below")

                if name not in mst.object_seeds_fg_voxels:
                    # this object was deleted
                    deleteIfPresent(obj, name)
                    continue

                v = mst.object_seeds_fg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("fg_voxels", data=v)
                v = mst.object_seeds_bg_voxels[name]
                v = [v[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                g.create_dataset("bg_voxels", data=v)
                g.create_dataset("sv", data=mst.object_lut[name])

                d1 = numpy.asarray(mst.bg_priority[name], dtype=numpy.float32)
                d2 = numpy.asarray(mst.no_bias_below[name], dtype=numpy.int32)
                g.create_dataset("bg_prio", data=d1)
                g.create_dataset("no_bias_below", data=d2)

            opCarving._dirtyObjects = set()

            # save current seeds
            deleteIfPresent(topGroup, "fg_voxels")
            deleteIfPresent(topGroup, "bg_voxels")

            fg_voxels, bg_voxels = opCarving.get_label_voxels()
            if fg_voxels is None:
                return

            if fg_voxels[0].shape[0] > 0:
                v = [fg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("fg_voxels", data=v)

            if bg_voxels[0].shape[0] > 0:
                v = [bg_voxels[i][:, numpy.newaxis] for i in range(3)]
                v = numpy.concatenate(v, axis=1)
                topGroup.create_dataset("bg_voxels", data=v)

            print "saved seeds"