Example #1
    def _checkDataConstraint(self, *args):
        """
        The batch workflow uses the same classifier as the training workflow,
        and therefore the batch datasets must be compatible with the training datasets in certain respects.
        This function tests those constraints by comparing the batch input against an (arbitrary) training dataset,
        and raises a DatasetConstraintError if something is incompatible.
        """
        if not self.ConstraintDataset.ready() or not self.RawData.ready():
            return

        trainingMeta = self.ConstraintDataset.meta
        batchMeta = self.RawData.meta

        # Must have same number of channels
        training_channels = trainingMeta.getTaggedShape()['c']
        batch_channels = batchMeta.getTaggedShape()['c']
        if training_channels != batch_channels:
            raise DatasetConstraintError("Batch Prediction Input",
                                         "Batch input must have the same number of channels as training input."\
                                         "(Batch has {} channels, but training input used {}"\
                                         "".format( batch_channels, training_channels ))

        # Other than time, Must have same set of axes (but not necessarily in the same order)
        training_axes = set(trainingMeta.getAxisKeys())
        batch_axes = set(batchMeta.getAxisKeys())
        training_axes.discard('t')
        batch_axes.discard('t')

        if training_axes != batch_axes:
            raise DatasetConstraintError("Batch Prediction Input",
                                         "Batch input file does not have the same spatial input axes as the training input:"\
                                         "has {}, expected {}".format( batch_axes, training_axes ))
    def _checkConstraints(self, laneIndex):
        """
        Ensure that all input images have the same number of channels.
        """
        thisLaneTaggedShape = self.InputImages[laneIndex].meta.getTaggedShape()

        # Find a different lane and use it for comparison
        validShape = thisLaneTaggedShape
        for i, slot in enumerate(self.InputImages):
            if slot.ready() and i != laneIndex:
                validShape = slot.meta.getTaggedShape()
                break

        if validShape['c'] != thisLaneTaggedShape['c']:
            raise DatasetConstraintError(
                 "Pixel Classification",
                 "All input images must have the same number of channels.  "\
                 "Your new image has {} channel(s), but your other images have {} channel(s)."\
                 .format( thisLaneTaggedShape['c'], validShape['c'] ) )

        if len(validShape) != len(thisLaneTaggedShape):
            raise DatasetConstraintError(
                 "Pixel Classification",
                 "All input images must have the same dimensionality.  "\
                 "Your new image has {} dimensions (including channel), but your other images have {} dimensions."\
                 .format( len(thisLaneTaggedShape), len(validShape) ) )
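
A note on the pattern above: getTaggedShape() returns an ordered mapping from axis keys ('t', 'x', 'y', 'z', 'c') to sizes, so axis compatibility reduces to set arithmetic. The following self-contained sketch reproduces the time-agnostic axis comparison with plain dicts and a minimal stand-in for DatasetConstraintError (both stand-ins are illustrative only, not the lazyflow API):

    from collections import OrderedDict

    class DatasetConstraintError(Exception):
        """Minimal stand-in for ilastik's DatasetConstraintError."""
        def __init__(self, applet_name, message):
            super().__init__("{}: {}".format(applet_name, message))

    def check_axes_match(training_tagged_shape, batch_tagged_shape):
        # Compare axis sets, ignoring the time axis and the axis order.
        training_axes = set(training_tagged_shape) - {'t'}
        batch_axes = set(batch_tagged_shape) - {'t'}
        if training_axes != batch_axes:
            raise DatasetConstraintError(
                "Batch Prediction Input",
                "Batch input has axes {}, expected {}".format(
                    sorted(batch_axes), sorted(training_axes)))

    # Same spatial/channel axes, different order and no time axis: accepted.
    check_axes_match(OrderedDict([('t', 5), ('x', 100), ('y', 100), ('c', 3)]),
                     OrderedDict([('y', 50), ('x', 80), ('c', 3)]))
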
Example #3
    def _checkDataConstraint(self, *args):
        """
        The batch workflow uses the same classifier as the training workflow,
        and therefore the batch datasets must be compatible with the training datasets in certain respects.
        This function tests those constraints by comparing the batch input against an (arbitrary) training dataset,
        and raises a DatasetConstraintError if something is incompatible.
        """
        if not self.ConstraintDataset.ready() or not self.RawData.ready():
            return

        dataTrain = self.ConstraintDataset.meta
        dataBatch = self.RawData.meta

        # Must have same dimensionality (but not necessarily the same shape)
        if len(dataTrain.shape) != len(dataBatch.shape):
            raise DatasetConstraintError(
                "Batch Prediction Input",
                "Batch input must have the same dimension as training input.")

        # Must have same number of channels
        if dataTrain.getTaggedShape()['c'] != dataBatch.getTaggedShape()['c']:
            raise DatasetConstraintError(
                "Batch Prediction Input",
                "Batch input must have the same number of channels as training input."
            )

        # Must have same set of axes (but not necessarily in the same order)
        if set(dataTrain.getAxisKeys()) != set(dataBatch.getAxisKeys()):
            raise DatasetConstraintError(
                "Batch Prediction Input",
                "Batch input axis must fit axis of training input.")
    def _checkConstraints(self, *args):
        if self.RawImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            if rawTaggedShape['t'] < 2:
                raise DatasetConstraintError(
                    "Tracking",
                    "For tracking, the dataset must have a time axis with at least 2 images.   " \
                    "Please load time-series data instead. See user documentation for details.")

        if self.LabelImage.ready():
            segmentationTaggedShape = self.LabelImage.meta.getTaggedShape()
            if segmentationTaggedShape['t'] < 2:
                raise DatasetConstraintError(
                    "Tracking",
                    "For tracking, the dataset must have a time axis with at least 2 images.   " \
                    "Please load time-series data instead. See user documentation for details.")

        if self.RawImage.ready() and self.LabelImage.ready():
            rawTaggedShape['c'] = None
            segmentationTaggedShape['c'] = None
            if dict(rawTaggedShape) != dict(segmentationTaggedShape):
                raise DatasetConstraintError("Tracking",
                                             "For tracking, the raw data and the prediction maps must contain the same " \
                                             "number of timesteps and the same shape.   " \
                                             "Your raw image has a shape of (t, x, y, z, c) = {}, whereas your prediction image has a " \
                                             "shape of (t, x, y, z, c) = {}" \
                                             .format(self.RawImage.meta.shape, self.BinaryImage.meta.shape))
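
The tracking check above uses a recurring idiom in these snippets: neutralize the channel entry of each tagged shape, then compare the remaining axes and sizes wholesale. A minimal sketch of that idiom (plain dicts again, purely illustrative):

    def shapes_match_except_channel(tagged_a, tagged_b):
        # Overwrite the channel entry in copies, then compare everything
        # else: axis keys and their sizes must agree exactly.
        return dict(tagged_a, c=None) == dict(tagged_b, c=None)

    assert shapes_match_except_channel({'t': 4, 'x': 10, 'y': 10, 'c': 1},
                                       {'t': 4, 'x': 10, 'y': 10, 'c': 3})
    assert not shapes_match_except_channel({'t': 4, 'x': 10, 'y': 10, 'c': 1},
                                           {'t': 5, 'x': 10, 'y': 10, 'c': 1})
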
Example #5
    def _checkConstraints(self, *args):
        slot = self.InputData
        numChannels = slot.meta.getTaggedShape()['c']
        if numChannels != 1:
            raise DatasetConstraintError(
                "Carving", "Input image must have exactly one channel.  " +
                "You attempted to add a dataset with {} channels".format(
                    numChannels))

        sh = slot.meta.shape
        ax = slot.meta.axistags
        if len(sh) != 5:
            # Raise a regular exception.  This error is for developers, not users.
            raise RuntimeError("was expecting a 5D dataset, got shape=%r" %
                               (sh, ))
        if slot.meta.getTaggedShape()['t'] != 1:
            raise DatasetConstraintError(
                "Carving",
                "Input image must not have more than one time slice.  " +
                "You attempted to add a dataset with {} time slices".format(
                    slot.meta.getTaggedShape()['t']))

        for i in range(1, 4):
            if not ax[i].isSpatial():
                # This is for developers.  Don't need a user-friendly error.
                raise RuntimeError("%d-th axis %r is not spatial" % (i, ax[i]))
    def _checkConstraints(self, *args):
        slot = self.Input

        sh = slot.meta.shape
        ax = slot.meta.axistags
        if len(sh) not in (3, 4):
            # Raise a regular exception.  This error is for developers, not users.
            raise RuntimeError(
                "was expecting a 3D or 4D dataset, got shape=%r" % (sh, ))

        if "t" in slot.meta.getTaggedShape():
            raise DatasetConstraintError("PostprocessData",
                                         "Input must not have time.")

        if "c" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError("PostprocessData",
                                         "Input must have channel.")

        if "y" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError("PostprocessData",
                                         "Input must have space dim y.")

        if "x" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError("PostprocessData",
                                         "Input must have space dim x.")

        if not ax[0].isChannel():
            raise DatasetConstraintError(
                "PostprocessData", "Input image must have channel first.")

        for i in range(1, len(ax)):
            if not ax[i].isSpatial():
                # This is for developers.  Don't need a user-friendly error.
                raise RuntimeError("%d-th axis %r is not spatial" % (i, ax[i]))
Example #7
    def _checkConstraints(self, laneIndex):
        """
        Ensure that all input images are 2D and have the same number of channels.
        """

        thisLaneTaggedShape = self.InputImages[laneIndex].meta.getTaggedShape()

        if 'z' in thisLaneTaggedShape:
            raise DatasetConstraintError(
                "Objects Counting Workflow",
                "All input images must be 2D (they cannot contain the z dimension).  "\
                "Your new image has {} has z dimension"\
                .format( thisLaneTaggedShape['z']))
            # Find a different lane and use it for comparison

        validShape = thisLaneTaggedShape
        for i, slot in enumerate(self.InputImages):
            if slot.ready() and i != laneIndex:
                validShape = slot.meta.getTaggedShape()
                break

        if len(validShape) != len(thisLaneTaggedShape):
            raise DatasetConstraintError(
                 "Objects Couting Workflow Counting",
                 "All input images must have the same dimensionality.  "\
                 "Your new image has {} dimensions (including channel), but your other images have {} dimensions."\
                 .format( len(thisLaneTaggedShape), len(validShape) ) )

        if validShape['c'] != thisLaneTaggedShape['c']:
            raise DatasetConstraintError(
                 "Objects Counting Workflow",
                 "All input images must have the same number of channels.  "\
                 "Your new image has {} channel(s), but your other images have {} channel(s)."\
                 .format( thisLaneTaggedShape['c'], validShape['c'] ) )
Example #8
    def checkConstraints(self, *args):
        """
        Example of how to check input data constraints.
        """
        if self.OtherInput.ready() and self.RawInput.ready():
            rawTaggedShape = self.RawInput.meta.getTaggedShape()
            for other_slot in self.OtherInput:
                otherTaggedShape = other_slot.meta.getTaggedShape()
                raw_time_size = rawTaggedShape.get("t", 1)
                other_time_size = otherTaggedShape.get("t", 1)
                if raw_time_size != other_time_size and raw_time_size != 1 and other_time_size != 1:
                    msg = (
                        "Your 'raw' and 'other' datasets appear to have differing sizes in the time dimension.\n"
                        "Your datasets have shapes: {} and {}".format(
                            self.RawInput.meta.shape, other_slot.meta.shape))
                    raise DatasetConstraintError("Layer Viewer", msg)

                rawTaggedShape["c"] = None
                otherTaggedShape["c"] = None
                rawTaggedShape["t"] = None
                otherTaggedShape["t"] = None
                if dict(rawTaggedShape) != dict(otherTaggedShape):
                    msg = (
                        "Raw data and other data must have equal spatial dimensions (different channels are okay).\n"
                        "Your datasets have shapes: {} and {}".format(
                            self.RawInput.meta.shape, other_slot.meta.shape))
                    raise DatasetConstraintError("Layer Viewer", msg)
    def checkConstraints(self, *args):
        tagged_shape = self.InputImage.meta.getTaggedShape()
        if 't' in tagged_shape:
            raise DatasetConstraintError(
                 "IIBoost Pixel Classification: Feature Selection",
                 "This classifier handles only 3D data. Your input data has a time dimension, which is not allowed.")

        if not set('xyz').issubset(tagged_shape.keys()):
            raise DatasetConstraintError(
                 "IIBoost Pixel Classification: Feature Selection",
                 "This classifier handles only 3D data. Your input data does not have all three spatial dimensions (xyz).")
    def setupOutputs(self):
        # Check for preconditions.
        if self.RawImage.ready() and self.BinaryImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            binTaggedShape = self.BinaryImage.meta.getTaggedShape()
            rawTaggedShape['c'] = None
            binTaggedShape['c'] = None
            if dict(rawTaggedShape) != dict(binTaggedShape):
                msg = "Raw data and other data must have equal dimensions (different channels are okay).\n"\
                      "Your datasets have shapes: {} and {}".format( self.RawImage.meta.shape, self.BinaryImage.meta.shape )
                raise DatasetConstraintError("Layer Viewer", msg)

        self.PredictionImage.meta.assignFrom(self.RawImage.meta)
        self.PredictionImage.meta.dtype = numpy.uint8  # Ultimately determined by meta.mapping_dtype from OpRelabelSegmentation
        prediction_tagged_shape = self.RawImage.meta.getTaggedShape()
        prediction_tagged_shape['c'] = 1
        self.PredictionImage.meta.shape = tuple(
            prediction_tagged_shape.values())

        self._block_shape_dict = self.BlockShape3dDict.value
        self._halo_padding_dict = self.HaloPadding3dDict.value

        block_shape = self._getFullShape(self._block_shape_dict)

        region_feature_output_shape = (numpy.array(
            self.PredictionImage.meta.shape) + block_shape - 1) // block_shape
        self.BlockwiseRegionFeatures.meta.shape = tuple(
            region_feature_output_shape)
        self.BlockwiseRegionFeatures.meta.dtype = object
        self.BlockwiseRegionFeatures.meta.axistags = self.PredictionImage.meta.axistags
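
The first check in Example #8 accepts differing time dimensions whenever either side has size 1, a broadcast-style rule. Distilled into a pure function (a hypothetical helper, not part of the snippet):

    def time_sizes_compatible(raw_tagged, other_tagged):
        # A missing 't' axis counts as size 1; sizes are compatible when
        # they are equal or when either side is 1 (broadcast-style).
        raw_t = raw_tagged.get("t", 1)
        other_t = other_tagged.get("t", 1)
        return raw_t == other_t or raw_t == 1 or other_t == 1

    assert time_sizes_compatible({'x': 10, 'y': 10}, {'t': 7, 'x': 10, 'y': 10})
    assert time_sizes_compatible({'t': 7, 'x': 10}, {'t': 7, 'x': 10})
    assert not time_sizes_compatible({'t': 3, 'x': 10}, {'t': 7, 'x': 10})
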
Example #11
    def _checkConstraints(self, *args):
        if self.RawImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            if 't' not in rawTaggedShape or rawTaggedShape['t'] < 2:
                msg = "Raw image must have a time dimension with at least 2 images.\n"\
                    "Your dataset has shape: {}".format(self.RawImage.meta.shape)
                raise DatasetConstraintError("Object Extraction", msg)

        if self.BinaryImage.ready():
            binTaggedShape = self.BinaryImage.meta.getTaggedShape()
            if 't' not in binTaggedShape or binTaggedShape['t'] < 2:
                msg = "Binary image must have a time dimension with at least 2 images.\n"\
                    "Your dataset has shape: {}".format(self.BinaryImage.meta.shape)
                raise DatasetConstraintError("Object Extraction", msg)

        if self.RawImage.ready() and self.BinaryImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            binTaggedShape = self.BinaryImage.meta.getTaggedShape()
            rawTaggedShape['c'] = None
            binTaggedShape['c'] = None
            if dict(rawTaggedShape) != dict(binTaggedShape):
                msg = "Raw data and other data must have equal dimensions (different channels are okay).\n"\
                      "Your datasets have shapes: {} and {}".format( self.RawImage.meta.shape, self.BinaryImage.meta.shape )
                logger.info(msg)
                raise DatasetConstraintError("Object Extraction", msg)
Example #12
    def setupOutputs(self):
        # drop non-channel singleton axes
        allAxes = 'txyzc'
        ts = self.InputImage.meta.getTaggedShape()
        oldAxes = "".join(ts.keys())
        newAxes = "".join(
            [a for a in allAxes if a in ts and ts[a] > 1 or a == 'c'])
        self.opReorderIn.AxisOrder.setValue(newAxes)
        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)
        if self.FeatureListFilename.ready() and len(
                self.FeatureListFilename.value) > 0:
            self._files = []
            with open(self.FeatureListFilename.value, 'r') as f:
                for line in f:
                    line = line.strip()
                    if len(line) > 0:
                        self._files.append(line)

            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()

            axistags = self.InputImage.meta.axistags

            self.FeatureLayers.resize(len(self._files))
            for i in range(len(self._files)):
                with h5py.File(self._files[i], 'r') as f:
                    shape = f["data"].shape
                    assert len(shape) == 3
                    dtype = f["data"].dtype.type
                self.FeatureLayers[i].meta.shape = shape + (1, )
                self.FeatureLayers[i].meta.dtype = dtype
                self.FeatureLayers[i].meta.axistags = axistags
                self.FeatureLayers[i].meta.description = os.path.basename(
                    self._files[i])

            self.OutputImage.meta.shape = shape + (len(self._files), )
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags

            self.CachedOutputImage.meta.shape = shape + (len(self._files), )
            self.CachedOutputImage.meta.axistags = axistags
        else:
            # Set the new selection matrix and check if it creates an error.
            selections = self.SelectionMatrix.value
            self.opPixelFeatures.Matrix.setValue(selections,
                                                 check_changed=False)
            invalid_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales:
                msg = "Some of your selected feature scales are too large for your dataset.\n"\
                      "Choose smaller scales (sigma) or use a larger dataset.\n"\
                      "The invalid scales are: {}".format( invalid_scales )
                raise DatasetConstraintError("Feature Selection", msg)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)
Example #13
 def setupOutputs(self):
     # drop non-channel singleton axes
     allAxes = 'txyzc'
     ts = self.InputImage.meta.getTaggedShape()
     oldAxes = "".join(ts.keys())
     newAxes = "".join([a for a in allAxes
                        if a in ts and ts[a] > 1 or a == 'c'])
     self.opReorderIn.AxisOrder.setValue(newAxes)
     self.opReorderOut.AxisOrder.setValue(oldAxes)
     self.opReorderLayers.AxisOrder.setValue(oldAxes)
     
     # Get features from external file
     if self.FeatureListFilename.ready() and len(self.FeatureListFilename.value) > 0:
               
         self.OutputImage.disconnect()
         self.FeatureLayers.disconnect()
         
         axistags = self.InputImage.meta.axistags
             
         with h5py.File(self.FeatureListFilename.value,'r') as f:
             dset_names = []
             f.visit(dset_names.append)
             if len(dset_names) != 1:
                 sys.stderr.write("Input external features HDF5 file should have exactly 1 dataset.\n")
                 sys.exit(1)                
             
             dset = f[dset_names[0]]
             chnum = dset.shape[-1]
             shape = dset.shape
             dtype = dset.dtype.type
         
         # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage, 
         # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
         self.FeatureLayers.resize(chnum)
         for i in range(chnum):
             self.FeatureLayers[i].meta.shape    = shape[:-1]+(1,)
             self.FeatureLayers[i].meta.dtype    = dtype
             self.FeatureLayers[i].meta.axistags = axistags 
             self.FeatureLayers[i].meta.display_mode = 'default' 
             self.FeatureLayers[i].meta.description = "feature_channel_"+str(i)
         
         self.OutputImage.meta.shape    = shape
         self.OutputImage.meta.dtype    = dtype 
         self.OutputImage.meta.axistags = axistags
         
     else:
         # Set the new selection matrix and check if it creates an error.
         selections = self.SelectionMatrix.value
         self.opPixelFeatures.Matrix.setValue( selections )
         invalid_scales = self.opPixelFeatures.getInvalidScales()
         if invalid_scales:
             msg = "Some of your selected feature scales are too large for your dataset.\n"\
                   "Choose smaller scales (sigma) or use a larger dataset.\n"\
                   "The invalid scales are: {}".format( invalid_scales )                      
             raise DatasetConstraintError( "Feature Selection", msg )
         
         # Connect our external outputs to our internal operators
         self.OutputImage.connect( self.opReorderOut.Output )
         self.FeatureLayers.connect( self.opReorderLayers.Output )
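
Example #13 discovers the single dataset in the features file with h5py.File.visit, which calls a function with the name of every object in the file. One caveat: visit yields group names as well as dataset names, so a file whose only dataset sits inside a group would fail the len(dset_names) != 1 test. A self-contained demonstration (writes a throwaway file; the filtering to datasets is an addition, not in the snippet above):

    import h5py
    import numpy as np

    # Build a small file with exactly one dataset, as the snippet expects.
    with h5py.File("features_demo.h5", "w") as f:
        f.create_dataset("data", data=np.zeros((4, 5, 6, 2), dtype=np.float32))

    with h5py.File("features_demo.h5", "r") as f:
        names = []
        f.visit(names.append)   # collects every group/dataset name in the file
        dset_names = [n for n in names if isinstance(f[n], h5py.Dataset)]
        assert len(dset_names) == 1, "expected exactly 1 dataset"
        dset = f[dset_names[0]]
        shape, dtype, chnum = dset.shape, dset.dtype.type, dset.shape[-1]
    print(shape, chnum)   # (4, 5, 6, 2) 2
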
Example #14
    def _checkConstraints(self, laneIndex):
        """
        Ensure that all input images have the same number of channels.
        """
        if not self.InputImages[laneIndex].ready():
            return

        thisLaneTaggedShape = self.InputImages[laneIndex].meta.getTaggedShape()

        # Find a different lane and use it for comparison
        validShape = thisLaneTaggedShape
        for i, slot in enumerate(self.InputImages):
            if slot.ready() and i != laneIndex:
                validShape = slot.meta.getTaggedShape()
                break

        if "t" in thisLaneTaggedShape:
            del thisLaneTaggedShape["t"]
        if "t" in validShape:
            del validShape["t"]

        if validShape["c"] != thisLaneTaggedShape["c"]:
            raise DatasetConstraintError(
                "Pixel Classification",
                "All input images must have the same number of channels.  "
                "Your new image has {} channel(s), but your other images have {} channel(s)."
                .format(thisLaneTaggedShape["c"], validShape["c"]),
            )

        if len(validShape) != len(thisLaneTaggedShape):
            raise DatasetConstraintError(
                "Pixel Classification",
                "All input images must have the same dimensionality.  "
                "Your new image has {} dimensions (including channel), but your other images have {} dimensions."
                .format(len(thisLaneTaggedShape), len(validShape)),
            )

        mask_slot = self.PredictionMasks[laneIndex]
        input_shape = self.InputImages[laneIndex].meta.shape
        if mask_slot.ready() and mask_slot.meta.shape[:-1] != input_shape[:-1]:
            raise DatasetConstraintError(
                "Pixel Classification",
                "If you supply a prediction mask, it must have the same shape as the input image."
                "Your input image has shape {}, but your mask has shape {}.".
                format(input_shape, mask_slot.meta.shape),
            )
    def _checkConstraints(self, *args):
        if self.ImageA.ready() and self.ImageB.ready():
            shapeA = self.ImageA.meta.getTaggedShape()
            shapeB = self.ImageB.meta.getTaggedShape()

            if shapeA != shapeB:
                raise DatasetConstraintError("Label Image Difference",
                                             "Cannot compute difference of images with different shapes")
Example #16
 def checkConstraints(*_):
     if opData.Image.meta.dtype != np.uint8:
         msg = "The Autocontext Workflow only supports 8-bit images (UINT8 pixel type).\n"\
               "Your image has a pixel type of {}.  Please convert your data to UINT8 and try again."\
               .format( str(np.dtype(opData.Image.meta.dtype)) )
         raise DatasetConstraintError("Autocontext Workflow",
                                      msg,
                                      unfixable=True)
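
Example #16 gates on the pixel dtype. numpy dtype comparison accepts a type object directly, and np.dtype(...) yields the readable name used in the message. A minimal sketch of the same guard on a bare array (stand-alone, not the applet code):

    import numpy as np

    def check_uint8(image):
        if image.dtype != np.uint8:
            raise ValueError(
                "Only 8-bit images (UINT8) are supported; got pixel type {}."
                .format(np.dtype(image.dtype)))

    check_uint8(np.zeros((4, 4), dtype=np.uint8))      # passes
    # check_uint8(np.zeros((4, 4), dtype=np.float32))  # would raise
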
Example #17
 def checkConstraints(self, *args):
     if self._opReorder1.Output.ready():
         numChannels = self._opReorder1.Output.meta.getTaggedShape()['c']
         if self.Channel.value >= numChannels:
             raise DatasetConstraintError(
                 "Two-Level Thresholding",
                 "Your project is configured to select data from channel"
                 " #{}, but your input data only has {} channels.".format(
                     self.Channel.value, numChannels))
Example #18
    def checkConstraints(self, *args):
        """
        Example of how to check input data constraints.
        """
        if self.RawInput.ready():
            numChannels = self.RawInput.meta.getTaggedShape()['c']
            if numChannels != 1:
                raise DatasetConstraintError(
                    "Layer Viewer",
                    "Raw data must have exactly one channel.  " +
                    "You attempted to add a dataset with {} channels".format( numChannels ) )

        if self.OtherInput.ready() and self.RawInput.ready():
            rawTaggedShape = self.RawInput.meta.getTaggedShape()
            otherTaggedShape = self.OtherInput.meta.getTaggedShape()
            rawTaggedShape['c'] = None
            otherTaggedShape['c'] = None
            if dict(rawTaggedShape) != dict(otherTaggedShape):
                msg = "Raw data and other data must have equal dimensions (different channels are okay).\n"\
                      "Your datasets have shapes: {} and {}".format( self.RawInput.meta.shape, self.OtherInput.meta.shape )
                raise DatasetConstraintError( "Layer Viewer", msg )
Example #19
    def setupOutputs(self):
        taggedOutputShape = self.LabelImage.meta.getTaggedShape()

        if 't' not in taggedOutputShape.keys() or taggedOutputShape['t'] < 2:
            raise DatasetConstraintError( "Tracking Feature Extraction",
                                          "Label Image must have a time axis with more than 1 image.\n"\
                                          "Label Image shape: {}"\
                                          "".format(self.LabelImage.meta.shape))

        # Every value in the region features output is cached separately as its own "block"
        blockshape = (1, ) * len(
            self._opDivisionFeatures.BlockwiseDivisionFeatures.meta.shape)
        self._opCache.blockShape.setValue(blockshape)
    def _checkConstraints(self, *args):
        slot = self.Input

        sh = slot.meta.shape
        ndim = len(sh)
        ax = slot.meta.axistags
        tsh = slot.meta.getTaggedShape()

        if ("c" in tsh):
            if (tsh["c"] != 1):
                raise DatasetConstraintError(
                    "ColorizeLabelImage",
                    "Input image cannot have a non-singleton channel dimension."
                )
            if (ndim == 1):
                raise DatasetConstraintError(
                    "ColorizeLabelImage",
                    "There must be more dimensions than just the channel dimension."
                )
            if not ax[-1].isChannel():
                raise DatasetConstraintError(
                    "ColorizeLabelImage",
                    "Input image must have channel last.")
    def _checkConstraints(self, laneIndex):
        """
        Override from OpPixelClassification.
        
        Check all input slots for appropriate size/shape, etc. 
        """
        if not self.InputImages[laneIndex].ready():
            return

        tagged_shape = self.InputImages[laneIndex].meta.getTaggedShape()

        if 't' in tagged_shape:
            raise DatasetConstraintError(
                "IIBoost Pixel Classification",
                "This classifier handles only 3D data. Your input data has a time dimension, which is not allowed."
            )

        if not set('xyz').issubset(list(tagged_shape.keys())):
            raise DatasetConstraintError(
                "IIBoost Pixel Classification",
                "This classifier handles only 3D data. Your input data does not have all three spatial dimensions (xyz)."
            )

        super(OpIIBoostPixelClassification, self)._checkConstraints(laneIndex)
Example #22
    def _checkConstraints(self, *args):
        slot = self.Input

        sh = slot.meta.shape
        ax = slot.meta.axistags
        if len(sh) not in (4, 5):
            # Raise a regular exception.  This error is for developers, not users.
            raise RuntimeError("was expecting a 4D or 5D dataset, got shape=%r" % (sh,))

        if "t" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input must have time.")

        if "y" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input must have space dim y.")

        if "x" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input must have space dim x.")

        if "c" not in slot.meta.getTaggedShape():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input must have channel.")

        numChannels = slot.meta.getTaggedShape()["c"]
        if numChannels != 1:
            raise DatasetConstraintError(
                "EstimateF0",
                "Input image must have exactly one channel.  " +
                "You attempted to add a dataset with {} channels".format( numChannels ) )

        if not ax[0].isTemporal():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input image must have time first." )

        if not ax[-1].isChannel():
            raise DatasetConstraintError(
                "EstimateF0",
                "Input image must have channel last." )

        for i in range(1, len(ax) - 1):
            if not ax[i].isSpatial():
                # This is for developers.  Don't need a user-friendly error.
                raise RuntimeError("%d-th axis %r is not spatial" % (i, ax[i]))
Example #23
    def setupOutputs(self):
        assert self.LabelImage.meta.axistags == self.RawImage.meta.axistags

        taggedOutputShape = self.LabelImage.meta.getTaggedShape()
        taggedRawShape = self.RawImage.meta.getTaggedShape()

        if not np.all(list(taggedOutputShape.get(k, 0) == taggedRawShape.get(k, 0)
                           for k in "txyz")):
            raise DatasetConstraintError( "Object Extraction",
                                          "Raw Image and Label Image shapes do not match.\n"\
                                          "Label Image shape: {}. Raw Image shape: {}"\
                                          "".format(self.LabelImage.meta.shape, self.RawVolume.meta.shape))


        # Every value in the regionfeatures output is cached seperately as it's own "block"
        blockshape = (1,) * len(self._opRegionFeatures.Output.meta.shape)
        self._opCache.blockShape.setValue(blockshape)
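
Note how get(k, 0) in Example #23 makes a missing axis mismatch a present one (a present axis always has size >= 1), while 'c' is simply left out of the loop. Distilled (illustrative):

    def txyz_shapes_match(tagged_a, tagged_b):
        # Missing axes default to 0, so an axis present on only one side
        # always mismatches; the channel axis is deliberately ignored.
        return all(tagged_a.get(k, 0) == tagged_b.get(k, 0) for k in "txyz")

    assert txyz_shapes_match({'t': 3, 'x': 10, 'y': 10, 'c': 1},
                             {'t': 3, 'x': 10, 'y': 10, 'c': 4})
    assert not txyz_shapes_match({'x': 10, 'y': 10},
                                 {'t': 3, 'x': 10, 'y': 10})
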
Example #24
    def _checkConstraints(self, *args):
        if self.RawImage.ready() and self.BinaryImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            binTaggedShape = self.BinaryImage.meta.getTaggedShape()
            rawTaggedShape["c"] = None
            binTaggedShape["c"] = None
            if dict(rawTaggedShape) != dict(binTaggedShape):
                msg = (
                    "Raw data and other data must have equal dimensions (different channels are okay).\n"
                    "Your datasets have shapes: {} and {}".format(
                        self.RawImage.meta.shape, self.BinaryImage.meta.shape))
                logger.info(msg)
                raise DatasetConstraintError("Object Extraction", msg)
Example #25
    def setupOutputs(self):
        # Check for preconditions.
        if self.RawImage.ready() and self.BinaryImage.ready():
            rawTaggedShape = self.RawImage.meta.getTaggedShape()
            binTaggedShape = self.BinaryImage.meta.getTaggedShape()
            rawTaggedShape['c'] = None
            binTaggedShape['c'] = None
            if dict(rawTaggedShape) != dict(binTaggedShape):
                msg = "Raw data and other data must have equal dimensions (different channels are okay).\n"\
                      "Your datasets have shapes: {} and {}".format( self.RawImage.meta.shape, self.BinaryImage.meta.shape )
                raise DatasetConstraintError("Blockwise Object Classification",
                                             msg)

        self._block_shape_dict = self.BlockShape3dDict.value
        self._halo_padding_dict = self.HaloPadding3dDict.value

        self.PredictionImage.meta.assignFrom(self.RawImage.meta)
        self.PredictionImage.meta.dtype = numpy.uint8  # Ultimately determined by meta.mapping_dtype from OpRelabelSegmentation
        prediction_tagged_shape = self.RawImage.meta.getTaggedShape()
        prediction_tagged_shape['c'] = 1
        self.PredictionImage.meta.shape = tuple(
            prediction_tagged_shape.values())

        block_shape = self._getFullShape(self._block_shape_dict)
        self.PredictionImage.meta.ideal_blockshape = block_shape

        raw_ruprp = self.RawImage.meta.ram_usage_per_requested_pixel
        binary_ruprp = self.BinaryImage.meta.ram_usage_per_requested_pixel
        prediction_ruprp = max(raw_ruprp, binary_ruprp)
        self.PredictionImage.meta.ram_usage_per_requested_pixel = prediction_ruprp

        self.ProbabilityChannelImage.meta.assignFrom(self.RawImage.meta)
        self.ProbabilityChannelImage.meta.dtype = numpy.float32
        prediction_channels_tagged_shape = self.RawImage.meta.getTaggedShape()
        prediction_channels_tagged_shape['c'] = self.LabelsCount.value
        self.ProbabilityChannelImage.meta.shape = tuple(
            prediction_channels_tagged_shape.values())
        self.ProbabilityChannelImage.meta.ram_usage_per_requested_pixel = prediction_ruprp

        region_feature_output_shape = (numpy.array(
            self.PredictionImage.meta.shape) + block_shape - 1) // block_shape
        self.BlockwiseRegionFeatures.meta.shape = tuple(
            region_feature_output_shape)
        self.BlockwiseRegionFeatures.meta.dtype = object
        self.BlockwiseRegionFeatures.meta.axistags = self.PredictionImage.meta.axistags
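
The expression (shape + block_shape - 1) // block_shape in Example #25 is integer ceiling division: the number of blocks needed to cover each axis. A quick numeric check:

    import numpy as np

    shape = np.array([1, 100, 100, 40, 1])        # (t, x, y, z, c)
    block_shape = np.array([1, 32, 32, 32, 1])

    n_blocks = (shape + block_shape - 1) // block_shape
    print(n_blocks)   # [1 4 4 2 1] -- e.g. ceil(100/32) == 4, ceil(40/32) == 2
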
Example #26
    def setupOutputs(self):
        # drop non-channel singleton axes
        oldAxes = self.InputImage.meta.getAxisKeys()
        # make sure channel axis is present
        if 'c' not in oldAxes:
            oldAxes.append('c')

        self.opReorderOut.AxisOrder.setValue(oldAxes)
        self.opReorderLayers.AxisOrder.setValue(oldAxes)

        # Get features from external file
        if self.FeatureListFilename.ready() and len(
                self.FeatureListFilename.value) > 0:
            raise NotImplementedError('Not simplified yet!')

            self.OutputImage.disconnect()
            self.FeatureLayers.disconnect()

            axistags = self.InputImage.meta.axistags

            with h5py.File(self.FeatureListFilename.value, 'r') as f:
                dset_names = []
                f.visit(dset_names.append)
                if len(dset_names) != 1:
                    sys.stderr.write(
                        "Input external features HDF5 file should have exactly 1 dataset.\n"
                    )
                    sys.exit(1)

                dset = f[dset_names[0]]
                chnum = dset.shape[-1]
                shape = dset.shape
                dtype = dset.dtype.type

            # Set the metadata for FeatureLayers. Unlike OutputImage and CachedOutputImage,
            # FeatureLayers has one slot per channel and therefore the channel dimension must be 1.
            self.FeatureLayers.resize(chnum)
            for i in range(chnum):
                self.FeatureLayers[i].meta.shape = shape[:-1] + (1, )
                self.FeatureLayers[i].meta.dtype = dtype
                self.FeatureLayers[i].meta.axistags = axistags
                self.FeatureLayers[i].meta.display_mode = 'default'
                self.FeatureLayers[
                    i].meta.description = "feature_channel_" + str(i)

            self.OutputImage.meta.shape = shape
            self.OutputImage.meta.dtype = dtype
            self.OutputImage.meta.axistags = axistags

        else:
            invalid_scales, invalid_z_scales = self.opPixelFeatures.getInvalidScales()
            if invalid_scales or invalid_z_scales:
                invalid_z_scales = [
                    s for s in invalid_z_scales if s not in invalid_scales
                ]  # 'do not complain twice'
                msg = 'Some of your selected feature scales are too large for your dataset.\n'
                if invalid_scales:
                    msg += f'Reduce or remove these scales:\n{invalid_scales}\n\n'

                if invalid_z_scales:
                    msg += f'Reduce, remove or switch to 2D computation for these scales:\n{invalid_z_scales}\n\n'

                msg += 'Alternatively use another dataset.'
                if self.parent.parent.featureSelectionApplet._gui is None:
                    # headless
                    fix_dlgs = []
                else:
                    fix_dlgs = [
                        self.parent.parent.featureSelectionApplet._gui
                        .currentGui(fallback_on_lane_0=True)
                        .onFeatureButtonClicked
                    ]

                raise DatasetConstraintError("Feature Selection",
                                             msg,
                                             fixing_dialogs=fix_dlgs)

            # Connect our external outputs to our internal operators
            self.OutputImage.connect(self.opReorderOut.Output)
            self.FeatureLayers.connect(self.opReorderLayers.Output)
Example #27
    def _generate_traxelstore(self,
                              time_range,
                              x_range,
                              y_range,
                              z_range,
                              size_range,
                              x_scale=1.0,
                              y_scale=1.0,
                              z_scale=1.0,
                              with_div=False,
                              with_local_centers=False,
                              median_object_size=None,
                              max_traxel_id_at=None,
                              with_opt_correction=False,
                              with_coordinate_list=False,
                              with_classifier_prior=False):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        parameters = self.Parameters.value
        parameters['scales'] = [x_scale, y_scale, z_scale]
        parameters['time_range'] = [min(time_range), max(time_range)]
        parameters['x_range'] = x_range
        parameters['y_range'] = y_range
        parameters['z_range'] = z_range
        parameters['size_range'] = size_range

        logger.info("generating traxels")
        logger.info("fetching region features and division probabilities")
        feats = self.ObjectFeatures(time_range).wait()

        if with_div:
            if not self.DivisionProbabilities.ready() or len(
                    self.DivisionProbabilities([0]).wait()[0]) == 0:
                msgStr = "\nDivision classifier has not been trained! " + \
                         "Uncheck divisible objects if your objects don't divide or " + \
                         "go back to the Division Detection applet and train it."
                raise DatasetConstraintError("Tracking", msgStr)
            divProbs = self.DivisionProbabilities(time_range).wait()

        if with_local_centers:
            localCenters = self.RegionLocalCenters(time_range).wait()

        if with_classifier_prior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                msgStr = "\nObject count classifier has not been trained! " + \
                         "Go back to the Object Count Classification applet and train it."
                raise DatasetConstraintError("Tracking", msgStr)
            detProbs = self.DetectionProbabilities(time_range).wait()

        logger.info("filling traxelstore")
        ts = pgmlink.TraxelStore()
        fs = pgmlink.FeatureStore()

        max_traxel_id_at = pgmlink.VectorOfInt()
        filtered_labels = {}
        obj_sizes = []
        total_count = 0
        empty_frame = False

        for t in feats.keys():
            rc = feats[t][default_features_key]['RegionCenter']
            lower = feats[t][default_features_key]['Coord<Minimum>']
            upper = feats[t][default_features_key]['Coord<Maximum>']
            if rc.size:
                rc = rc[1:, ...]
                lower = lower[1:, ...]
                upper = upper[1:, ...]

            if with_opt_correction:
                try:
                    rc_corr = feats[t][
                        config.features_vigra_name]['RegionCenter_corr']
                except KeyError:
                    raise Exception(
                        'Cannot consider optical correction since it has not been computed before')
                if rc_corr.size:
                    rc_corr = rc_corr[1:, ...]

            ct = feats[t][default_features_key]['Count']
            if ct.size:
                ct = ct[1:, ...]

            logger.debug("at timestep {}, {} traxels found".format(
                t, rc.shape[0]))
            count = 0
            filtered_labels_at = []
            for idx in range(rc.shape[0]):
                # for 2d data, set z-coordinate to 0:
                if len(rc[idx]) == 2:
                    x, y = rc[idx]
                    z = 0
                elif len(rc[idx]) == 3:
                    x, y, z = rc[idx]
                else:
                    raise DatasetConstraintError(
                        "Tracking",
                        "The RegionCenter feature must have dimensionality 2 or 3."
                    )
                size = ct[idx]
                if (x < x_range[0] or x >= x_range[1] or y < y_range[0]
                        or y >= y_range[1] or z < z_range[0] or z >= z_range[1]
                        or size < size_range[0] or size >= size_range[1]):
                    filtered_labels_at.append(int(idx + 1))
                    continue
                else:
                    count += 1
                tr = pgmlink.Traxel()
                tr.set_feature_store(fs)
                tr.set_x_scale(x_scale)
                tr.set_y_scale(y_scale)
                tr.set_z_scale(z_scale)
                tr.Id = int(idx + 1)
                tr.Timestep = int(t)

                # pgmlink expects always 3 coordinates, z=0 for 2d data
                tr.add_feature_array("com", 3)
                for i, v in enumerate([x, y, z]):
                    tr.set_feature_value('com', i, float(v))

                tr.add_feature_array("CoordMinimum", 3)
                for i, v in enumerate(lower[idx]):
                    tr.set_feature_value("CoordMinimum", i, float(v))
                tr.add_feature_array("CoordMaximum", 3)
                for i, v in enumerate(upper[idx]):
                    tr.set_feature_value("CoordMaximum", i, float(v))

                if with_opt_correction:
                    tr.add_feature_array("com_corrected", 3)
                    for i, v in enumerate(rc_corr[idx]):
                        tr.set_feature_value("com_corrected", i, float(v))
                    if len(rc_corr[idx]) == 2:
                        tr.set_feature_value("com_corrected", 2, 0.)

                if with_div:
                    tr.add_feature_array("divProb", 1)
                    # idx+1 because rc and ct start from 1, divProbs starts from 0
                    tr.set_feature_value("divProb", 0,
                                         float(divProbs[t][idx + 1][1]))

                if with_classifier_prior:
                    tr.add_feature_array("detProb", len(detProbs[t][idx + 1]))
                    for i, v in enumerate(detProbs[t][idx + 1]):
                        val = float(v)
                        if val < 0.0000001:
                            val = 0.0000001
                        if val > 0.99999999:
                            val = 0.99999999
                        tr.set_feature_value("detProb", i, float(val))

                # FIXME: check whether it is 2d or 3d data!
                if with_local_centers:
                    tr.add_feature_array("localCentersX",
                                         len(localCenters[t][idx + 1]))
                    tr.add_feature_array("localCentersY",
                                         len(localCenters[t][idx + 1]))
                    tr.add_feature_array("localCentersZ",
                                         len(localCenters[t][idx + 1]))
                    for i, v in enumerate(localCenters[t][idx + 1]):
                        tr.set_feature_value("localCentersX", i, float(v[0]))
                        tr.set_feature_value("localCentersY", i, float(v[1]))
                        tr.set_feature_value("localCentersZ", i, float(v[2]))

                tr.add_feature_array("count", 1)
                tr.set_feature_value("count", 0, float(size))
                if median_object_size is not None:
                    obj_sizes.append(float(size))

                ts.add(fs, tr)

            if len(filtered_labels_at) > 0:
                filtered_labels[str(int(t) -
                                    time_range[0])] = filtered_labels_at
            logger.debug("at timestep {}, {} traxels passed filter".format(
                t, count))
            max_traxel_id_at.append(int(rc.shape[0]))
            if count == 0:
                empty_frame = True

            total_count += count

        if median_object_size is not None:
            median_object_size[0] = np.median(np.array(obj_sizes),
                                              overwrite_input=True)
            logger.info('median object size = ' + str(median_object_size[0]))

        self.FilteredLabels.setValue(filtered_labels, check_changed=True)

        return fs, ts, empty_frame, max_traxel_id_at
    def _generate_traxelstore(self,
                              time_range,
                              x_range,
                              y_range,
                              z_range,
                              size_range,
                              x_scale=1.0,
                              y_scale=1.0,
                              z_scale=1.0,
                              with_div=False,
                              with_local_centers=False,
                              with_classifier_prior=False):

        logger.info("generating traxels")

        self.progressVisitor.showState("Object features")
        self.progressVisitor.showProgress(0)

        traxelstore = ProbabilityGenerator()

        logger.info("fetching region features and division probabilities")
        feats = self.ObjectFeatures(time_range).wait()

        if with_div:
            if not self.DivisionProbabilities.ready() or len(
                    self.DivisionProbabilities([0]).wait()[0]) == 0:
                msgStr = "\nDivision classifier has not been trained! " + \
                         "Uncheck divisible objects if your objects don't divide or " + \
                         "go back to the Division Detection applet and train it."
                raise DatasetConstraintError("Tracking", msgStr)
            self.progressVisitor.showState("Division probabilities")
            self.progressVisitor.showProgress(0)
            divProbs = self.DivisionProbabilities(time_range).wait()

        if with_local_centers:
            localCenters = self.RegionLocalCenters(time_range).wait()

        if with_classifier_prior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                msgStr = "\nObject count classifier has not been trained! " + \
                         "Go back to the Object Count Classification applet and train it."
                raise DatasetConstraintError("Tracking", msgStr)
            self.progressVisitor.showState("Detection probabilities")
            self.progressVisitor.showProgress(0)
            detProbs = self.DetectionProbabilities(time_range).wait()

        logger.info("filling traxelstore")

        filtered_labels = {}
        total_count = 0
        empty_frame = False
        numTimeStep = len(list(feats.keys()))
        countT = 0

        stepStr = "Creating traxel store"
        self.progressVisitor.showState(stepStr +
                                       "                              ")

        for t in list(feats.keys()):
            countT += 1
            self.progressVisitor.showProgress(
                old_div(countT, float(numTimeStep)))

            rc = feats[t][default_features_key]['RegionCenter']
            lower = feats[t][default_features_key]['Coord<Minimum>']
            upper = feats[t][default_features_key]['Coord<Maximum>']
            if rc.size:
                rc = rc[1:, ...]
                lower = lower[1:, ...]
                upper = upper[1:, ...]

            ct = feats[t][default_features_key]['Count']
            if ct.size:
                ct = ct[1:, ...]

            logger.debug("at timestep {}, {} traxels found".format(
                t, rc.shape[0]))
            count = 0
            filtered_labels_at = []
            for idx in range(rc.shape[0]):
                traxel = Traxel()

                # for 2d data, set z-coordinate to 0:
                if len(rc[idx]) == 2:
                    x, y = rc[idx]
                    z = 0
                    x_lower, y_lower = lower[idx]
                    x_upper, y_upper = upper[idx]
                    z_lower = 0
                    z_upper = 0
                elif len(rc[idx]) == 3:
                    x, y, z = rc[idx]
                    x_lower, y_lower, z_lower = lower[idx]
                    x_upper, y_upper, z_upper = upper[idx]
                else:
                    raise DatasetConstraintError(
                        "Tracking",
                        "The RegionCenter feature must have dimensionality 2 or 3."
                    )

                size = ct[idx]

                if (x_upper < x_range[0] or x_lower >= x_range[1]
                        or y_upper < y_range[0] or y_lower >= y_range[1]
                        or z_upper < z_range[0] or z_lower >= z_range[1]
                        or size < size_range[0] or size >= size_range[1]):
                    filtered_labels_at.append(int(idx + 1))
                    continue
                else:
                    count += 1

                traxel.Id = int(idx + 1)
                traxel.Timestep = int(t)
                traxel.set_x_scale(x_scale)
                traxel.set_y_scale(y_scale)
                traxel.set_z_scale(z_scale)

                # Expects always 3 coordinates, z=0 for 2d data
                traxel.add_feature_array("com", 3)
                for i, v in enumerate([x, y, z]):
                    traxel.set_feature_value('com', i, float(v))

                traxel.add_feature_array("CoordMinimum", 3)
                for i, v in enumerate(lower[idx]):
                    traxel.set_feature_value("CoordMinimum", i, float(v))
                traxel.add_feature_array("CoordMaximum", 3)
                for i, v in enumerate(upper[idx]):
                    traxel.set_feature_value("CoordMaximum", i, float(v))

                if with_div:
                    traxel.add_feature_array("divProb", 2)
                    # idx+1 because rc and ct start from 1, divProbs starts from 0
                    prob = float(divProbs[t][idx + 1][1])
                    if prob < 0.0000001:
                        prob = 0.0000001
                    if prob > 0.99999999:
                        prob = 0.99999999
                    traxel.set_feature_value("divProb", 0, 1.0 - prob)
                    traxel.set_feature_value("divProb", 1, prob)

                if with_classifier_prior:
                    traxel.add_feature_array("detProb",
                                             len(detProbs[t][idx + 1]))
                    for i, v in enumerate(detProbs[t][idx + 1]):
                        val = float(v)
                        if val < 0.0000001:
                            val = 0.0000001
                        if val > 0.99999999:
                            val = 0.99999999
                        traxel.set_feature_value("detProb", i, float(val))

                # FIXME: check whether it is 2d or 3d data!
                if with_local_centers:
                    traxel.add_feature_array("localCentersX",
                                             len(localCenters[t][idx + 1]))
                    traxel.add_feature_array("localCentersY",
                                             len(localCenters[t][idx + 1]))
                    traxel.add_feature_array("localCentersZ",
                                             len(localCenters[t][idx + 1]))

                    for i, v in enumerate(localCenters[t][idx + 1]):
                        traxel.set_feature_value("localCentersX", i,
                                                 float(v[0]))
                        traxel.set_feature_value("localCentersY", i,
                                                 float(v[1]))
                        traxel.set_feature_value("localCentersZ", i,
                                                 float(v[2]))

                traxel.add_feature_array("count", 1)
                traxel.set_feature_value("count", 0, float(size))

                if (x_upper < x_range[0] or x_lower >= x_range[1]
                        or y_upper < y_range[0] or y_lower >= y_range[1]
                        or z_upper < z_range[0] or z_lower >= z_range[1]
                        or size < size_range[0] or size >= size_range[1]):
                    logger.info("Omitting traxel with ID: {} {}".format(
                        traxel.Id, t))
                    print("Omitting traxel with ID: {} {}".format(
                        traxel.Id, t))
                else:
                    logger.debug("Adding traxel with ID: {}  {}".format(
                        traxel.Id, t))
                    traxelstore.TraxelsPerFrame.setdefault(
                        int(t), {})[int(idx + 1)] = traxel

            if len(filtered_labels_at) > 0:
                filtered_labels[str(int(t) -
                                    time_range[0])] = filtered_labels_at

            logger.debug("at timestep {}, {} traxels passed filter".format(
                t, count))

            if count == 0:
                empty_frame = True
                logger.info('Found empty frames for time {}'.format(t))

            total_count += count

        self.parent.parent.trackingApplet.progressSignal(100)
        self.FilteredLabels.setValue(filtered_labels, check_changed=True)

        return traxelstore
 def raiseDatasetConstraintError(self, progressWindow, titleStr, messageStr):
     if progressWindow is not None:
         progressWindow.onTrackDone()
     raise DatasetConstraintError(titleStr, messageStr)
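
Both traxelstore builders in Example #27 clamp classifier probabilities into the open interval (0, 1) before handing them to the solver, so that downstream log/odds terms stay finite. The clamp, factored into a helper (hypothetical name; the snippets inline it):

    import numpy as np

    def clamp_probability(p, lo=0.0000001, hi=0.99999999):
        # Keep probabilities strictly inside (0, 1); matches the inline
        # bounds used for "divProb" and "detProb" above.
        return float(np.clip(p, lo, hi))

    assert clamp_probability(0.0) == 0.0000001
    assert clamp_probability(1.0) == 0.99999999
    assert clamp_probability(0.5) == 0.5
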
Example #30
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              detWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              motionModelWeight=10.0,
              force_build_hypotheses_graph=False,
              max_nearest_neighbors=1,
              withBatchProcessing=False,
              solverName="ILP",
              numFramesPerSplit=0):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        if numFramesPerSplit != 0:
            raise Exception(
                "PGMLINK tracking does not support sliding window tracking")

        # It is assumed that the self.Parameters object is changed only at
        # this place (an ugly assumption). We can therefore track parameter
        # changes here: if a key already holds the same value in the
        # parameters dictionary, a parameters_changed dictionary would get a
        # False entry for that key, otherwise True (see the sketch after the
        # assignments below).
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['detWeight'] = detWeight
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['numFramesPerSplit'] = numFramesPerSplit
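
        # Sketch of the change tracking described in the comment above
        # (illustrative only; it would have to compare against the old values
        # *before* the assignments overwrite them, with new_values standing
        # for the freshly passed arguments):
        #     parameters_changed = dict(
        #         (key, old_parameters.get(key) != value)
        #         for key, value in new_values.items())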

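        # Note: nothing below ever sets this flag to False, and the
        # force_build_hypotheses_graph argument is not consulted, so the
        # hypotheses graph is rebuilt on every call.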
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError(
                    'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
                )
            num_labels_msg = (
                'The max. number of objects must be consistent with the '
                'number of labels given in Object Count Classification.\n'
                'Check whether you have (i) the correct number of label names '
                'specified in Object Count Classification, and (ii) provided '
                'at least one training example for each class.')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                raise DatasetConstraintError('Tracking', num_labels_msg)
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                raise DatasetConstraintError('Tracking', num_labels_msg)
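            # Worked example: with maxObj=2 the classifier must distinguish
            # the classes "0, 1 or 2 objects", so NumLabels and the length of
            # each probability vector must both be at least maxObj + 1 = 3.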

        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise DatasetConstraintError(
                'Tracking', 'Can not track frames with 0 objects, abort.')

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )
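        # Worked example (illustrative values): x_range=(0, 100) with
        # x_scale=0.5 spans [0 * 0.5, (100 - 1) * 0.5] = [0.0, 49.5] along x;
        # the time bounds are never scaled.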

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1] - 1) * z_scale == 0, \
                "fov of z must be (0, 0) if ndim == 2"
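            # E.g. for 2D data, z_range=(0, 1) satisfies this for any z_scale,
            # since both 0 * z_scale and (1 - 1) * z_scale evaluate to 0.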

        if self.tracker is None:
            do_build_hypotheses_graph = True

        solverType = self.getPgmlinkSolverType(solverName)

        if do_build_hypotheses_graph:
            print('\033[94m' + "make new graph" + '\033[0m')
            self.tracker = pgmlink.ConsTracking(
                int(maxObj),
                bool(sizeDependent),  # size_dependent_detection_prob
                float(median_obj_size[0]),  # median_object_size
                float(maxDist),
                bool(withDivisions),
                float(divThreshold),
                "none",  # detection_rf_filename
                fov,
                "none",  # dump traxelstore,
                solverType,
                ndim)
            g = self.tracker.buildGraph(ts, max_nearest_neighbors)

        # Create a dummy uncertainty parameter object with just one iteration,
        # i.e. no perturbations at all (iteration 0 yields the MAP solution)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(
            1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        params = self.tracker.get_conservation_tracking_parameters(
            0,  # forbidden_cost
            float(ep_gap),  # ep_gap
            bool(withTracklets),  # with tracklets
            float(detWeight),  # detection weight
            float(divWeight),  # division weight
            float(transWeight),  # transition weight
            float(disappearance_cost),  # disappearance cost
            float(appearance_cost),  # appearance cost
            bool(withMergerResolution),  # with merger resolution
            int(ndim),  # ndim
            float(transition_parameter),  # transition param
            float(borderAwareWidth),  # border width
            True,  #with_constraints
            uncertaintyParams,  # uncertainty parameters
            float(cplex_timeout),  # cplex timeout
            None,  # transition classifier
            solverType,
            False,  # training to hard constraints
            1  # num threads
        )

        # if motionModelWeight > 0:
        #     logger.info("Registering motion model with weight {}".format(motionModelWeight))
        #     params.register_motion_model4_func(swirl_motion_func_creator(motionModelWeight), motionModelWeight * 25.0)

        try:
            eventsVector = self.tracker.track(params, False)

            # eventsVector holds one sub-vector per perturbation; with no
            # perturbations configured (MAP only) we take the single entry
            eventsVector = eventsVector[0]

            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    params,
                    coordinate_map.get(),
                    float(ep_gap),
                    float(transWeight),
                    bool(withTracklets),
                    ndim,
                    transition_parameter,
                    max_traxel_id_at,
                    True,  # with_constraints
                    None)  # TransitionClassifier

        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        if not withBatchProcessing:
            currentGui = self.parent.parent.trackingApplet._gui.currentGui()
            merger_layer_idx = currentGui.layerstack.findMatchingIndex(
                lambda x: x.name == "Merger")
            tracking_layer_idx = currentGui.layerstack.findMatchingIndex(
                lambda x: x.name == "Tracking")
            if 'withMergerResolution' in parameters and not parameters['withMergerResolution']:
                currentGui.layerstack[merger_layer_idx].colorTable = \
                    currentGui.merger_colortable
            else:
                currentGui.layerstack[merger_layer_idx].colorTable = \
                    currentGui.tracking_colortable