Example #1
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            maxDist=30,     
            maxObj=2,       
            divThreshold=0.5,
            avgSize=[0],                        
            withTracklets=False,
            sizeDependent=True,
            divWeight=10.0,
            transWeight=10.0,
            withDivisions=True,
            withOpticalCorrection=True,
            withClassifierPrior=False,
            ndim=3,
            cplex_timeout=None,
            withMergerResolution=True,
            borderAwareWidth=0.0,
            withArmaCoordinates=True,
            appearance_cost=500,
            disappearance_cost=500,
            motionModelWeight=10.0,
            force_build_hypotheses_graph=False,
            max_nearest_neighbors=1,
            withBatchProcessing=False,
            solverName="ILP"
            ):
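        """Run conservation tracking (pgmlink.ConsTracking) over the selected
        time/space ranges and publish the results on the operator's slots.

        The chosen parameters are cached in the Parameters slot, a hypotheses
        graph is built from the traxel store, the tracking model is solved with
        the requested solver, mergers are optionally resolved, and the
        resulting events are written to EventsVector. Unless running in batch
        mode, the GUI color tables are updated as well. (Docstring summarized
        from the implementation below.)
        """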
        
        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        # It is assumed that the self.Parameters object is changed only at this
        # place (an ugly assumption). This lets us track any parameter changes
        # in the following lines: if the same value is already stored for a key
        # in the parameters dictionary, the parameters_changed dictionary gets a
        # "False" entry for that key; otherwise it is set to "True".
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight   
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost

        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError('Tracking', 'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
        
        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(time_range, x_range, y_range, z_range,
                                                                      size_range, x_scale, y_scale, z_scale, 
                                                                      median_object_size=median_obj_size, 
                                                                      with_div=withDivisions,
                                                                      with_opt_correction=withOpticalCorrection,
                                                                      with_classifier_prior=withClassifierPrior)
        
        if empty_frame:
            raise DatasetConstraintError('Tracking', 'Cannot track frames with 0 objects, abort.')
              
        
        if avgSize[0] > 0:
            median_obj_size = avgSize
        
        logger.info( 'median_obj_size = {}'.format( median_obj_size ) )

        ep_gap = 0.05
        transition_parameter = 5
        
        fov = pgmlink.FieldOfView(time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale,)
        
        logger.info( 'fov = {},{},{},{},{},{},{},{}'.format( time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale, ) )
        
        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1]-1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        solverType = self.getPgmlinkSolverType(solverName)

        if do_build_hypotheses_graph:
            print('\033[94m' + "make new graph" + '\033[0m')
            self.tracker = pgmlink.ConsTracking(int(maxObj),
                                         bool(sizeDependent),   # size_dependent_detection_prob
                                         float(median_obj_size[0]), # median_object_size
                                         float(maxDist),
                                         bool(withDivisions),
                                         float(divThreshold),
                                         "none",  # detection_rf_filename
                                         fov,
                                         "none", # dump traxelstore,
                                         solverType,
                                         ndim
                                         )
            g = self.tracker.buildGraph(ts, max_nearest_neighbors)

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        params = self.tracker.get_conservation_tracking_parameters(
            0,       # forbidden_cost
            float(ep_gap), # ep_gap
            bool(withTracklets), # with tracklets
            float(10.0), # detection weight
            float(divWeight), # division weight
            float(transWeight), # transition weight
            float(disappearance_cost), # disappearance cost
            float(appearance_cost), # appearance cost
            bool(withMergerResolution), # with merger resolution
            int(ndim), # ndim
            float(transition_parameter), # transition param
            float(borderAwareWidth), # border width
            True, #with_constraints
            uncertaintyParams, # uncertainty parameters
            float(cplex_timeout), # cplex timeout
            None, # transition classifier
            solverType,
            False, # training to hard constraints
            1 # num threads
        )

        # if motionModelWeight > 0:
        #     logger.info("Registering motion model with weight {}".format(motionModelWeight))
        #     params.register_motion_model4_func(swirl_motion_func_creator(motionModelWeight), motionModelWeight * 25.0)

        try:
            eventsVector = self.tracker.track(params, False)

            eventsVector = eventsVector[0] # the result is one event vector per perturbation; take the unperturbed (MAP) solution

            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map,
                                             time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    params,
                    coordinate_map.get(),
                    float(ep_gap),
                    float(transWeight),
                    bool(withTracklets),
                    ndim,
                    transition_parameter,
                    max_traxel_id_at,
                    True, # with_constraints
                    None) # TransitionClassifier

        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))
        
        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')
        
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()
        
        if not withBatchProcessing:
            merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Merger")
            tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
            if 'withMergerResolution' in parameters.keys() and not parameters['withMergerResolution']:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
            else:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            maxDist=30,     
            maxObj=2,       
            divThreshold=0.5,
            avgSize=[0],                        
            withTracklets=False,
            sizeDependent=True,
            divWeight=10.0,
            transWeight=10.0,
            withDivisions=True,
            withOpticalCorrection=True,
            withClassifierPrior=False,
            ndim=3,
            cplex_timeout=None,
            withMergerResolution=True,
            borderAwareWidth=0.0,
            withArmaCoordinates=True,
            appearance_cost=500,
            disappearance_cost=500
            ):
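        """Run pgmlink.ConsTracking over the selected time/space ranges and
        store the resulting events in the EventsVector slot.

        This variant builds the traxel store together with a coordinate map
        (used for merger resolution via arma coordinates) and invokes the
        tracker directly, without the hypotheses-graph and solver-selection
        steps of the variant above. (Docstring summarized from the code below.)
        """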
        
        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        parameters = self.Parameters.value
        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight   
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
                
        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception('Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) != (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                    'one training example for each class.')
        
        median_obj_size = [0]

        coordinate_map = pgmlink.TimestepIdCoordinateMap()
        if withArmaCoordinates:
            coordinate_map.initialize()
        ts, empty_frame = self._generate_traxelstore(time_range, x_range, y_range, z_range, 
                                                                      size_range, x_scale, y_scale, z_scale, 
                                                                      median_object_size=median_obj_size, 
                                                                      with_div=withDivisions,
                                                                      with_opt_correction=withOpticalCorrection,
                                                                      with_coordinate_list=withMergerResolution , # no vigra coordinate list, that is done by arma
                                                                      with_classifier_prior=withClassifierPrior,
                                                                      coordinate_map=coordinate_map)
        
        if empty_frame:
            raise Exception('Cannot track frames with 0 objects, abort.')
              
        
        if avgSize[0] > 0:
            median_obj_size = avgSize
        
        logger.info( 'median_obj_size = {}'.format( median_obj_size ) )

        ep_gap = 0.05
        transition_parameter = 5
        
        fov = pgmlink.FieldOfView(time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale,)
        
        logger.info( 'fov = {},{},{},{},{},{},{},{}'.format( time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale, ) )
        
        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1]-1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        tracker = pgmlink.ConsTracking(maxObj,
                                         float(maxDist),
                                         float(divThreshold),
                                         "none",  # detection_rf_filename
                                         sizeDependent,   # size_dependent_detection_prob
                                         0,       # forbidden_cost
                                         float(ep_gap), # ep_gap
                                         float(median_obj_size[0]), # median_object_size
                                         withTracklets,
                                         divWeight,
                                         transWeight,
                                         withDivisions,
                                         disappearance_cost, # disappearance cost
                                         appearance_cost, # appearance cost
                                         withMergerResolution,
                                         ndim,
                                         transition_parameter,
                                         borderAwareWidth,
                                         fov,
                                         True, #with_constraints
                                         cplex_timeout,
                                         "none" # dump traxelstore
                                         )

        
        try:
            eventsVector = tracker(ts, coordinate_map.get())
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))
        
        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')
        
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
Example #3
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              detWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              motionModelWeight=10.0,
              force_build_hypotheses_graph=False,
              max_nearest_neighbors=1,
              withBatchProcessing=False,
              solverName="ILP",
              numFramesPerSplit=0):
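        """Run conservation tracking (pgmlink.ConsTracking) and publish the
        resulting events on the operator's slots.

        Same flow as the ConsTracking variant above, with an explicit detection
        weight (detWeight); sliding-window tracking (numFramesPerSplit != 0) is
        rejected because PGMLINK does not support it. (Docstring summarized
        from the implementation below.)
        """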

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        if numFramesPerSplit != 0:
            raise Exception(
                "PGMLINK tracking does not support sliding window tracking")

        # It is assumed that the self.Parameters object is changed only at this
        # place (an ugly assumption). This lets us track any parameter changes
        # in the following lines: if the same value is already stored for a key
        # in the parameters dictionary, the parameters_changed dictionary gets a
        # "False" entry for that key; otherwise it is set to "True".
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['detWeight'] = detWeight
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['numFramesPerSplit'] = numFramesPerSplit

        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError(
                    'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
                )
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj +
                                                                     1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')

        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise DatasetConstraintError(
                'Tracking', 'Cannot track frames with 0 objects, abort.')

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        solverType = self.getPgmlinkSolverType(solverName)

        if do_build_hypotheses_graph:
            print('\033[94m' + "make new graph" + '\033[0m')
            self.tracker = pgmlink.ConsTracking(
                int(maxObj),
                bool(sizeDependent),  # size_dependent_detection_prob
                float(median_obj_size[0]),  # median_object_size
                float(maxDist),
                bool(withDivisions),
                float(divThreshold),
                "none",  # detection_rf_filename
                fov,
                "none",  # dump traxelstore,
                solverType,
                ndim)
            g = self.tracker.buildGraph(ts, max_nearest_neighbors)

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(
            1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        params = self.tracker.get_conservation_tracking_parameters(
            0,  # forbidden_cost
            float(ep_gap),  # ep_gap
            bool(withTracklets),  # with tracklets
            float(detWeight),  # detection weight
            float(divWeight),  # division weight
            float(transWeight),  # transition weight
            float(disappearance_cost),  # disappearance cost
            float(appearance_cost),  # appearance cost
            bool(withMergerResolution),  # with merger resolution
            int(ndim),  # ndim
            float(transition_parameter),  # transition param
            float(borderAwareWidth),  # border width
            True,  #with_constraints
            uncertaintyParams,  # uncertainty parameters
            float(cplex_timeout),  # cplex timeout
            None,  # transition classifier
            solverType,
            False,  # training to hard constraints
            1  # num threads
        )

        # if motionModelWeight > 0:
        #     logger.info("Registering motion model with weight {}".format(motionModelWeight))
        #     params.register_motion_model4_func(swirl_motion_func_creator(motionModelWeight), motionModelWeight * 25.0)

        try:
            eventsVector = self.tracker.track(params, False)

            # the result is one event vector per perturbation; take the unperturbed (MAP) solution
            eventsVector = eventsVector[0]

            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    params,
                    coordinate_map.get(),
                    float(ep_gap),
                    float(transWeight),
                    bool(withTracklets),
                    ndim,
                    transition_parameter,
                    max_traxel_id_at,
                    True,  # with_constraints
                    None)  # TransitionClassifier

        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        if not withBatchProcessing:
            merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
            ).layerstack.findMatchingIndex(lambda x: x.name == "Merger")
            tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
            ).layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
            if 'withMergerResolution' in parameters.keys(
            ) and not parameters['withMergerResolution']:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
            else:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            rf_fn="none",
            app=500,
            dis=500,
            noiserate=0.10,
            noiseweight=100,
            use_rf=False,
            opp=100,
            forb=0,
            with_constr=True,
            fixed_detections=False,
            mdd=0,
            min_angle=0,
            ep_gap=0.2,
            n_neighbors=2,
            with_div=True,
            cplex_timeout=None):
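        """Run chain-graph tracking (pgmlink.ChaingraphTracking) over the
        selected ranges and store the resulting events in the EventsVector
        slot. Detection/misdetection energies are derived from the noise rate
        and weight before the tracker is constructed. (Docstring summarized
        from the code below.)
        """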

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        parameters = self.Parameters.value
        parameters['appearance'] = app
        parameters['disappearance'] = dis
        parameters['opportunity'] = opp
        parameters['noiserate'] = noiserate
        parameters['noiseweight'] = noiseweight
        parameters['epgap'] = ep_gap
        parameters['nneighbors'] = n_neighbors   
        parameters['with_divisions'] = with_div
        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''        
        
        # detection / misdetection energies derived from the noise rate
        # (negative log of the corresponding probabilities, scaled by noiseweight)
        det = noiseweight * (-1) * math.log(1 - noiserate)
        mdet = noiseweight * (-1) * math.log(noiserate)
        
        ts, empty_frame = self._generate_traxelstore(time_range, x_range, y_range, z_range, size_range, x_scale, y_scale, z_scale)
        
        if empty_frame:
            raise Exception('Cannot track frames with 0 objects, abort.')
        
        tracker = pgmlink.ChaingraphTracking(rf_fn,
                                app,
                                dis,
                                det,
                                mdet,
                                use_rf,
                                opp,
                                forb,
                                with_constr,
                                fixed_detections,
                                mdd,
                                min_angle,
                                ep_gap,
                                n_neighbors
                                )

        tracker.set_with_divisions(with_div)        
        if cplex_timeout:
            tracker.set_cplex_timeout(cplex_timeout)
            
        try:
            eventsVector = tracker(ts)
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))
        
        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')
        
        events = get_events(eventsVector)
        
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500):
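        """Run pgmlink.ConsTracking over the selected time/space ranges and
        store the resulting events in the EventsVector slot. The traxel store
        is generated together with a coordinate map used for merger resolution,
        and the tracker is invoked directly with it. (Docstring summarized from
        the code below.)
        """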

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        parameters = self.Parameters.value
        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception('Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj +
                                                                      1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                    'one training example for each class.')
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) != (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                    'one training example for each class.')

        median_obj_size = [0]

        coordinate_map = pgmlink.TimestepIdCoordinateMap()
        if withArmaCoordinates:
            coordinate_map.initialize()
        ts, empty_frame = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_coordinate_list=
            withMergerResolution,  # no vigra coordinate list, that is done by arma
            with_classifier_prior=withClassifierPrior,
            coordinate_map=coordinate_map)

        if empty_frame:
            raise Exception('Cannot track frames with 0 objects, abort.')

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        tracker = pgmlink.ConsTracking(
            maxObj,
            float(maxDist),
            float(divThreshold),
            "none",  # detection_rf_filename
            sizeDependent,  # size_dependent_detection_prob
            0,  # forbidden_cost
            float(ep_gap),  # ep_gap
            float(median_obj_size[0]),  # median_object_size
            withTracklets,
            divWeight,
            transWeight,
            withDivisions,
            disappearance_cost,  # disappearance cost
            appearance_cost,  # appearance cost
            withMergerResolution,
            ndim,
            transition_parameter,
            borderAwareWidth,
            fov,
            True,  #with_constraints
            cplex_timeout,
            "none"  # dump traxelstore
        )

        try:
            eventsVector = tracker(ts, coordinate_map.get())
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
Example #6
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            maxDist=30,     
            maxObj=2,       
            divThreshold=0.5,
            avgSize=[0],                        
            withTracklets=False,
            sizeDependent=True,
            detWeight=10.0,
            divWeight=10.0,
            transWeight=10.0,
            withDivisions=True,
            withOpticalCorrection=True,
            withClassifierPrior=False,
            ndim=3,
            cplex_timeout=None,
            withMergerResolution=True,
            borderAwareWidth=0.0,
            withArmaCoordinates=True,
            appearance_cost=500,
            disappearance_cost=500,
            graph_building_parameter_changed=True,
            trainingToHardConstraints=False,
            max_nearest_neighbors=1
            ):
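        """Run conservation tracking and, if trainingToHardConstraints is set,
        inject the user's tracking annotations into the hypotheses graph as
        labels before solving.

        The hypotheses graph is rebuilt with an increasing neighborhood size
        until all annotated arcs are contained in it; the tracking weights are
        read from the GUI, mergers are optionally resolved, and the resulting
        events are written to EventsVector. (Docstring summarized from the
        implementation below.)
        """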
        
        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        # It is assumed that the self.Parameters object is changed only at this
        # place (an ugly assumption). This lets us track any parameter changes
        # in the following lines: if the same value is already stored for a key
        # in the parameters dictionary, the parameters_changed dictionary gets a
        # "False" entry for that key; otherwise it is set to "True".
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight   
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
                
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError('Tracking', 'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'+\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) != (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'+\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
        
        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range, x_range, y_range, z_range,
            size_range, x_scale, y_scale, z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)
        
        if empty_frame:
            raise DatasetConstraintError('Tracking', 'Cannot track frames with 0 objects, abort.')
              
        
        if avgSize[0] > 0:
            median_obj_size = avgSize
        
        logger.info( 'median_obj_size = {}'.format( median_obj_size ) )

        ep_gap = 0.05

        fov = pgmlink.FieldOfView(time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale,)
        
        logger.info( 'fov = {},{},{},{},{},{},{},{}'.format( time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale, ) )
        
        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1]-1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.consTracker is None or graph_building_parameter_changed:  # or do_build_hypotheses_graph

            foundAllArcs = False
            new_max_nearest_neighbors = max_nearest_neighbors - 1

            while not foundAllArcs:
                new_max_nearest_neighbors += 1
                logger.info( '\033[94m' +"make new graph"+  '\033[0m' )

                self.consTracker = pgmlink.ConsTracking(
                    maxObj,
                    sizeDependent,   # size_dependent_detection_prob
                    float(median_obj_size[0]), # median_object_size
                    float(maxDist),
                    withDivisions,
                    float(divThreshold),
                    "none",  # detection_rf_filename
                    fov,
                    "none", # dump traxelstore,
                    pgmlink.ConsTrackingSolverType.CplexSolver,
                    ndim)
                hypothesesGraph = self.consTracker.buildGraph(ts, new_max_nearest_neighbors)


                self.features = self.ObjectFeatures(range(0,self.LabelImage.meta.shape[0])).wait()

                foundAllArcs = True
                if trainingToHardConstraints:

                    logger.info("Tracking: Adding Training Annotations to Hypotheses Graph")

                    # could be merged with code in structuredTrackingGui
                    self.consTracker.addLabels()
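                    # Transfer the user's training annotations from each crop into the
                    # hypotheses graph: appearance/disappearance, arc, and division labels.
                    # If a labeled arc is not present in the graph, foundAllArcs becomes
                    # False and the graph is rebuilt with a larger neighborhood.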

                    for cropKey in self.Annotations.value.keys():
                        if foundAllArcs:
                            crop = self.Annotations.value[cropKey]

                            if "labels" in crop.keys():
                                labels = crop["labels"]
                                for time in labels.keys():

                                    if not foundAllArcs:
                                        break

                                    for label in labels[time].keys():
                                        if not foundAllArcs:
                                            break

                                        trackSet = labels[time][label]
                                        center = self.features[time]['Default features']['RegionCenter'][label]
                                        trackCount = len(trackSet)

                                        for track in trackSet:

                                            if not foundAllArcs:
                                                logger.info("[opStructuredTracking] Increasing max nearest neighbors!")
                                                break

                                            # determine whether this is a FIRST, INTERMEDIATE, LAST, or SINGLETON (FIRST_LAST)
                                            # object of its track, or a FALSE_DETECTION
                                            type = self._type(cropKey, time, track) # returns [type, previous_label] if type is "LAST" or "INTERMEDIATE", else [type]

                                            if type[0] == "LAST" or type[0] == "INTERMEDIATE":
                                                previous_label = int(type[1])
                                                previousTrackSet = labels[time-1][previous_label]
                                                intersectionSet = trackSet.intersection(previousTrackSet)
                                                trackCountIntersection = len(intersectionSet)

                                                foundAllArcs &= self.consTracker.addArcLabel(time-1, int(previous_label), int(label), float(trackCountIntersection))
                                                if not foundAllArcs:
                                                    logger.info("[opStructuredTracking] Increasing max nearest neighbors!")
                                                    break

                                        if type[0] == "FIRST":
                                            self.consTracker.addFirstLabels(time, int(label), float(trackCount))
                                            if time > self.Crops.value[cropKey]["time"][0]:
                                                self.consTracker.addDisappearanceLabel(time, int(label), 0.0)

                                        elif type[0] == "LAST":
                                            self.consTracker.addLastLabels(time, int(label), float(trackCount))
                                            if time < self.Crops.value[cropKey]["time"][1]:
                                                self.consTracker.addAppearanceLabel(time, int(label), 0.0)

                                        elif type[0] == "INTERMEDIATE":
                                            self.consTracker.addIntermediateLabels(time, int(label), float(trackCount))

                            if "divisions" in crop.keys():
                                divisions = crop["divisions"]
                                for track in divisions.keys():
                                    if not foundAllArcs:
                                        logger.info("[opStructuredTracking] Increasing max nearest neighbors!")
                                        break
                                    division = divisions[track]
                                    time = int(division[1])
                                    parent = int(self.getLabelInCrop(cropKey, time, track))

                                    if parent >=0:
                                        self.consTracker.addDivisionLabel(time, parent, 1.0)
                                        self.consTracker.addAppearanceLabel(time, parent, 1.0)
                                        self.consTracker.addDisappearanceLabel(time, parent, 1.0)

                                        child0 = int(self.getLabelInCrop(cropKey, time+1, division[0][0]))
                                        self.consTracker.addDisappearanceLabel(time+1, child0, 1.0)
                                        self.consTracker.addAppearanceLabel(time+1, child0, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(time, parent, child0, 1.0)
                                        if not foundAllArcs:
                                            logger.info("[opStructuredTracking] Increasing max nearest neighbors!")
                                            break

                                        child1 = int(self.getLabelInCrop(cropKey, time+1, division[0][1]))
                                        self.consTracker.addDisappearanceLabel(time+1, child1, 1.0)
                                        self.consTracker.addAppearanceLabel(time+1, child1, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(time, parent, child1, 1.0)
                                        if not foundAllArcs:
                                            logger.info("[opStructuredTracking] Increasing max nearest neighbors!")
                                            break


                logger.info("max nearest neighbors={}".format(new_max_nearest_neighbors))

        drawer = self.parent.parent.trackingApplet._gui.currentGui()._drawer
        if new_max_nearest_neighbors > max_nearest_neighbors:
            max_nearest_neighbors = new_max_nearest_neighbors
            drawer.maxNearestNeighborsSpinBox.setValue(max_nearest_neighbors)
            self.parent.parent.trackingApplet._gui.currentGui()._maxNearestNeighbors = max_nearest_neighbors

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        self.detectionWeight = drawer.detWeightBox.value()
        self.divisionWeight = drawer.divWeightBox.value()
        self.transitionWeight = drawer.transWeightBox.value()
        self.appearanceWeight = drawer.appearanceBox.value()
        self.disappearanceWeight = drawer.disappearanceBox.value()

        logger.info("detectionWeight= {}".format(self.detectionWeight))
        logger.info("divisionWeight={}".format(self.divisionWeight))
        logger.info("transitionWeight={}".format(self.transitionWeight))
        logger.info("appearanceWeight={}".format(self.appearanceWeight))
        logger.info("disappearanceWeight={}".format(self.disappearanceWeight))

        consTrackerParameters = self.consTracker.get_conservation_tracking_parameters(
                                        0,# forbidden_cost
                                        float(ep_gap),
                                        withTracklets,
                                        self.detectionWeight,
                                        self.divisionWeight,
                                        self.transitionWeight,
                                        self.disappearanceWeight,
                                        self.appearanceWeight,
                                        withMergerResolution,
                                        ndim,
                                        self.transition_parameter,
                                        borderAwareWidth,
                                        True, #with_constraints
                                        uncertaintyParams,
                                        cplex_timeout,
                                        None, # TransitionClassifier
                                        pgmlink.ConsTrackingSolverType.CplexSolver,
                                        trainingToHardConstraints, # training to hard constraints (default: False)
                                        1) # num threads

        # will be needed for python defined TRANSITION function
        # consTrackerParameters.register_transition_func(self.track_transition_func)

        fixLabeledNodes = False

        try:
            eventsVector = self.consTracker.track(consTrackerParameters, fixLabeledNodes )

            eventsVector = eventsVector[0] # the result is one event vector per perturbation; take the unperturbed (MAP) solution

            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()
                self._get_merger_coordinates(coordinate_map,
                                             time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.consTracker.resolve_mergers(
                    eventsVector,
                    consTrackerParameters,
                    coordinate_map.get(),
                    float(ep_gap),
                    transWeight,
                    withTracklets,
                    ndim,
                    self.transition_parameter,
                    max_traxel_id_at,
                    True, # with_constraints
                    None) # TransitionClassifier

        except Exception as e:
            if trainingToHardConstraints:
                raise Exception('Tracking: Your training cannot be extended to a feasible solution! '
                                'Turn training to hard constraints off or correct your tracking training.')
            else:
                raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')
        
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Merger")
        tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
        if 'withMergerResolution' in parameters.keys() and not parameters['withMergerResolution']:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
        else:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
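
The GUI epilogue above (and in several later examples) only swaps the colour table of the "Merger" layer depending on whether merger resolution ran. As a hedged illustration, the same switch could be factored into a small helper; `gui` below is a hypothetical stand-in for `self.parent.parent.trackingApplet._gui.currentGui()`, not part of the original operator:

def update_merger_colortable(gui, parameters):
    # Hypothetical refactoring sketch of the layer/colour-table switch above.
    merger_layer_idx = gui.layerstack.findMatchingIndex(lambda layer: layer.name == "Merger")
    if not parameters.get('withMergerResolution', True):
        # merger resolution was off: keep the dedicated merger colour table
        gui.layerstack[merger_layer_idx].colorTable = gui.merger_colortable
    else:
        # merger resolution was on: reuse the tracking colour table
        gui.layerstack[merger_layer_idx].colorTable = gui.tracking_colortable
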
Example #7
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              rf_fn="none",
              app=500,
              dis=500,
              noiserate=0.10,
              noiseweight=100,
              use_rf=False,
              opp=100,
              forb=0,
              with_constr=True,
              fixed_detections=False,
              mdd=0,
              min_angle=0,
              ep_gap=0.2,
              n_neighbors=2,
              with_div=True,
              cplex_timeout=None):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        parameters = self.Parameters.value
        parameters['appearance'] = app
        parameters['disappearance'] = dis
        parameters['opportunity'] = opp
        parameters['noiserate'] = noiserate
        parameters['noiseweight'] = noiseweight
        parameters['epgap'] = ep_gap
        parameters['nneighbors'] = n_neighbors
        parameters['with_divisions'] = with_div
        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''

        # weighted negative-log-likelihood energies derived from the noise rate
        det = noiseweight * (-1) * math.log(1 - noiserate)   # detection term:    -noiseweight * log(1 - noiserate)
        mdet = noiseweight * (-1) * math.log(noiserate)      # misdetection term: -noiseweight * log(noiserate)

        ts, empty_frame = self._generate_traxelstore(time_range, x_range,
                                                     y_range, z_range,
                                                     size_range, x_scale,
                                                     y_scale, z_scale)

        if empty_frame:
            raise Exception('Cannot track frames with 0 objects, abort.')

        tracker = pgmlink.ChaingraphTracking(rf_fn, app, dis, det, mdet,
                                             use_rf, opp, forb, with_constr,
                                             fixed_detections, mdd, min_angle,
                                             ep_gap, n_neighbors)

        tracker.set_with_divisions(with_div)
        if cplex_timeout:
            tracker.set_cplex_timeout(cplex_timeout)

        try:
            eventsVector = tracker(ts)
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)

        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
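
For intuition, the two energies in Example #7 are weighted negative-log terms of the noise rate. A quick, purely illustrative evaluation at the default arguments from the signature above (noiserate=0.10, noiseweight=100) gives:

import math

noiserate, noiseweight = 0.10, 100
det = noiseweight * (-1) * math.log(1 - noiserate)   # -100 * ln(0.9) ~= 10.54
mdet = noiseweight * (-1) * math.log(noiserate)      # -100 * ln(0.1) ~= 230.26
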
Example #8
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            maxDist=30,     
            maxObj=2,       
            divThreshold=0.5,
            avgSize=[0],                        
            withTracklets=False,
            sizeDependent=True,
            divWeight=10.0,
            transWeight=10.0,
            withDivisions=True,
            withOpticalCorrection=True,
            withClassifierPrior=False,
            ndim=3,
            cplex_timeout=None,
            withMergerResolution=True,
            borderAwareWidth = 0.0,
            withArmaCoordinates = True,
            appearance_cost = 500,
            disappearance_cost = 500,
            force_build_hypotheses_graph = False,
            withBatchProcessing = False
            ):
        
        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: if the same value for a
        # key is already stored in the parameters dictionary, the
        # parameters_changed dictionary gets a "False" entry for that key,
        # otherwise it is set to "True".
        parameters = self.Parameters.value
        parameters_changed = {}
        self._setParameter('maxDist', maxDist, parameters, parameters_changed)
        self._setParameter('maxObj', maxObj, parameters, parameters_changed)
        self._setParameter('divThreshold', divThreshold, parameters, parameters_changed)
        self._setParameter('avgSize', avgSize, parameters, parameters_changed)
        self._setParameter('withTracklets', withTracklets, parameters, parameters_changed)
        self._setParameter('sizeDependent', sizeDependent, parameters, parameters_changed)
        self._setParameter('divWeight', divWeight, parameters, parameters_changed)
        self._setParameter('transWeight', transWeight, parameters, parameters_changed)
        self._setParameter('withDivisions', withDivisions, parameters, parameters_changed)
        self._setParameter('withOpticalCorrection', withOpticalCorrection, parameters, parameters_changed)
        self._setParameter('withClassifierPrior', withClassifierPrior, parameters, parameters_changed)
        self._setParameter('withMergerResolution', withMergerResolution, parameters, parameters_changed)
        self._setParameter('borderAwareWidth', borderAwareWidth, parameters, parameters_changed)
        self._setParameter('withArmaCoordinates', withArmaCoordinates, parameters, parameters_changed)
        self._setParameter('appearanceCost', appearance_cost, parameters, parameters_changed)
        self._setParameter('disappearanceCost', disappearance_cost, parameters, parameters_changed)
        # if self._graphBuildingParameterChanged(parameters_changed):
        #     do_build_hypotheses_graph = True
        # else:
        #     do_build_hypotheses_graph = force_build_hypotheses_graph
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception('Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                                'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                                'one training example for each class.')
        
        median_obj_size = [0]

        ts, empty_frame = self._generate_traxelstore(time_range, x_range, y_range, z_range, 
                                                                      size_range, x_scale, y_scale, z_scale, 
                                                                      median_object_size=median_obj_size, 
                                                                      with_div=withDivisions,
                                                                      with_opt_correction=withOpticalCorrection,
                                                                      with_classifier_prior=withClassifierPrior)
        
        if empty_frame:
            raise Exception('cannot track frames with 0 objects, abort.')
              
        
        if avgSize[0] > 0:
            median_obj_size = avgSize
        
        logger.info( 'median_obj_size = {}'.format( median_obj_size ) )

        ep_gap = 0.05
        transition_parameter = 5
        
        fov = pgmlink.FieldOfView(time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale,)
        
        logger.info( 'fov = {},{},{},{},{},{},{},{}'.format( time_range[0] * 1.0,
                                      x_range[0] * x_scale,
                                      y_range[0] * y_scale,
                                      z_range[0] * z_scale,
                                      time_range[-1] * 1.0,
                                      (x_range[1]-1) * x_scale,
                                      (y_range[1]-1) * y_scale,
                                      (z_range[1]-1) * z_scale, ) )
        
        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1]-1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        if do_build_hypotheses_graph:
            print '\033[94m' +"make new graph"+  '\033[0m'
            self.tracker = pgmlink.ConsTracking(int(maxObj),
                                         bool(sizeDependent),   # size_dependent_detection_prob
                                         float(median_obj_size[0]), # median_object_size
                                         float(maxDist),
                                         bool(withDivisions),
                                         float(divThreshold),
                                         "none",  # detection_rf_filename
                                         fov,
                                         "none" # dump traxelstore
                                         )
            self.tracker.buildGraph(ts)
        
        try:
            eventsVector = self.tracker.track(0,       # forbidden_cost
                                            float(ep_gap), # ep_gap
                                            bool(withTracklets),
                                            float(divWeight),
                                            float(transWeight),
                                            float(disappearance_cost), # disappearance cost
                                            float(appearance_cost), # appearance cost
                                            int(ndim),
                                            float(transition_parameter),
                                            float(borderAwareWidth),
                                            True, #with_constraints
                                            float(cplex_timeout))
            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map,
                                             time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(eventsVector,
                                                coordinate_map.get(),
                                                float(ep_gap),
                                                float(transWeight),
                                                bool(withTracklets),
                                                int(ndim),
                                                float(transition_parameter),
                                                True, # with_constraints
                                                False) # with_multi_frame_moves
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))
        
        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')
        
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()
        
        if not withBatchProcessing:
            merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Merger")
            tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
            if 'withMergerResolution' in parameters.keys() and not parameters['withMergerResolution']:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
            else:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
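
Example #8 above (and the next example) route every keyword through a `_setParameter` helper whose implementation is not part of this listing. Based only on the comment block in those examples, a minimal sketch of what it presumably does (record whether each key actually changed) might look like this; treat it as an assumption, not the original code:

    def _setParameter(self, key, value, parameters, parameters_changed):
        # Sketch: mark the key as changed unless the identical value is already stored.
        parameters_changed[key] = (key not in parameters) or (parameters[key] != value)
        parameters[key] = value
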
Example #9
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              force_build_hypotheses_graph=False):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: if the same value for a
        # key is already stored in the parameters dictionary, the
        # parameters_changed dictionary gets a "False" entry for that key,
        # otherwise it is set to "True".
        parameters = self.Parameters.value
        parameters_changed = {}
        self._setParameter('maxDist', maxDist, parameters, parameters_changed)
        self._setParameter('maxObj', maxObj, parameters, parameters_changed)
        self._setParameter('divThreshold', divThreshold, parameters,
                           parameters_changed)
        self._setParameter('avgSize', avgSize, parameters, parameters_changed)
        self._setParameter('withTracklets', withTracklets, parameters,
                           parameters_changed)
        self._setParameter('sizeDependent', sizeDependent, parameters,
                           parameters_changed)
        self._setParameter('divWeight', divWeight, parameters,
                           parameters_changed)
        self._setParameter('transWeight', transWeight, parameters,
                           parameters_changed)
        self._setParameter('withDivisions', withDivisions, parameters,
                           parameters_changed)
        self._setParameter('withOpticalCorrection', withOpticalCorrection,
                           parameters, parameters_changed)
        self._setParameter('withClassifierPrior', withClassifierPrior,
                           parameters, parameters_changed)
        self._setParameter('withMergerResolution', withMergerResolution,
                           parameters, parameters_changed)
        self._setParameter('borderAwareWidth', borderAwareWidth, parameters,
                           parameters_changed)
        self._setParameter('withArmaCoordinates', withArmaCoordinates,
                           parameters, parameters_changed)
        self._setParameter('appearanceCost', appearance_cost, parameters,
                           parameters_changed)
        self._setParameter('disappearanceCost', disappearance_cost, parameters,
                           parameters_changed)
        # if self._graphBuildingParameterChanged(parameters_changed):
        #     do_build_hypotheses_graph = True
        # else:
        #     do_build_hypotheses_graph = force_build_hypotheses_graph
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception('Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                                'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                raise Exception('The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'
                                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least '
                                'one training example for each class.')

        median_obj_size = [0]

        ts, empty_frame = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise Exception('cannot track frames with 0 objects, abort.')

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1] - 1) * z_scale == 0, \
                "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        if do_build_hypotheses_graph:
            print '\033[94m' + "make new graph" + '\033[0m'
            self.tracker = pgmlink.ConsTracking(
                maxObj,
                sizeDependent,  # size_dependent_detection_prob
                float(median_obj_size[0]),  # median_object_size
                float(maxDist),
                withDivisions,
                float(divThreshold),
                "none",  # detection_rf_filename
                fov,
                "none"  # dump traxelstore
            )
            self.tracker.buildGraph(ts)

        try:
            eventsVector = self.tracker.track(
                0,  # forbidden_cost
                float(ep_gap),  # ep_gap
                withTracklets,
                divWeight,
                transWeight,
                disappearance_cost,  # disappearance cost
                appearance_cost,  # appearance cost
                ndim,
                transition_parameter,
                borderAwareWidth,
                True,  #with_constraints
                cplex_timeout)
            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    coordinate_map.get(),
                    float(ep_gap),
                    transWeight,
                    withTracklets,
                    ndim,
                    transition_parameter,
                    True,  # with_constraints
                    False)  # with_multi_frame_moves
        except Exception as e:
            raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()
        merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Merger")
        tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
        if 'withMergerResolution' in parameters.keys() and not parameters['withMergerResolution']:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
        else:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
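
All of the conservation-tracking examples build the pgmlink.FieldOfView from the same eight numbers. A small, illustrative helper (assuming, as the `(range[1] - 1) * scale` upper bounds suggest, that the ranges are half-open frame/voxel intervals) makes the mapping explicit; it is a sketch, not part of the original operators:

def fov_bounds(time_range, x_range, y_range, z_range,
               x_scale=1.0, y_scale=1.0, z_scale=1.0):
    # lower corner (t, x, y, z) followed by the upper corner, in scaled units
    return (time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale)

# e.g. a 2D dataset with z_range=(0, 1) yields z bounds (0.0, 0.0),
# which is exactly what the "ndim == 2" assertion in the examples requires.
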
Example #10
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              detWeight=10.0,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              graph_building_parameter_changed=True,
              trainingToHardConstraints=False,
              max_nearest_neighbors=1):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: if the same value for a
        # key is already stored in the parameters dictionary, the
        # parameters_changed dictionary gets a "False" entry for that key,
        # otherwise it is set to "True".
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost

        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError(
                    'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) != (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +
                    'one training example for each class.')

        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise DatasetConstraintError(
                'Tracking', 'Can not track frames with 0 objects, abort.')

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (z_range[1] - 1) * z_scale == 0, \
                "fov of z must be (0,0) if ndim==2"

        if self.consTracker is None or graph_building_parameter_changed:  # or do_build_hypotheses_graph

            foundAllArcs = False
            new_max_nearest_neighbors = max_nearest_neighbors - 1

            while not foundAllArcs:
                new_max_nearest_neighbors += 1
                logger.info('\033[94m' + "make new graph" + '\033[0m')

                self.consTracker = pgmlink.ConsTracking(
                    maxObj,
                    sizeDependent,  # size_dependent_detection_prob
                    float(median_obj_size[0]),  # median_object_size
                    float(maxDist),
                    withDivisions,
                    float(divThreshold),
                    "none",  # detection_rf_filename
                    fov,
                    "none",  # dump traxelstore,
                    pgmlink.ConsTrackingSolverType.CplexSolver,
                    ndim)
                hypothesesGraph = self.consTracker.buildGraph(
                    ts, new_max_nearest_neighbors)

                self.features = self.ObjectFeatures(
                    range(0, self.LabelImage.meta.shape[0])).wait()

                foundAllArcs = True
                if trainingToHardConstraints:

                    logger.info(
                        "Tracking: Adding Training Annotations to Hypotheses Graph"
                    )

                    # could be merged with code in structuredTrackingGui
                    self.consTracker.addLabels()

                    for cropKey in self.Annotations.value.keys():
                        if foundAllArcs:
                            crop = self.Annotations.value[cropKey]

                            if "labels" in crop.keys():
                                labels = crop["labels"]
                                for time in labels.keys():

                                    if not foundAllArcs:
                                        break

                                    for label in labels[time].keys():
                                        if not foundAllArcs:
                                            break

                                        trackSet = labels[time][label]
                                        center = self.features[time]['Default features']['RegionCenter'][label]
                                        trackCount = len(trackSet)

                                        for track in trackSet:

                                            if not foundAllArcs:
                                                logger.info(
                                                    "[opStructuredTracking] Increasing max nearest neighbors!"
                                                )
                                                break

                                            # is this a FIRST, INTERMEDIATE, LAST, SINGLETON(FIRST_LAST) object of a track (or FALSE_DETECTION)
                                            type = self._type(
                                                cropKey, time, track
                                            )  # returns [type, previous_label] if type=="LAST" or "INTERMEDIATE" (else [type])

                                            if type[0] == "LAST" or type[
                                                    0] == "INTERMEDIATE":
                                                previous_label = int(type[1])
                                                previousTrackSet = labels[
                                                    time - 1][previous_label]
                                                intersectionSet = trackSet.intersection(
                                                    previousTrackSet)
                                                trackCountIntersection = len(
                                                    intersectionSet)

                                                foundAllArcs &= self.consTracker.addArcLabel(
                                                    time - 1,
                                                    int(previous_label),
                                                    int(label),
                                                    float(
                                                        trackCountIntersection)
                                                )
                                                if not foundAllArcs:
                                                    logger.info(
                                                        "[opStructuredTracking] Increasing max nearest neighbors!"
                                                    )
                                                    break

                                        if type[0] == "FIRST":
                                            self.consTracker.addFirstLabels(
                                                time, int(label),
                                                float(trackCount))
                                            if time > self.Crops.value[
                                                    cropKey]["time"][0]:
                                                self.consTracker.addDisappearanceLabel(
                                                    time, int(label), 0.0)

                                        elif type[0] == "LAST":
                                            self.consTracker.addLastLabels(
                                                time, int(label),
                                                float(trackCount))
                                            if time < self.Crops.value[
                                                    cropKey]["time"][1]:
                                                self.consTracker.addAppearanceLabel(
                                                    time, int(label), 0.0)

                                        elif type[0] == "INTERMEDIATE":
                                            self.consTracker.addIntermediateLabels(
                                                time, int(label),
                                                float(trackCount))

                            if "divisions" in crop.keys():
                                divisions = crop["divisions"]
                                for track in divisions.keys():
                                    if not foundAllArcs:
                                        logger.info(
                                            "[opStructuredTracking] Increasing max nearest neighbors!"
                                        )
                                        break
                                    division = divisions[track]
                                    time = int(division[1])
                                    parent = int(
                                        self.getLabelInCrop(
                                            cropKey, time, track))

                                    if parent >= 0:
                                        self.consTracker.addDivisionLabel(
                                            time, parent, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time, parent, 1.0)
                                        self.consTracker.addDisappearanceLabel(
                                            time, parent, 1.0)

                                        child0 = int(
                                            self.getLabelInCrop(
                                                cropKey, time + 1,
                                                division[0][0]))
                                        self.consTracker.addDisappearanceLabel(
                                            time + 1, child0, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time + 1, child0, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(
                                            time, parent, child0, 1.0)
                                        if not foundAllArcs:
                                            logger.info(
                                                "[opStructuredTracking] Increasing max nearest neighbors!"
                                            )
                                            break

                                        child1 = int(
                                            self.getLabelInCrop(
                                                cropKey, time + 1,
                                                division[0][1]))
                                        self.consTracker.addDisappearanceLabel(
                                            time + 1, child1, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time + 1, child1, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(
                                            time, parent, child1, 1.0)
                                        if not foundAllArcs:
                                            logger.info(
                                                "[opStructuredTracking] Increasing max nearest neighbors!"
                                            )
                                            break

                logger.info("max nearest neighbors={}".format(
                    new_max_nearest_neighbors))

        drawer = self.parent.parent.trackingApplet._gui.currentGui()._drawer
        if new_max_nearest_neighbors > max_nearest_neighbors:
            max_nearest_neighbors = new_max_nearest_neighbors
            drawer.maxNearestNeighborsSpinBox.setValue(max_nearest_neighbors)
            self.parent.parent.trackingApplet._gui.currentGui()._maxNearestNeighbors = max_nearest_neighbors

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(
            1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        self.detectionWeight = drawer.detWeightBox.value()
        self.divisionWeight = drawer.divWeightBox.value()
        self.transitionWeight = drawer.transWeightBox.value()
        self.appearanceWeight = drawer.appearanceBox.value()
        self.disappearanceWeight = drawer.disappearanceBox.value()

        logger.info("detectionWeight= {}".format(self.detectionWeight))
        logger.info("divisionWeight={}".format(self.divisionWeight))
        logger.info("transitionWeight={}".format(self.transitionWeight))
        logger.info("appearanceWeight={}".format(self.appearanceWeight))
        logger.info("disappearanceWeight={}".format(self.disappearanceWeight))

        consTrackerParameters = self.consTracker.get_conservation_tracking_parameters(
            0,  # forbidden_cost
            float(ep_gap),
            withTracklets,
            self.detectionWeight,
            self.divisionWeight,
            self.transitionWeight,
            self.disappearanceWeight,
            self.appearanceWeight,
            withMergerResolution,
            ndim,
            self.transition_parameter,
            borderAwareWidth,
            True,  #with_constraints
            uncertaintyParams,
            cplex_timeout,
            None,  # TransitionClassifier
            pgmlink.ConsTrackingSolverType.CplexSolver,
            trainingToHardConstraints,
            1)  # default: False

        # will be needed for python defined TRANSITION function
        # consTrackerParameters.register_transition_func(self.track_transition_func)

        fixLabeledNodes = False

        try:
            eventsVector = self.consTracker.track(consTrackerParameters,
                                                  fixLabeledNodes)

            eventsVector = eventsVector[0]  # one event vector per perturbation; take the first (unperturbed) one

            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()
                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.consTracker.resolve_mergers(
                    eventsVector,
                    consTrackerParameters,
                    coordinate_map.get(),
                    float(ep_gap),
                    transWeight,
                    withTracklets,
                    ndim,
                    self.transition_parameter,
                    max_traxel_id_at,
                    True,  # with_constraints
                    None)  # TransitionClassifier

        except Exception as e:
            if trainingToHardConstraints:
                raise Exception('Tracking: Your training cannot be extended to a feasible solution! '
                                'Turn training to hard constraints off or correct your tracking training.')
            else:
                raise Exception('Tracking terminated unsuccessfully: ' + str(e))

        if len(eventsVector) == 0:
            raise Exception('Tracking terminated unsuccessfully: Events vector has zero length.')

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Merger")
        tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui().layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
        if 'withMergerResolution' in parameters.keys() and not parameters['withMergerResolution']:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
        else:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
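
Example #10 wraps graph construction in a retry loop: whenever one of the annotated transitions or divisions has no matching arc in the hypotheses graph, it rebuilds the graph with a larger neighbourhood. A condensed, hedged sketch of that control flow is shown below; `add_all_annotation_arcs` is a hypothetical stand-in for the nested annotation loops above, not an existing API:

def build_graph_for_annotations(make_tracker, ts, max_nearest_neighbors, add_all_annotation_arcs):
    # Sketch of the foundAllArcs loop in Example #10.
    n = max_nearest_neighbors
    while True:
        tracker = make_tracker()              # fresh pgmlink.ConsTracking instance
        tracker.buildGraph(ts, n)
        if add_all_annotation_arcs(tracker):
            # every labelled arc/division could be added as a training constraint
            return tracker, n
        n += 1                                # widen the nearest-neighbour search and rebuild
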