    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              detWeight=10.0,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              motionModelWeight=10.0,
              force_build_hypotheses_graph=False,
              max_nearest_neighbors=1,
              numFramesPerSplit=0,
              withBatchProcessing=False,
              solverName="Flow-based",
              progressWindow=None,
              progressVisitor=CommandLineProgressVisitor()):
        """
        Main conservation tracking function. Builds the hypotheses graph, runs the tracking solver, and resolves mergers.
        """

        self.progressWindow = progressWindow
        self.progressVisitor = progressVisitor

        if not self.Parameters.ready():
            self.raiseException(self.progressWindow, "Parameter slot is not ready")
        
        # It is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes to the
        # parameters as done in the following lines: if the same value for a
        # key is already present in the parameters dictionary, the
        # parameters_changed dictionary gets a "False" entry for this key;
        # otherwise it is set to "True".
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['detWeight'] = detWeight
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
        parameters['scales'] = [x_scale, y_scale, z_scale]
        parameters['time_range'] = [min(time_range), max(time_range)]
        parameters['x_range'] = x_range
        parameters['y_range'] = y_range
        parameters['z_range'] = z_range
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['numFramesPerSplit'] = numFramesPerSplit
        parameters['solver'] = str(solverName)

        # Set a size range with a minimum area equal to the max number of objects,
        # since the GMM throws an error if we try to fit more Gaussians than the
        # number of pixels in the object.
        size_range = (max(maxObj, size_range[0]), size_range[1])
        parameters['size_range'] = size_range
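        # For example, with maxObj=2 and the default size_range=(0, 100000),
        # the range stored in the parameters becomes (2, 100000).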

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        self.Parameters.setValue(parameters, check_changed=False)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                self.raiseDatasetConstraintError(self.progressWindow, 'Tracking', 'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                self.raiseDatasetConstraintError(self.progressWindow, 'Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                self.raiseDatasetConstraintError(self.progressWindow, 'Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')

        hypothesesGraph = self._createHypothesesGraph()
        hypothesesGraph.allowLengthOneTracks = True

        if withTracklets:
            hypothesesGraph = hypothesesGraph.generateTrackletGraph()

        hypothesesGraph.insertEnergies()
        trackingGraph = hypothesesGraph.toTrackingGraph()
        trackingGraph.convexifyCosts()
        model = trackingGraph.model
        model['settings']['allowLengthOneTracks'] = True
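        # Length-one tracks are enabled both on the hypotheses graph (above)
        # and in the exported model settings.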

        detWeight = 10.0 # FIXME: Should we store this weight in the parameters slot?
        weights = trackingGraph.weightsListToDict([transWeight, detWeight, divWeight, appearance_cost, disappearance_cost])
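        # Weight order passed to weightsListToDict: transition, detection,
        # division, appearance, disappearance.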

        stepStr = solverName + " tracking solver"
        self.progressVisitor.showState(stepStr)
        self.progressVisitor.showProgress(0)

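        # dpct (flow-based) and mht (ILP) are the solver backends; the truthiness
        # checks below guard against a backend that could not be imported
        # (presumably bound to None at import time), in which case the
        # ValueError below is raised.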
        if solverName == 'Flow-based' and dpct:
            if numFramesPerSplit:
                # Run the solver with frame splits (split, solve, and stitch the video to improve running time)
                from hytra.core.splittracking import SplitTracking 
                result = SplitTracking.trackFlowBasedWithSplits(model, weights, numFramesPerSplit=numFramesPerSplit)
            else:
                # Cast weights to float (previously raised a TypeError on Windows)
                weights['weights'] = [float(w) for w in weights['weights']]
                result = dpct.trackFlowBased(model, weights)

        elif solverName == 'ILP' and mht:
            result = mht.track(model, weights)
        else:
            raise ValueError("Invalid tracking solver selected")

        self.progressVisitor.showProgress(1.0)
        # Insert the solution into the hypotheses graph and from that deduce the lineages
        if hypothesesGraph:
            hypothesesGraph.insertSolution(result)
            
        # Merger resolution
        resolvedMergersDict = {}
        if withMergerResolution:
            stepStr = "Merger resolution"
            self.progressVisitor.showState(stepStr)
            resolvedMergersDict = self._resolveMergers(hypothesesGraph, model)

        # Set value of resolved mergers slot (Should be empty if mergers are disabled)
        self.ResolvedMergers.setValue(resolvedMergersDict, check_changed=False)
                
        # Computing tracking lineage IDs from within Hytra
        hypothesesGraph.computeLineage()

        if self.progressWindow is not None:
            self.progressWindow.onTrackDone()
        self.progressVisitor.showProgress(1.0)
        # Uncomment to export a hypotheses graph diagram
        #logger.info("Exporting hypotheses graph diagram")
        #from hytra.util.hypothesesgraphdiagram import HypothesesGraphDiagram
        #hgv = HypothesesGraphDiagram(hypothesesGraph._graph, timeRange=(0, 10), fileName='HypothesesGraph.png' )
                
        # Set value of the hypotheses graph slot (use referenceTraxelGraph if using tracklets)
        hypothesesGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
        self.HypothesesGraph.setValue(hypothesesGraph, check_changed=False)

        # Set all the output slots dirty (See execute() function)
        self.Output.setDirty()
        self.MergerOutput.setDirty()
        self.RelabeledImage.setDirty()

        return result
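
    # Usage sketch, assuming `op` is an instance of this tracking operator with
    # its input slots (Parameters, DetectionProbabilities, NumLabels, ...)
    # already connected and configured; a call could look roughly like this:
    #
    # result = op.track(
    #     time_range=range(0, 10),
    #     x_range=(0, 512),
    #     y_range=(0, 512),
    #     z_range=(0, 1),
    #     maxObj=2,
    #     withDivisions=True,
    #     withMergerResolution=True,
    #     solverName="Flow-based",
    # )
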
    def _minCostMaxFlowMergerResolving(self,
                                       objectFeatures,
                                       transitionClassifier=None,
                                       transitionParameter=5.0):
        """
        Find the optimal assignments within the `resolvedGraph` by running min-cost max-flow from the
        `dpct` module.

        Converts the `resolvedGraph` to our JSON model structure, predicts the transition probabilities
        either using the given transitionClassifier, or using distance-based probabilities.

        **returns** a `nodeFlowMap` and `arcFlowMap` holding information on the usage of the respective nodes and links

        **Note:** cannot use `networkx` flow methods because they don't work with floating point weights.
        """

        trackingGraph = JsonTrackingGraph(progressVisitor=self.progressVisitor)
        for node in self.resolvedGraph.nodes_iter():
            additionalFeatures = {}
            additionalFeatures['nid'] = node

            # Number of flow states per node (default: 2, i.e. unused or carrying
            # one unit of flow); nodes without incoming/outgoing arcs additionally
            # get appearance/disappearance features below.
            numStates = 2

            if len(self.resolvedGraph.in_edges(node)) == 0:
                # division nodes with no incoming arcs offer 2 units of flow without the need to de-merge
                if (node in self.unresolvedGraph.nodes()
                        and self.unresolvedGraph.node[node]['division']
                        and len(self.unresolvedGraph.out_edges(node)) == 2):
                    numStates = 3
                additionalFeatures['appearanceFeatures'] = [
                    [i**2 * 0.01] for i in range(numStates)
                ]
            if len(self.resolvedGraph.out_edges(node)) == 0:
                # Division nodes without incoming arcs should have outgoing arcs,
                # otherwise they should not appear in the resolved graph.
                assert numStates == 2
                additionalFeatures['disappearanceFeatures'] = [
                    [i**2 * 0.01] for i in range(numStates)
                ]

            features = [[i**2] for i in range(numStates)]
            uuid = trackingGraph.addDetectionHypotheses(
                features, **additionalFeatures)
            self.resolvedGraph.node[node]['id'] = uuid

        for edge in self.resolvedGraph.edges_iter():
            src = self.resolvedGraph.node[edge[0]]['id']
            dest = self.resolvedGraph.node[edge[1]]['id']

            featuresAtSrc = objectFeatures[edge[0]]
            featuresAtDest = objectFeatures[edge[1]]

            if transitionClassifier is not None:
                try:
                    featVec = self.pluginManager.applyTransitionFeatureVectorConstructionPlugins(
                        featuresAtSrc, featuresAtDest,
                        transitionClassifier.selectedFeatures)
                except Exception:
                    getLogger().error(
                        "Could not compute transition features of link {}->{}:"
                        .format(src, dest))
                    getLogger().error(featuresAtSrc)
                    getLogger().error(featuresAtDest)
                    raise
                featVec = np.expand_dims(np.array(featVec), axis=0)
                probs = transitionClassifier.predictProbabilities(featVec)[0]
            else:
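                # Distance-based fallback: the transition probability decays
                # exponentially with the distance between the region centers,
                # controlled by transitionParameter.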
                dist = np.linalg.norm(featuresAtDest['RegionCenter'] -
                                      featuresAtSrc['RegionCenter'])
                prob = np.exp(-dist / transitionParameter)
                probs = [1.0 - prob, prob]

            trackingGraph.addLinkingHypotheses(src, dest,
                                               listify(negLog(probs)))
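            # The probabilities are turned into energies via their negative log
            # before being attached to the link.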

        # Set TraxelToUniqueId on resolvedGraph's json graph
        uuidToTraxelMap = {}
        traxelIdPerTimestepToUniqueIdMap = {}

        for node in self.resolvedGraph.nodes_iter():
            uuid = self.resolvedGraph.node[node]['id']
            uuidToTraxelMap[uuid] = [node]

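            # Each traxel is a (timestep, traxelId) tuple; build the
            # timestep -> traxelId -> uuid lookup consumed by setTraxelToUniqueId below.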
            for t in uuidToTraxelMap[uuid]:
                traxelIdPerTimestepToUniqueIdMap.setdefault(
                    str(t[0]), {})[str(t[1])] = uuid

        trackingGraph.setTraxelToUniqueId(traxelIdPerTimestepToUniqueIdMap)

        # track
        import dpct

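        # All weights are set to 1, so the node and link energies inserted above
        # are used unscaled.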
        weights = {"weights": [1, 1, 1, 1]}

        if not self.numSplits:
            mergerResult = dpct.trackMaxFlow(trackingGraph.model, weights)
        else:
            getLogger().info("Running split tracking with {} splits.".format(
                self.numSplits))
            mergerResult = SplitTracking.trackFlowBasedWithSplits(
                trackingGraph.model,
                weights,
                numSplits=self.numSplits,
                withMergerResolver=True)

        # transform results to dictionaries that can be indexed by id or (src,dest)
        nodeFlowMap = dict([(int(d['id']), int(d['value']))
                            for d in mergerResult['detectionResults']])
        arcFlowMap = dict([((int(l['src']), int(l['dest'])), int(l['value']))
                           for l in mergerResult['linkingResults']])

        return nodeFlowMap, arcFlowMap