def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file; needed in case merger resolving is to be run.

    """

    params = convertToDict(unknown)
    
    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")

        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params['label-image-path']
        ilpOptions.labelImageFilename = params['label-image-file']
        ilpOptions.rawImagePath = params['raw-data-path']
        ilpOptions.rawImageFilename = params['raw-data-file']
        try:
            ilpOptions.rawImageAxes = params['raw-data-axes']
        except KeyError:
            # fall back to the default axis order when none is configured
            ilpOptions.rawImageAxes = 'txyzc'

        ilpOptions.sizeFilter = [int(params['min-size']), 100000]

        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params['object-count-classifier-file']
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project

        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params['division-classifier-file']
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None

        probGenerator = probabilitygenerator.IlpProbabilityGenerator(ilpOptions,
                                              pluginPaths=['../hytra/plugins'],
                                              useMultiprocessing=False)

        # if time_range is not None:
        #     traxelstore.timeRange = time_range

        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(probGenerator.shape,
                                   probGenerator.timeRange[0],
                                   probGenerator.timeRange[1],
                                   [probGenerator.x_scale,
                                    probGenerator.y_scale,
                                    probGenerator.z_scale])

        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params['max-number-objects']),
            numNearestNeighbors=int(params['max-nearest-neighbors']),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1
        )

        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()

        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(model_filename=options.model_filename)

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError("Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)
            
        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)
        
        if options.do_create_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph,
            ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath,
            params['out-label-image-file'],
            ilpOptions.rawImageFilename,
            ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes,
            ['../hytra/plugins'],
            True)
        merger_resolver.run(None, None)
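
For reference, here is a minimal sketch of how this entry point might be driven. It is an illustration only: the attribute names mirror the `options.*` accesses above, but the real script builds the namespace with its own argparse parser, and the `--key value` format assumed for `convertToDict` is a guess from how `params` is used.

import argparse

# Hypothetical invocation (all file names are placeholders).
options = argparse.Namespace(
    do_extract_weights=False,
    weight_filename='weights.json',
    do_create_graph=True,
    ilastik_tracking_project='tracking.ilp',
    do_convexify=True,
    do_tracking=True,
    solver='flow-based',
    result_filename='tracking_result.json',
    do_merger_resolving=False,
    model_filename=None)

# Leftover config entries, assumed to be '--key value' pairs that
# convertToDict() turns into the params dictionary used above.
unknown = ['--label-image-path', '/TrackingFeatureExtraction/LabelImage',
           '--label-image-file', 'segmentation.h5',
           '--raw-data-path', 'volume/data',
           '--raw-data-file', 'raw.h5',
           '--min-size', '10',
           '--max-number-objects', '2',
           '--max-nearest-neighbors', '2']

run_pipeline(options, unknown)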
Example #2
    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    # make sure output directory exists
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # load graph and compute lineages
    getLogger().debug("Loading graph and result")
    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename,
                                      result_filename=args.result_filename)
    hypothesesGraph = trackingGraph.toHypothesesGraph()
    hypothesesGraph.computeLineage(1, 1, args.linksToNumNextFrames)

    mappings = {}  # dictionary over timeframes, containing another dict objectId -> trackId per frame
    tracks = {}  # stores a list of timeframes per track, so that we can find from<->to per track
    trackParents = {}  # store the parent trackID of a track if known
    gapTrackParents = {}

    for n in hypothesesGraph.nodeIterator():
        frameMapping = mappings.setdefault(n[0], {})
        if 'trackId' not in hypothesesGraph._graph.node[n]:
            raise ValueError(
                "You need to compute the Lineage of every node before accessing the trackId!")
Example #3
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='(Strictly!) Convexify the costs of a model to allow a flow-based solution',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path', dest='config_file')
    parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',
                        help='Filename of the json model description')
    parser.add_argument('--out-json-file', default=None, type=str, dest='result_filename',
                        help='Filename of the json file containing the model with convexified costs.'
                             ' If None, it works in-place.')
    parser.add_argument('--epsilon', type=float, dest='epsilon', default=0.000001,
                        help='Epsilon is added to the gradient if the 1st derivative has a plateau.')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)

    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename)
    trackingGraph.convexifyCosts(args.epsilon)

    if args.result_filename is None:
        args.result_filename = args.model_filename

    writeToFormattedJSON(args.result_filename, trackingGraph.model)
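
To make the epsilon flag concrete, here is a standalone sketch (not the library's implementation) of strictly convexifying a 1D cost vector: wherever the discrete 1st derivative plateaus or dips, the cost is raised so that consecutive differences keep growing by at least epsilon.

def convexify_1d(costs, epsilon=1e-6):
    # Enforce strictly increasing consecutive differences (strict convexity).
    costs = list(costs)
    for i in range(2, len(costs)):
        min_diff = (costs[i - 1] - costs[i - 2]) + epsilon
        if costs[i] - costs[i - 1] < min_diff:
            costs[i] = costs[i - 1] + min_diff
    return costs

print(convexify_1d([0.0, 1.0, 1.0, 1.0]))  # plateau tilted: ~[0.0, 1.0, 2.000001, 3.000003]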
Example #4
                        help='Filename where to store the new result')
    parser.add_argument('--trans-par', dest='trans_par', type=float, default=5.0,
                        help='alpha for the transition prior')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Turn on verbose logging', default=False)
    parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',
                        default=[os.path.abspath('../hytra/plugins')],
                        help='A list of paths to search for plugins for the tracking pipeline.')
    args, _ = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    
    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename, result_filename=args.result_filename)

    merger_resolver = JsonMergerResolver(trackingGraph,
                                         args.label_image_filename,
                                         args.label_image_path,
                                         args.out_label_image,
                                         args.raw_filename,
                                         args.raw_path,
                                         args.raw_axes,
                                         args.pluginPaths,
                                         args.verbose)
    merger_resolver.run(
        args.transition_classifier_filename,
        args.transition_classifier_path)

    # save
Example #6
    def _minCostMaxFlowMergerResolving(self,
                                       objectFeatures,
                                       transitionClassifier=None,
                                       transitionParameter=5.0):
        """
        Find the optimal assignments within the `resolvedGraph` by running min-cost max-flow from the
        `dpct` module.

        Converts the `resolvedGraph` to our JSON model structure, predicts the transition probabilities
        either using the given transitionClassifier, or using distance-based probabilities.

        **returns** a `nodeFlowMap` and `arcFlowMap` holding information on the usage of the respective nodes and links

        **Note:** cannot use `networkx` flow methods because they don't work with floating point weights.
        """

        trackingGraph = JsonTrackingGraph(progressVisitor=self.progressVisitor)
        for node in self.resolvedGraph.nodes_iter():
            additionalFeatures = {}
            additionalFeatures['nid'] = node

            # nodes with no in/out
            numStates = 2

            if len(self.resolvedGraph.in_edges(node)) == 0:
                # division nodes with no incoming arcs offer 2 units of flow without the need to de-merge
                if (node in self.unresolvedGraph.nodes()
                        and self.unresolvedGraph.node[node]['division']
                        and len(self.unresolvedGraph.out_edges(node)) == 2):
                    numStates = 3
                additionalFeatures['appearanceFeatures'] = [
                    [i**2 * 0.01] for i in range(numStates)
                ]
            if len(self.resolvedGraph.out_edges(node)) == 0:
                # division nodes with no incoming should have outgoing, or they shouldn't show up in resolved graph
                assert numStates == 2
                additionalFeatures['disappearanceFeatures'] = [
                    [i**2 * 0.01] for i in range(numStates)
                ]

            features = [[i**2] for i in range(numStates)]
            uuid = trackingGraph.addDetectionHypotheses(
                features, **additionalFeatures)
            self.resolvedGraph.node[node]['id'] = uuid

        for edge in self.resolvedGraph.edges_iter():
            src = self.resolvedGraph.node[edge[0]]['id']
            dest = self.resolvedGraph.node[edge[1]]['id']

            featuresAtSrc = objectFeatures[edge[0]]
            featuresAtDest = objectFeatures[edge[1]]

            if transitionClassifier is not None:
                try:
                    featVec = self.pluginManager.applyTransitionFeatureVectorConstructionPlugins(
                        featuresAtSrc, featuresAtDest,
                        transitionClassifier.selectedFeatures)
                except Exception:
                    getLogger().error(
                        "Could not compute transition features of link {}->{}:"
                        .format(src, dest))
                    getLogger().error(featuresAtSrc)
                    getLogger().error(featuresAtDest)
                    raise
                featVec = np.expand_dims(np.array(featVec), axis=0)
                probs = transitionClassifier.predictProbabilities(featVec)[0]
            else:
                dist = np.linalg.norm(featuresAtDest['RegionCenter'] -
                                      featuresAtSrc['RegionCenter'])
                prob = np.exp(-dist / transitionParameter)
                probs = [1.0 - prob, prob]

            trackingGraph.addLinkingHypotheses(src, dest,
                                               listify(negLog(probs)))

        # Set TraxelToUniqueId on resolvedGraph's json graph
        uuidToTraxelMap = {}
        traxelIdPerTimestepToUniqueIdMap = {}

        for node in self.resolvedGraph.nodes_iter():
            uuid = self.resolvedGraph.node[node]['id']
            uuidToTraxelMap[uuid] = [node]

            for t in uuidToTraxelMap[uuid]:
                traxelIdPerTimestepToUniqueIdMap.setdefault(str(t[0]), {})[str(t[1])] = uuid

        trackingGraph.setTraxelToUniqueId(traxelIdPerTimestepToUniqueIdMap)

        # track
        import dpct

        weights = {"weights": [1, 1, 1, 1]}

        if not self.numSplits:
            mergerResult = dpct.trackMaxFlow(trackingGraph.model, weights)
        else:
            getLogger().info("Running split tracking with {} splits.".format(
                self.numSplits))
            mergerResult = SplitTracking.trackFlowBasedWithSplits(
                trackingGraph.model,
                weights,
                numSplits=self.numSplits,
                withMergerResolver=True)

        # transform results to dictionaries that can be indexed by id or (src,dest)
        nodeFlowMap = {int(d['id']): int(d['value'])
                       for d in mergerResult['detectionResults']}
        arcFlowMap = {(int(l['src']), int(l['dest'])): int(l['value'])
                      for l in mergerResult['linkingResults']}

        return nodeFlowMap, arcFlowMap
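
The distance-based fallback above is simply exp(-dist / transitionParameter) turned into negative-log costs. Below is a self-contained sketch of that conversion; the clamp against log(0) is an assumption, and in the code above the hytra helpers listify and negLog perform this step.

import numpy as np

def transition_costs(center_src, center_dest, transition_parameter=5.0):
    # Distance between region centers -> probability that the link is used.
    dist = np.linalg.norm(np.asarray(center_dest) - np.asarray(center_src))
    prob = np.exp(-dist / transition_parameter)
    probs = [1.0 - prob, prob]  # [link unused, link used]
    # One singleton energy list per state, as the JSON model expects.
    return [[-np.log(max(p, 1e-10))] for p in probs]

print(transition_costs([0.0, 0.0], [3.0, 4.0]))  # dist 5.0 -> approx [[0.459], [1.0]]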
Example #7
    def _minCostMaxFlowMergerResolving(self, objectFeatures, transitionClassifier=None, transitionParameter=5.0):
        """
        Find the optimal assignments within the `resolvedGraph` by running min-cost max-flow from the
        `dpct` module.

        Converts the `resolvedGraph` to our JSON model structure, predicts the transition probabilities
        either using the given transitionClassifier, or using distance-based probabilities.

        **returns** a `nodeFlowMap` and `arcFlowMap` holding information on the usage of the respective nodes and links

        **Note:** cannot use `networkx` flow methods because they don't work with floating point weights.
        """

        trackingGraph = JsonTrackingGraph()
        for node in self.resolvedGraph.nodes_iter():
            additionalFeatures = {}

            # nodes with no in/out
            numStates = 2
            if len(self.resolvedGraph.in_edges(node)) == 0:
                # division nodes with no incoming arcs offer 2 units of flow without the need to de-merge
                if node in self.unresolvedGraph.nodes() and self.unresolvedGraph.node[node]['division'] and len(self.unresolvedGraph.out_edges(node)) == 2:
                    numStates = 3
                additionalFeatures['appearanceFeatures'] = [[i**2 * 0.01] for i in range(numStates)]
            if len(self.resolvedGraph.out_edges(node)) == 0:
                assert(numStates == 2) # division nodes with no incoming should have outgoing, or they shouldn't show up in resolved graph
                additionalFeatures['disappearanceFeatures'] = [[i**2 * 0.01] for i in range(numStates)]

            features = [[i**2] for i in range(numStates)]
            uuid = trackingGraph.addDetectionHypotheses(features, **additionalFeatures)
            self.resolvedGraph.node[node]['id'] = uuid

        for edge in self.resolvedGraph.edges_iter():
            src = self.resolvedGraph.node[edge[0]]['id']
            dest = self.resolvedGraph.node[edge[1]]['id']

            featuresAtSrc = objectFeatures[edge[0]]
            featuresAtDest = objectFeatures[edge[1]]

            if transitionClassifier is not None:
                try:
                    featVec = self.pluginManager.applyTransitionFeatureVectorConstructionPlugins(
                        featuresAtSrc, featuresAtDest, transitionClassifier.selectedFeatures)
                except Exception:
                    getLogger().error("Could not compute transition features of link {}->{}:".format(src, dest))
                    getLogger().error(featuresAtSrc)
                    getLogger().error(featuresAtDest)
                    raise
                featVec = np.expand_dims(np.array(featVec), axis=0)
                probs = transitionClassifier.predictProbabilities(featVec)[0]
            else:
                dist = np.linalg.norm(featuresAtDest['RegionCenter'] - featuresAtSrc['RegionCenter'])
                prob = np.exp(-dist / transitionParameter)
                probs = [1.0 - prob, prob]

            trackingGraph.addLinkingHypotheses(src, dest, listify(negLog(probs)))

        # track
        import dpct
        weights = {"weights": [1, 1, 1, 1]}
        mergerResult = dpct.trackMaxFlow(trackingGraph.model, weights)

        # transform results to dictionaries that can be indexed by id or (src,dest)
        nodeFlowMap = dict([(int(d['id']), int(d['value'])) for d in mergerResult['detectionResults']])
        arcFlowMap = dict([((int(l['src']), int(l['dest'])), int(l['value'])) for l in mergerResult['linkingResults']])

        return nodeFlowMap, arcFlowMap
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='(Strictly!) Convexify the costs of a model to allow a flow-based solution',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path', dest='config_file', required=True)
    parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',
                        help='Filename of the json model description')
    parser.add_argument('--out-json-file', default=None, type=str, dest='result_filename',
                        help='Filename of the json file containing the model with convexified costs.'
                             ' If None, it works in-place.')
    parser.add_argument('--epsilon', type=float, dest='epsilon', default=0.000001,
                        help='Epsilon is added to the gradient if the 1st derivative has a plateau.')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)

    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename)
    trackingGraph.convexifyCosts(args.epsilon)

    if args.result_filename is None:
        args.result_filename = args.model_filename

    writeToFormattedJSON(args.result_filename, trackingGraph.model)
Example #9
    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    # make sure output directory exists
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # load graph and compute lineages
    getLogger().debug("Loading graph and result")
    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename, result_filename=args.result_filename)
    hypothesesGraph = trackingGraph.toHypothesesGraph()
    hypothesesGraph.computeLineage(1, 1, args.linksToNumNextFrames)

    mappings = {} # dictionary over timeframes, containing another dict objectId -> trackId per frame
    tracks = {} # stores a list of timeframes per track, so that we can find from<->to per track
    trackParents = {} # store the parent trackID of a track if known
    gapTrackParents = {}

    for n in hypothesesGraph.nodeIterator():
        frameMapping = mappings.setdefault(n[0], {})
        if 'trackId' not in hypothesesGraph._graph.node[n]:
            raise ValueError("You need to compute the Lineage of every node before accessing the trackId!")
        trackId = hypothesesGraph._graph.node[n]['trackId']
        if trackId is not None:
            frameMapping[n[1]] = trackId
Example #10
    def _minCostMaxFlowMergerResolving(self,
                                       objectFeatures,
                                       transitionClassifier=None,
                                       transitionParameter=5.0):
        """
        Find the optimal assignments within the `resolvedGraph` by running min-cost max-flow from the
        `dpct` module.

        Converts the `resolvedGraph` to our JSON model structure, predicts the transition probabilities
        either using the given transitionClassifier, or using distance-based probabilities.

        **returns** a `nodeFlowMap` and `arcFlowMap` holding information on the usage of the respective nodes and links

        **Note:** cannot use `networkx` flow methods because they don't work with floating point weights.
        """

        trackingGraph = JsonTrackingGraph()
        for node in self.resolvedGraph.nodes_iter():
            additionalFeatures = {}
            if len(self.resolvedGraph.in_edges(node)) == 0:
                additionalFeatures['appearanceFeatures'] = [[0], [0]]
            if len(self.resolvedGraph.out_edges(node)) == 0:
                additionalFeatures['disappearanceFeatures'] = [[0], [0]]
            uuid = trackingGraph.addDetectionHypotheses([[0], [1]],
                                                        **additionalFeatures)
            self.resolvedGraph.node[node]['id'] = uuid

        for edge in self.resolvedGraph.edges_iter():
            src = self.resolvedGraph.node[edge[0]]['id']
            dest = self.resolvedGraph.node[edge[1]]['id']

            featuresAtSrc = objectFeatures[edge[0]]
            featuresAtDest = objectFeatures[edge[1]]

            if transitionClassifier is not None:
                try:
                    featVec = self.pluginManager.applyTransitionFeatureVectorConstructionPlugins(
                        featuresAtSrc, featuresAtDest,
                        transitionClassifier.selectedFeatures)
                except Exception:
                    getLogger().error(
                        "Could not compute transition features of link {}->{}:"
                        .format(src, dest))
                    getLogger().error(featuresAtSrc)
                    getLogger().error(featuresAtDest)
                    raise
                featVec = np.expand_dims(np.array(featVec), axis=0)
                probs = transitionClassifier.predictProbabilities(featVec)[0]
            else:
                dist = np.linalg.norm(featuresAtDest['RegionCenter'] -
                                      featuresAtSrc['RegionCenter'])
                prob = np.exp(-dist / transitionParameter)
                probs = [1.0 - prob, prob]

            trackingGraph.addLinkingHypotheses(src, dest,
                                               listify(negLog(probs)))

        # track
        import dpct
        weights = {"weights": [1, 1, 1, 1]}
        mergerResult = dpct.trackMaxFlow(trackingGraph.model, weights)

        # transform results to dictionaries that can be indexed by id or (src,dest)
        nodeFlowMap = {int(d['id']): int(d['value'])
                       for d in mergerResult['detectionResults']}
        arcFlowMap = {(int(l['src']), int(l['dest'])): int(l['value'])
                      for l in mergerResult['linkingResults']}

        return nodeFlowMap, arcFlowMap
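
The returned maps are plain dictionaries keyed by node id and by (src, dest). The snippet below replays that last transformation on a hand-made result that has the shape this code expects from dpct; the concrete ids and values are made up:

mergerResult = {'detectionResults': [{'id': 0, 'value': 1}, {'id': 1, 'value': 2}],
                'linkingResults': [{'src': 0, 'dest': 1, 'value': 1}]}
nodeFlowMap = {int(d['id']): int(d['value']) for d in mergerResult['detectionResults']}
arcFlowMap = {(int(l['src']), int(l['dest'])): int(l['value']) for l in mergerResult['linkingResults']}
assert nodeFlowMap[1] == 2 and arcFlowMap[(0, 1)] == 1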