Code Example #1
        def export(self, filename, hypothesesGraph, pluginExportContext):
            """
            Export the tracking model and result

            :param filename: path of the file where the result is saved (_graph.json and _result.json will be appended)
            :param hypothesesGraph: hytra.core.hypothesesgraph.HypothesesGraph filled with a solution
            :param pluginExportContext: additional contextual info (here to adhere to the interface)

            :returns: True on success, False otherwise
            """

            # Nodes inserted into the graph after merger resolving do not have
            # detection probabilities set. We first find one node that was not a merger,
            # check how many entries its probability vector needs, and then fill in a
            # zero vector for every node that is missing one.

            numStates = -1
            for n in hypothesesGraph.nodeIterator():
                t = hypothesesGraph._graph.node[n]['traxel']
                if 'detProb' in t.Features:
                    numStates = len(t.Features['detProb'])
                    break

            assert numStates > 0, "Cannot export hypotheses graph without features (e.g. only resolved mergers) to JSON"

            dummyVector = np.zeros(numStates)
            for n in hypothesesGraph.nodeIterator():
                t = hypothesesGraph._graph.node[n]['traxel']
                if 'detProb' not in t.Features:
                    logger.debug(
                        f"replacing detProb of node with ID={hypothesesGraph._graph.node[n]['id']}"
                    )
                    t.Features['detProb'] = dummyVector

            # now we can insert the energies into the graph
            hypothesesGraph.insertEnergies()
            trackingGraph = hypothesesGraph.toTrackingGraph()

            writeToFormattedJSON(filename + '_graph.json', trackingGraph.model)
            writeToFormattedJSON(filename + '_result.json',
                                 hypothesesGraph.getSolutionDictionary())

            return True
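
The only non-obvious step above is back-filling detection probabilities for the nodes created by merger resolving. Below is a self-contained sketch of that pattern, with plain dicts standing in for hytra traxels (purely illustrative, not the hytra API):

# Self-contained sketch of the detProb back-fill pattern (plain dicts instead of traxels).
import numpy as np

traxels = [
    {'detProb': [0.1, 0.9]},   # a regular node that already has detection probabilities
    {},                        # a node inserted by merger resolving, no detProb yet
]

# Find out how many states a regular node carries ...
numStates = next(len(t['detProb']) for t in traxels if 'detProb' in t)

# ... and give every node that lacks the feature a zero vector of the same length.
for t in traxels:
    t.setdefault('detProb', np.zeros(numStates))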
Code Example #2
        def export(self, filename, hypothesesGraph, objectFeaturesSlot, labelImageSlot, rawImageSlot):
            """
            Export the tracking model and result

            :param filename: path of the file where the result is saved (_graph.json and _result.json will be appended)
            :param hypothesesGraph: hytra.core.hypothesesgraph.HypothesesGraph filled with a solution
            :param objectFeaturesSlot: lazyflow.graph.InputSlot, connected to the RegionFeaturesAll output
                   of ilastik.applets.trackingFeatureExtraction.opTrackingFeatureExtraction.OpTrackingFeatureExtraction
            :param labelImageSlot: lazyflow.graph.InputSlot for the label image
            :param rawImageSlot: lazyflow.graph.InputSlot for the raw image

            :returns: True on success, False otherwise
            """

            # Nodes inserted into the graph after merger resolving do not have
            # detection probabilities set. We first find one node that was not a merger,
            # check how many entries its probability vector needs, and then fill in a
            # zero vector for every node that is missing one.

            numStates = -1
            for n in hypothesesGraph.nodeIterator():
                t = hypothesesGraph._graph.node[n]['traxel']
                if 'detProb' in t.Features:
                    numStates = len(t.Features['detProb'])
                    break

            assert numStates > 0, "Cannot export hypotheses graph without features (e.g. only resolved mergers) to JSON"

            dummyVector = np.zeros(numStates)
            for n in hypothesesGraph.nodeIterator():
                t = hypothesesGraph._graph.node[n]['traxel']
                if 'detProb' not in t.Features:
                    logger.debug(f"replacing detProb of node with ID={hypothesesGraph._graph.node[n]['id']}")
                    t.Features['detProb'] = dummyVector

            # now we can insert the energies into the graph
            hypothesesGraph.insertEnergies()
            trackingGraph = hypothesesGraph.toTrackingGraph()

            writeToFormattedJSON(filename + '_graph.json',  trackingGraph.model)
            writeToFormattedJSON(filename + '_result.json', hypothesesGraph.getSolutionDictionary())

            return True
Code Example #3
def mapGroundTruth(options, hypotheses_graph, trackingGraph, probGenerator):
    """
    If we were given a ground truth, we can map it to the graph and either train new classifiers,
    or run structured learning to find the optimal weights. 
    """
    getLogger().info("Map ground truth")
    jsonGT = probGenerator.findGroundTruthJaccardScoreAndMapping(
        hypotheses_graph, options.gt_label_image_file,
        options.gt_label_image_path, options.gt_text_file,
        options.gt_jaccard_threshold)

    if options.out_obj_count_classifier_file is not None and options.out_obj_count_classifier_path is not None:
        getLogger().info("Training Random Forest detection classifier")
        rf = hytra.jst.classifiertrainingexampleextractor.trainDetectionClassifier(
            hypotheses_graph, jsonGT, numSamples=100)
        rf.save(options.out_obj_count_classifier_file,
                options.out_obj_count_classifier_path)

        getLogger().info(
            "Quitting, because you probably want to set up a new graph using the new classifiers..."
        )
        sys.exit(0)

    # Make sure mht is defined even when no ILP solver backend is installed.
    mht = None
    try:
        import multiHypoTracking_with_cplex as mht
    except ImportError:
        try:
            import multiHypoTracking_with_gurobi as mht
        except ImportError:
            pass
    if mht:
        getLogger().info("Learn weights")
        weights = mht.train(trackingGraph.model, jsonGT)

        if options.learned_weights_json_filename is not None:
            writeToFormattedJSON(options.learned_weights_json_filename,
                                 weights)
        return weights
    return None
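
mapGroundTruth only reads a handful of attributes from options. Here is a minimal sketch of such a namespace; only the attribute names are taken from the code above, while file names and thresholds are placeholders:

from argparse import Namespace

options = Namespace(
    gt_label_image_file='gt_labels.h5',            # placeholder path
    gt_label_image_path='/TrackingGT/LabelImage',  # placeholder internal dataset path
    gt_text_file='gt_tracks.txt',                  # placeholder path
    gt_jaccard_threshold=0.3,                      # placeholder Jaccard cutoff
    out_obj_count_classifier_file=None,            # set both to train and save a new RF, then exit
    out_obj_count_classifier_path=None,
    learned_weights_json_filename='weights.json',  # where learned weights are written, or None
)
# weights = mapGroundTruth(options, hypotheses_graph, trackingGraph, probGenerator)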
Code Example #4
def mapGroundTruth(options, hypotheses_graph, trackingGraph, probGenerator):
    """
    If we were given a ground truth, we can map it to the graph and either train new classifiers,
    or run structured learning to find the optimal weights. 
    """
    getLogger().info("Map ground truth")
    jsonGT = probGenerator.findGroundTruthJaccardScoreAndMapping(
        hypotheses_graph,
        options.gt_label_image_file,
        options.gt_label_image_path,
        options.gt_text_file,
        options.gt_jaccard_threshold
    )

    if options.out_obj_count_classifier_file is not None and options.out_obj_count_classifier_path is not None:
        getLogger().info("Training Random Forest detection classifier")
        rf = hytra.jst.classifiertrainingexampleextractor.trainDetectionClassifier(hypotheses_graph, jsonGT, numSamples=100)
        rf.save(options.out_obj_count_classifier_file, options.out_obj_count_classifier_path)

        getLogger().info("Quitting, because you probably want to set up a new graph using the new classifiers...")
        sys.exit(0)

    # Make sure mht is defined even when no ILP solver backend is installed.
    mht = None
    try:
        import multiHypoTracking_with_cplex as mht
    except ImportError:
        try:
            import multiHypoTracking_with_gurobi as mht
        except ImportError:
            pass
    if mht:
        getLogger().info("Learn weights")
        weights = mht.train(trackingGraph.model, jsonGT)

        if options.learned_weights_json_filename is not None:
            writeToFormattedJSON(options.learned_weights_json_filename, weights)
        return weights
    return None
Code Example #5
def runTracking(options, trackingGraph, weights=None):
    """
    Track the given graph with the given weights; if weights is None, they will be loaded from a JSON file.
    **Returns** the tracking result dictionary
    """

    getLogger().info("Run tracking...")
    if weights is None:
        getLogger().info("Loading weights from " +
                         options.weight_json_filename)
        with open(options.weight_json_filename, 'r') as f:
            weights = json.load(f)

        # if withDivisions:
        #     weights = {"weights" : [10, 10, 10, 500, 500]}
        # else:
        #     weights = {"weights" : [10, 10, 500, 500]}
    else:
        getLogger().info("Using learned weights!")

    if options.use_flow_solver:
        import dpct
        result = dpct.trackFlowBased(trackingGraph.model, weights)
    else:
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("No version of ILP solver found")
        result = mht.track(trackingGraph.model, weights)

    if options.result_json_filename is not None:
        writeToFormattedJSON(options.result_json_filename, result)

    return result
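
A minimal usage sketch for runTracking; the attribute names come from the function body above, everything else (file names, weight values) is a placeholder:

from argparse import Namespace

options = Namespace(
    weight_json_filename='weights.json',  # only read when weights is None
    use_flow_solver=True,                 # True: dpct flow solver, False: ILP solver (cplex/gurobi)
    result_json_filename='result.json',   # set to None to skip writing the result file
)
# The solvers expect a weight dictionary shaped like the commented-out examples above:
# result = runTracking(options, trackingGraph, weights={"weights": [10, 10, 500, 500]})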
Code Example #6
def runTracking(options, trackingGraph, weights=None):
    """
    Track the given graph with the given weights; if weights is None, they will be loaded from a JSON file.
    **Returns** the tracking result dictionary
    """

    getLogger().info("Run tracking...")
    if weights is None:
        getLogger().info("Loading weights from " + options.weight_json_filename)
        with open(options.weight_json_filename, 'r') as f:
            weights = json.load(f)

        # if withDivisions:
        #     weights = {"weights" : [10, 10, 10, 500, 500]}
        # else:
        #     weights = {"weights" : [10, 10, 500, 500]}
    else:
        getLogger().info("Using learned weights!")

    if options.use_flow_solver:
        import dpct
        result = dpct.trackFlowBased(trackingGraph.model, weights)
    else:
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("No version of ILP solver found")
        result = mht.track(trackingGraph.model, weights)
    
    if options.result_json_filename is not None:
        writeToFormattedJSON(options.result_json_filename, result)

    return result
Code Example #7
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Turn on verbose logging', default=False)
    parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',
                        default=[os.path.abspath('../hytra/plugins')],
                        help='A list of paths to search for plugins for the tracking pipeline.')
    args, _ = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    
    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename, result_filename=args.result_filename)

    merger_resolver = JsonMergerResolver(
        trackingGraph,
        args.label_image_filename,
        args.label_image_path,
        args.out_label_image,
        args.raw_filename,
        args.raw_path,
        args.raw_axes,
        args.pluginPaths,
        args.verbose)
    merger_resolver.run(
        args.transition_classifier_filename,
        args.transition_classifier_path)

    # save
    writeToFormattedJSON(args.out_model_filename, merger_resolver.model)
    writeToFormattedJSON(args.out_result, merger_resolver.result)
Code Example #8
File: convexify_costs.py  Project: tomldh/hytra
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='(Strictly!) Convexify the costs of a model to allow a flow-based solution',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path', dest='config_file')
    parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',
                        help='Filename of the json model description')
    parser.add_argument('--out-json-file', default=None, type=str, dest='result_filename',
                        help='Filename of the json file containing the model with convexified costs. '
                             'If None, it works in-place.')
    parser.add_argument('--epsilon', type=float, dest='epsilon', default=0.000001,
                        help='Epsilon is added to the gradient if the 1st derivative has a plateau.')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)

    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename)
    trackingGraph.convexifyCosts(args.epsilon)

    if args.result_filename is None:
        args.result_filename = args.model_filename

    writeToFormattedJSON(args.result_filename, trackingGraph.model)
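
The same convexification can also be done programmatically. A minimal sketch, assuming JsonTrackingGraph and writeToFormattedJSON come from hytra.core.jsongraph (the import location is an assumption; the script above already has them in scope):

from hytra.core.jsongraph import JsonTrackingGraph, writeToFormattedJSON  # assumed import path

trackingGraph = JsonTrackingGraph(model_filename='model.json')  # placeholder file name
trackingGraph.convexifyCosts(0.000001)                          # same epsilon as the script default
writeToFormattedJSON('model_convexified.json', trackingGraph.model)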
Code Example #9
def setupGraph(options):
    """
    Configure where raw data and classifiers are loaded from, find all objects together with their features
    and probabilities using the probability generator, and finally build a hypotheses graph and prepare it for tracking.
    """
    ilpOptions = IlastikProjectOptions()
    ilpOptions.labelImagePath = options.label_image_paths[0]
    ilpOptions.labelImageFilename = options.label_image_files[0]

    ilpOptions.rawImagePath = options.raw_data_path
    ilpOptions.rawImageFilename = options.raw_data_file
    ilpOptions.rawImageAxes = options.raw_data_axes
    
    ilpOptions.sizeFilter = [10, 100000]
    ilpOptions.objectCountClassifierFilename = options.obj_count_classifier_file
    ilpOptions.objectCountClassifierPath = options.obj_count_classifier_path
    
    withDivisions = options.with_divisions
    if withDivisions:
        ilpOptions.divisionClassifierFilename = options.div_classifier_file
        ilpOptions.divisionClassifierPath = options.div_classifier_path
    else:
        ilpOptions.divisionClassifierFilename = None

    getLogger().info("Extracting traxels from images")
    probGenerator = probabilitygenerator.ConflictingSegmentsProbabilityGenerator(
        ilpOptions, 
        options.label_image_files[1:],
        options.label_image_paths[1:],
        pluginPaths=['../hytra/plugins'],
        useMultiprocessing=not options.disableMultiprocessing)

    # restrict range of timeframes used for learning and tracking
    if options.end_frame < 0:
        options.end_frame += probGenerator.timeRange[1] + 1
    assert options.init_frame < probGenerator.timeRange[1]
    assert options.end_frame <= probGenerator.timeRange[1]
    probGenerator.timeRange = (options.init_frame, options.end_frame)

    probGenerator.fillTraxels(usePgmlink=False)
    fieldOfView = constructFov(probGenerator.shape,
                               probGenerator.timeRange[0],
                               probGenerator.timeRange[1],
                               [probGenerator.x_scale,
                                probGenerator.y_scale,
                                probGenerator.z_scale])

    getLogger().info("Building hypotheses graph")
    hypotheses_graph = IlastikHypothesesGraph(
        probabilityGenerator=probGenerator,
        timeRange=probGenerator.timeRange,
        maxNumObjects=1,
        numNearestNeighbors=options.max_nearest_neighbors,
        fieldOfView=fieldOfView,
        withDivisions=withDivisions,
        divisionThreshold=0.1,
        maxNeighborDistance=options.max_neighbor_distance
    )

    # if options.with_tracklets:
    #     hypotheses_graph = hypotheses_graph.generateTrackletGraph()

    getLogger().info("Preparing for tracking")
    hypotheses_graph.insertEnergies()
    trackingGraph = hypotheses_graph.toTrackingGraph()
    
    if options.do_convexify or options.use_flow_solver:
        getLogger().info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    if options.graph_json_filename is not None:
        writeToFormattedJSON(options.graph_json_filename, trackingGraph.model)

    return fieldOfView, hypotheses_graph, ilpOptions, probGenerator, trackingGraph
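
setupGraph likewise pulls everything it needs from options. A sketch of the expected namespace, with attribute names taken from the function body above and placeholder values throughout:

from argparse import Namespace

options = Namespace(
    label_image_files=['union_seg.h5', 'seg_hyp1.h5'],       # first entry is the main label image
    label_image_paths=['/exported_data', '/exported_data'],
    raw_data_file='raw.h5', raw_data_path='/volume', raw_data_axes='txyzc',
    obj_count_classifier_file='project.ilp',
    obj_count_classifier_path='/CountClassification',
    with_divisions=False, div_classifier_file=None, div_classifier_path=None,
    disableMultiprocessing=False,
    init_frame=0, end_frame=-1,                              # negative end_frame wraps around
    max_nearest_neighbors=2, max_neighbor_distance=40,
    do_convexify=True, use_flow_solver=False,
    graph_json_filename=None,                                # set to a path to dump the graph JSON
)
# fieldOfView, hypotheses_graph, ilpOptions, probGenerator, trackingGraph = setupGraph(options)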
Code Example #12
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='(Strictly!) Convexify the costs of a model to allow a flow-based solution',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path', dest='config_file', required=True)
    parser.add_argument('--graph-json-file', required=True, type=str, dest='model_filename',
                        help='Filename of the json model description')
    parser.add_argument('--out-json-file', default=None, type=str, dest='result_filename',
                        help='Filename of the json file containing the model with convexified costs. '
                             'If None, it works in-place.')
    parser.add_argument('--epsilon', type=float, dest='epsilon', default=0.000001,
                        help='Epsilon is added to the gradient if the 1st derivative has a plateau.')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)

    # parse command line
    args, unknown = parser.parse_known_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    getLogger().debug("Ignoring unknown parameters: {}".format(unknown))

    trackingGraph = JsonTrackingGraph(model_filename=args.model_filename)
    trackingGraph.convexifyCosts(args.epsilon)

    if args.result_filename is None:
        args.result_filename = args.model_filename

    writeToFormattedJSON(args.result_filename, trackingGraph.model)