Exemplo n.º 1
0
def initializeConservationTracking(options, shape, t0, t1):
    """Build a pgmlink ConsTracking instance from command-line options.

    Parameters
    ----------
    options : argparse-style namespace with tracking settings (method,
        max_num_objects, rf_fn, z_scale, ...).
    shape : 3-element sequence (x, y, z extents); a singleton last axis
        marks the data as 2D.
    t0, t1 : first and last timestep, forwarded to the field-of-view helper.

    Returns
    -------
    (tracker, fov) : the configured ConsTracking object and its FieldOfView.

    Raises
    ------
    ValueError if options.method is neither 'conservation' nor
    'conservation-dynprog'.
    """
    import pgmlink as track

    # Treat a volume with a single z-slice as two-dimensional.
    ndim = 2 if shape[-1] == 1 else 3

    # Fall back to the literal string 'none' when no random forest file is given.
    rf_fn = options.rf_fn if options.rf_fn else 'none'

    fov = getFovFromOptions(options, shape, t0, t1)
    if ndim == 2:
        _, _, zshape = shape
        assert options.z_scale * (
            zshape - 1) == 0, "fov of z must be (0,0) if ndim == 2"

    # Pick the solver backend first so the tracker is constructed only once.
    if options.method == 'conservation':
        solver_type = track.ConsTrackingSolverType.CplexSolver
    elif options.method == 'conservation-dynprog':
        solver_type = track.ConsTrackingSolverType.DynProgSolver
    else:
        raise ValueError("Must be conservation or conservation-dynprog")

    tracker = track.ConsTracking(
        int(options.max_num_objects),
        bool(options.size_dependent_detection_prob),
        options.avg_obj_size[0], options.mnd,
        not bool(options.without_divisions), options.division_threshold,
        str(rf_fn), fov, str("none"),
        solver_type, ndim)
    return tracker, fov
Exemplo n.º 2
0
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              detWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              motionModelWeight=10.0,
              force_build_hypotheses_graph=False,
              max_nearest_neighbors=1,
              withBatchProcessing=False,
              solverName="ILP",
              numFramesPerSplit=0):
        """Run pgmlink conservation tracking over the configured ROI.

        Builds a traxelstore from the given time/space ranges, constructs a
        ConsTracking hypotheses graph, solves it with the solver selected by
        ``solverName``, optionally resolves mergers, and publishes the
        resulting events on ``self.EventsVector``.  Unless
        ``withBatchProcessing`` is set, it also recolors the "Merger" and
        "Tracking" GUI layers.

        Raises an ``Exception`` when parameters are not ready, when sliding
        window tracking is requested (``numFramesPerSplit != 0``), or when the
        underlying tracker fails; raises ``DatasetConstraintError`` for
        classifier/label-count mismatches and empty frames.

        NOTE(review): ``avgSize=[0]`` is a mutable default argument; it is only
        read here, not mutated, so behavior is unaffected — but confirm before
        relying on that.  ``motionModelWeight`` and
        ``force_build_hypotheses_graph`` are currently unused (the motion-model
        hookup below is commented out and the graph is always rebuilt).
        """

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        if numFramesPerSplit != 0:
            raise Exception(
                "PGMLINK tracking does not support sliding window tracking")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: If the same value for the
        # key is already written in the parameters dictionary, the
        # paramters_changed dictionary will get a "False" entry for this key,
        # otherwise it is set to "True"
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['detWeight'] = detWeight
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['numFramesPerSplit'] = numFramesPerSplit

        # Always rebuild the hypotheses graph (see also the self.tracker check
        # further down, which is therefore redundant but harmless).
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            # No timeout requested: pass an effectively infinite limit to CPLEX.
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        # Sanity-check the object-count classifier against maxObj before doing
        # any expensive work.
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError(
                    'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
                )
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj +
                                                                     1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')

        # median_obj_size is an out-parameter filled by _generate_traxelstore.
        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise DatasetConstraintError(
                'Tracking', 'Can not track frames with 0 objects, abort.')

        # A user-supplied average size overrides the measured median.
        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        # Field of view in scaled world coordinates; upper bounds are
        # inclusive, hence the (range[1] - 1).
        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        solverType = self.getPgmlinkSolverType(solverName)

        if do_build_hypotheses_graph:
            print '\033[94m' + "make new graph" + '\033[0m'
            self.tracker = pgmlink.ConsTracking(
                int(maxObj),
                bool(sizeDependent),  # size_dependent_detection_prob
                float(median_obj_size[0]),  # median_object_size
                float(maxDist),
                bool(withDivisions),
                float(divThreshold),
                "none",  # detection_rf_filename
                fov,
                "none",  # dump traxelstore,
                solverType,
                ndim)
            g = self.tracker.buildGraph(ts, max_nearest_neighbors)

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(
            1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        params = self.tracker.get_conservation_tracking_parameters(
            0,  # forbidden_cost
            float(ep_gap),  # ep_gap
            bool(withTracklets),  # with tracklets
            float(detWeight),  # detection weight
            float(divWeight),  # division weight
            float(transWeight),  # transition weight
            float(disappearance_cost),  # disappearance cost
            float(appearance_cost),  # appearance cost
            bool(withMergerResolution),  # with merger resolution
            int(ndim),  # ndim
            float(transition_parameter),  # transition param
            float(borderAwareWidth),  # border width
            True,  #with_constraints
            uncertaintyParams,  # uncertainty parameters
            float(cplex_timeout),  # cplex timeout
            None,  # transition classifier
            solverType,
            False,  # training to hard constraints
            1  # num threads
        )

        # if motionModelWeight > 0:
        #     logger.info("Registering motion model with weight {}".format(motionModelWeight))
        #     params.register_motion_model4_func(swirl_motion_func_creator(motionModelWeight), motionModelWeight * 25.0)

        try:
            eventsVector = self.tracker.track(params, False)

            eventsVector = eventsVector[
                0]  # we have a vector such that we could get a vector per perturbation

            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    params,
                    coordinate_map.get(),
                    float(ep_gap),
                    float(transWeight),
                    bool(withTracklets),
                    ndim,
                    transition_parameter,
                    max_traxel_id_at,
                    True,  # with_constraints
                    None)  # TransitionClassifier

        except Exception as e:
            raise Exception, 'Tracking terminated unsuccessfully: ' + str(e)

        if len(eventsVector) == 0:
            raise Exception, 'Tracking terminated unsuccessfully: Events vector has zero length.'

        # Publish results; check_changed=False forces downstream dirtiness.
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        # Interactive mode only: recolor the Merger layer depending on whether
        # mergers were resolved.
        if not withBatchProcessing:
            merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
            ).layerstack.findMatchingIndex(lambda x: x.name == "Merger")
            tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
            ).layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
            if 'withMergerResolution' in parameters.keys(
            ) and not parameters['withMergerResolution']:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
            else:
                self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                    self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
Exemplo n.º 3
0
def track_subgraphs(graph,
                    time_range,
                    timesteps_per_segment,
                    segment_overlap_timesteps,
                    conservation_tracking_parameter,
                    fov,
                    ilp_fn,
                    ts, 
                    fs,
                    t0,
                    trans_classifier,
                    uncertaintyParam
                    ):
    """
    Experiment: track only subgraphs of the full hypotheses graph with some overlap,
    and then stitch the results together using fusion moves.

    Each time window is tracked independently; node/arc/division activations
    are collected per original-graph id.  A node in the overlap of two windows
    gets two recorded values; if all overlapping values agree, the combined
    labeling is injected back into ``graph``, otherwise an AssertionError is
    raised.

    NOTE(review): relies on module-level names ``options``, ``track``
    (pgmlink), ``np``, ``os`` and ``multitrack``; ``ndim`` (used in the
    merger-resolving branch) is not defined in this function or its
    parameters — presumably a module global, verify before reuse.  ``t0`` is
    accepted but unused here.
    """
    # define which segments we have
    # Windows advance by (timesteps_per_segment - segment_overlap_timesteps),
    # so consecutive windows share segment_overlap_timesteps frames.
    num_segments = int(np.ceil(float((time_range[1] - time_range[0])) / (timesteps_per_segment - segment_overlap_timesteps)))
    segments = [(time_range[0] + i * (timesteps_per_segment - segment_overlap_timesteps),
                 (time_range[0] + (i + 1) * timesteps_per_segment - i * segment_overlap_timesteps))
                for i in xrange(num_segments)]

    tmap = graph.getNodeTraxelMap()
    # Keyed by original-graph ids; node values are lists because overlap nodes
    # are visited by two windows.
    solutions = {}
    arc_solutions = {}
    div_solutions = {}

    original_out_dir = options.out_dir

    # track all segments individually
    for i, segment in enumerate(segments):
        print("************** Creating subgraph for timesteps in {}".format(segment))

        # use special out-dir per window
        options.out_dir = original_out_dir.rstrip('/') + '/window_' + str(i) + '/'
        try:
            os.makedirs(options.out_dir)
        except:
            pass

        # create subgraph for this segment
        # Nodes: timestep in [segment[0], segment[1]); arcs: both endpoints inside.
        node_mask = track.NodeMask(graph)
        n_it = track.NodeIt(graph)
        for n in n_it:
            node_mask[n] = segment[0] <= tmap[n].Timestep < segment[1]

        arc_mask = track.ArcMask(graph)
        a_it = track.ArcIt(graph)
        for a in a_it:
            arc_mask[a] = tmap[graph.source(a)].Timestep >= segment[0] and tmap[graph.target(a)].Timestep < segment[1]
        subgraph = track.HypothesesGraph()
        track.copy_hypotheses_subgraph(graph, subgraph, node_mask, arc_mask)
        subgraph_node_origin_map = subgraph.getNodeOriginReferenceMap()
        subgraph_arc_origin_map = subgraph.getArcOriginReferenceMap()
        subgraph.initLabelingMaps()

        # fix variables in overlap
        # Pin first-frame nodes of this window to the value the previous
        # window chose for them, so the stitch is consistent.
        if i > 0:
            sub_tmap = subgraph.getNodeTraxelMap()
            n_it = track.NodeIt(subgraph)
            for n in n_it:
                if segment[0] == sub_tmap[n].Timestep:
                    origin_node = subgraph_node_origin_map[n]
                    origin_node_id = graph.id(origin_node)
                    subgraph.addAppearanceLabel(n, solutions[origin_node_id][-1])
                    print "fixing node ", origin_node_id, " which is ", subgraph.id(n), " in subgraph"

        print("Subgraph has {} nodes and {} arcs".format(track.countNodes(subgraph), track.countArcs(subgraph)))

        # create subgraph tracker
        subgraph_tracker = track.ConsTracking(subgraph,
                                                ts,
                                                conservation_tracking_parameter,
                                                uncertaintyParam,
                                                fov,
                                                bool(options.size_dependent_detection_prob),
                                                options.avg_obj_size[0],
                                                options.mnd,
                                                options.division_threshold)
        all_events = subgraph_tracker.track(conservation_tracking_parameter, bool(i > 0))

        if len(options.raw_filename) > 0 and len(options.reranker_weight_file) > 0:
            # run merger resolving and feature extraction, which also returns the score of each proposal
            region_features = multitrack.getRegionFeatures(ndim)
            scores = multitrack.runMergerResolving(options, 
                subgraph_tracker, 
                ts,
                fs,
                subgraph,
                ilp_fn,
                all_events,
                fov,
                region_features,
                trans_classifier,
                segment[0],
                True)

            best_sol_idx = int(np.argmax(np.array(scores)))
            subgraph.set_solution(best_sol_idx)
            print("====> selected solution {} in window {} <=====".format(best_sol_idx, i))
        else:
            subgraph.set_solution(0)
        print("Done tracking subgraph")

        # collect solutions
        subgraph_node_active_map = subgraph.getNodeActiveMap()
        subgraph_arc_active_map = subgraph.getArcActiveMap()
        subgraph_div_active_map = subgraph.getDivisionActiveMap()

        n_it = track.NodeIt(subgraph)
        for n in n_it:
            origin_node = subgraph_node_origin_map[n]
            origin_node_id = graph.id(origin_node)
            value = subgraph_node_active_map[n]

            if not origin_node_id in solutions:
                solutions[origin_node_id] = [value]
            else:
                solutions[origin_node_id].append(value)
            div_solutions[origin_node_id] = subgraph_div_active_map[n]
        a_it = track.ArcIt(subgraph)
        for a in a_it:
            origin_arc = subgraph_arc_origin_map[a]
            origin_arc_id = graph.id(origin_arc)
            arc_solutions[origin_arc_id] = subgraph_arc_active_map[a]
        print("Done storing solutions")

    # reset out-dir
    options.out_dir = original_out_dir

    # find overlapping variables
    print("Computing overlap statistics...")
    num_overlap_vars = sum([1 for values in solutions.values() if len(values) > 1])
    num_disagreeing_overlap_vars = sum([1 for values in solutions.values() if len(values) > 1 and values[0] != values[1]])

    for key, values in solutions.items():
        if len(values) > 1 and values[0] != values[1]:
            print("\tFound disagreement at {}: {} != {}".format(key, values[0], values[1]))

    print("Found {} variables in overlaps, of which {} did disagree ({}%)".format(num_overlap_vars,
                                                                                  num_disagreeing_overlap_vars,
                                                                                  100.0 * float(num_disagreeing_overlap_vars) / num_overlap_vars))
    
    if num_disagreeing_overlap_vars == 0:
        # write overall solution back to hypotheses graph
        graph.initLabelingMaps()
        n_it = track.NodeIt(graph)
        for n in n_it:
            n_id = graph.id(n)

            graph.addAppearanceLabel(n, solutions[n_id][-1])
            graph.addDisappearanceLabel(n, solutions[n_id][-1])

            # store division information
            graph.addDivisionLabel(n, div_solutions[n_id])

        # activate arcs
        a_it = track.ArcIt(graph)
        for a in a_it:
            a_id = graph.id(a)
            graph.addArcLabel(a, arc_solutions[a_id])
        graph.set_injected_solution()
    else:
        raise AssertionError("Nodes did disagree, cannot create stitched solution")
Exemplo n.º 4
0
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500):
        """Run conservation tracking via the monolithic (older) pgmlink
        ConsTracking API: traxelstore generation, one combined constructor
        call, then invoking the tracker object directly.

        Publishes results on ``self.Parameters`` and ``self.EventsVector``.
        Raises ``Exception`` on unready parameters, classifier mismatches,
        empty frames, or tracker failure.

        NOTE(review): unlike the sibling ``track`` implementations which use
        ``<`` for the label-count checks, this one requires exact equality
        (``!= maxObj + 1``) — confirm which is intended.  ``avgSize=[0]`` is a
        mutable default argument (read-only here).
        """

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        # Record all effective parameters for downstream consumers.
        parameters = self.Parameters.value
        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            # No timeout requested: pass an effectively infinite limit to CPLEX.
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        # Validate the object-count classifier against maxObj up front.
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception, 'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj +
                                                                      1):
                raise Exception, 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least' \
                    'one training example for each class.'
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) != (maxObj + 1):
                raise Exception, 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least' \
                    'one training example for each class.'

        # median_obj_size is an out-parameter filled by _generate_traxelstore.
        median_obj_size = [0]

        coordinate_map = pgmlink.TimestepIdCoordinateMap()
        if withArmaCoordinates:
            coordinate_map.initialize()
        ts, empty_frame = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_coordinate_list=
            withMergerResolution,  # no vigra coordinate list, that is done by arma
            with_classifier_prior=withClassifierPrior,
            coordinate_map=coordinate_map)

        if empty_frame:
            raise Exception, 'cannot track frames with 0 objects, abort.'

        # A user-supplied average size overrides the measured median.
        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        # Field of view in scaled world coordinates; upper bounds are
        # inclusive, hence the (range[1] - 1).
        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        # Older pgmlink API: all solver settings go into the constructor and
        # the tracker object itself is callable.
        tracker = pgmlink.ConsTracking(
            maxObj,
            float(maxDist),
            float(divThreshold),
            "none",  # detection_rf_filename
            sizeDependent,  # size_dependent_detection_prob
            0,  # forbidden_cost
            float(ep_gap),  # ep_gap
            float(median_obj_size[0]),  # median_object_size
            withTracklets,
            divWeight,
            transWeight,
            withDivisions,
            disappearance_cost,  # disappearance cost
            appearance_cost,  # appearance cost
            withMergerResolution,
            ndim,
            transition_parameter,
            borderAwareWidth,
            fov,
            True,  #with_constraints
            cplex_timeout,
            "none"  # dump traxelstore
        )

        try:
            eventsVector = tracker(ts, coordinate_map.get())
        except Exception as e:
            raise Exception, 'Tracking terminated unsuccessfully: ' + str(e)

        if len(eventsVector) == 0:
            raise Exception, 'Tracking terminated unsuccessfully: Events vector has zero length.'

        # Publish results; check_changed=False forces downstream dirtiness.
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              force_build_hypotheses_graph=False):

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: If the same value for the
        # key is already written in the parameters dictionary, the
        # paramters_changed dictionary will get a "False" entry for this key,
        # otherwise it is set to "True"
        parameters = self.Parameters.value
        parameters_changed = {}
        self._setParameter('maxDist', maxDist, parameters, parameters_changed)
        self._setParameter('maxObj', maxObj, parameters, parameters_changed)
        self._setParameter('divThreshold', divThreshold, parameters,
                           parameters_changed)
        self._setParameter('avgSize', avgSize, parameters, parameters_changed)
        self._setParameter('withTracklets', withTracklets, parameters,
                           parameters_changed)
        self._setParameter('sizeDependent', sizeDependent, parameters,
                           parameters_changed)
        self._setParameter('divWeight', divWeight, parameters,
                           parameters_changed)
        self._setParameter('transWeight', transWeight, parameters,
                           parameters_changed)
        self._setParameter('withDivisions', withDivisions, parameters,
                           parameters_changed)
        self._setParameter('withOpticalCorrection', withOpticalCorrection,
                           parameters, parameters_changed)
        self._setParameter('withClassifierPrior', withClassifierPrior,
                           parameters, parameters_changed)
        self._setParameter('withMergerResolution', withMergerResolution,
                           parameters, parameters_changed)
        self._setParameter('borderAwareWidth', borderAwareWidth, parameters,
                           parameters_changed)
        self._setParameter('withArmaCoordinates', withArmaCoordinates,
                           parameters, parameters_changed)
        self._setParameter('appearanceCost', appearance_cost, parameters,
                           parameters_changed)
        self._setParameter('disappearanceCost', disappearance_cost, parameters,
                           parameters_changed)
        # if self._graphBuildingParameterChanged(parameters_changed):
        #     do_build_hypotheses_graph = True
        # else:
        #     do_build_hypotheses_graph = force_build_hypotheses_graph
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise Exception, 'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj +
                                                                     1):
                raise Exception, 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' \
                    'one training example for each class.'
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) < (maxObj + 1):
                raise Exception, 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' \
                    'one training example for each class.'

        median_obj_size = [0]

        ts, empty_frame = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise Exception, 'cannot track frames with 0 objects, abort.'

        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        ep_gap = 0.05
        transition_parameter = 5

        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        if self.tracker is None:
            do_build_hypotheses_graph = True

        if do_build_hypotheses_graph:
            print '\033[94m' + "make new graph" + '\033[0m'
            self.tracker = pgmlink.ConsTracking(
                maxObj,
                sizeDependent,  # size_dependent_detection_prob
                float(median_obj_size[0]),  # median_object_size
                float(maxDist),
                withDivisions,
                float(divThreshold),
                "none",  # detection_rf_filename
                fov,
                "none"  # dump traxelstore
            )
            self.tracker.buildGraph(ts)

        try:
            eventsVector = self.tracker.track(
                0,  # forbidden_cost
                float(ep_gap),  # ep_gap
                withTracklets,
                divWeight,
                transWeight,
                disappearance_cost,  # disappearance cost
                appearance_cost,  # appearance cost
                ndim,
                transition_parameter,
                borderAwareWidth,
                True,  #with_constraints
                cplex_timeout)
            # extract the coordinates with the given event vector
            if withMergerResolution:
                coordinate_map = pgmlink.TimestepIdCoordinateMap()

                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.tracker.resolve_mergers(
                    eventsVector,
                    coordinate_map.get(),
                    float(ep_gap),
                    transWeight,
                    withTracklets,
                    ndim,
                    transition_parameter,
                    True,  # with_constraints
                    False)  # with_multi_frame_moves
        except Exception as e:
            raise Exception, 'Tracking terminated unsuccessfully: ' + str(e)

        if len(eventsVector) == 0:
            raise Exception, 'Tracking terminated unsuccessfully: Events vector has zero length.'

        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()
        merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
        ).layerstack.findMatchingIndex(lambda x: x.name == "Merger")
        tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
        ).layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
        if 'withMergerResolution' in parameters.keys(
        ) and not parameters['withMergerResolution']:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
        else:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable
Exemplo n.º 6
0
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              detWeight=10.0,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              graph_building_parameter_changed=True,
              trainingToHardConstraints=False,
              max_nearest_neighbors=1):
        """Run conservation tracking (pgmlink.ConsTracking) over the given ROI.

        Steps performed:
          1. Record all tracking parameters in the ``Parameters`` slot value.
          2. Validate the object-count classifier against ``maxObj`` when
             ``withClassifierPrior`` is set.
          3. Build a traxel store from the label image via
             ``_generate_traxelstore``.
          4. (Re)build the hypotheses graph when ``self.consTracker`` is unset
             or ``graph_building_parameter_changed`` is True; when
             ``trainingToHardConstraints`` is set, training annotations are
             added as labels/arcs, and ``max_nearest_neighbors`` is grown
             until every annotated arc exists in the graph.
          5. Solve with the CPLEX solver and optionally resolve mergers.
          6. Publish results to the ``Parameters``/``EventsVector`` slots,
             mark ``RelabeledImage`` dirty, and switch the "Merger" layer's
             color table in the GUI.

        Note: despite the ``detWeight``/``divWeight``/``transWeight``/
        ``appearance_cost``/``disappearance_cost`` arguments, the weights
        actually passed to the solver are read from the GUI spin boxes
        (``drawer.*Box.value()``) further below; ``transWeight`` is still
        used directly for merger resolution.

        Raises:
            Exception: if the ``Parameters`` slot is not ready, or tracking
                fails / returns an empty events vector (Python-2 style raise).
            DatasetConstraintError: if the classifier is not trained, the
                label count does not match ``maxObj + 1``, or a frame
                contains zero objects.
        """

        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: If the same value for the
        # key is already written in the parameters dictionary, the
        # paramters_changed dictionary will get a "False" entry for this key,
        # otherwise it is set to "True"
        parameters = self.Parameters.value

        # NOTE(review): detWeight, graph_building_parameter_changed,
        # trainingToHardConstraints and max_nearest_neighbors are NOT stored
        # in the parameters dict -- confirm whether that is intentional.
        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost

        # NOTE(review): assigned but never read in this method -- the rebuild
        # condition below only checks consTracker/graph_building_parameter_changed
        # (the do_build_hypotheses_graph test is commented out there).
        do_build_hypotheses_graph = True

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            # No timeout requested: pass an effectively unbounded limit to CPLEX.
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        if withClassifierPrior:
            # Sanity-check the object-count classifier: it must be trained and
            # its number of classes must equal maxObj + 1 (labels 0..maxObj).
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError(
                    'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
                )
            if not self.NumLabels.ready() or self.NumLabels.value != (maxObj +
                                                                      1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'+\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) != (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n'+\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least' +\
                    'one training example for each class.')

        # Filled in-place by _generate_traxelstore with the median object size.
        median_obj_size = [0]

        fs, ts, empty_frame, max_traxel_id_at = self._generate_traxelstore(
            time_range,
            x_range,
            y_range,
            z_range,
            size_range,
            x_scale,
            y_scale,
            z_scale,
            median_object_size=median_obj_size,
            with_div=withDivisions,
            with_opt_correction=withOpticalCorrection,
            with_classifier_prior=withClassifierPrior)

        if empty_frame:
            raise DatasetConstraintError(
                'Tracking', 'Can not track frames with 0 objects, abort.')

        # A user-supplied average size overrides the measured median.
        # (avgSize is only read here, so the mutable default list is harmless.)
        if avgSize[0] > 0:
            median_obj_size = avgSize

        logger.info('median_obj_size = {}'.format(median_obj_size))

        # Relative optimality gap for the CPLEX solver.
        ep_gap = 0.05

        # Field of view in physical units: ROI bounds scaled per axis;
        # upper bounds are inclusive, hence the (range[1] - 1).
        fov = pgmlink.FieldOfView(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        )

        logger.info('fov = {},{},{},{},{},{},{},{}'.format(
            time_range[0] * 1.0,
            x_range[0] * x_scale,
            y_range[0] * y_scale,
            z_range[0] * z_scale,
            time_range[-1] * 1.0,
            (x_range[1] - 1) * x_scale,
            (y_range[1] - 1) * y_scale,
            (z_range[1] - 1) * z_scale,
        ))

        if ndim == 2:
            assert z_range[0] * z_scale == 0 and (
                z_range[1] -
                1) * z_scale == 0, "fov of z must be (0,0) if ndim==2"

        # Rebuild the hypotheses graph if there is none yet or the
        # graph-building parameters changed.  (NOTE(review): `== None`
        # should be `is None` by convention; behavior is the same here.)
        if (self.consTracker == None or graph_building_parameter_changed
            ):  # or do_build_hypotheses_graph):

            # Start one below the requested neighbor count so the first loop
            # iteration uses exactly max_nearest_neighbors; grow it until all
            # annotated training arcs can be found in the graph.
            foundAllArcs = False
            new_max_nearest_neighbors = max_nearest_neighbors - 1

            while not foundAllArcs:
                new_max_nearest_neighbors += 1
                logger.info('\033[94m' + "make new graph" + '\033[0m')

                self.consTracker = pgmlink.ConsTracking(
                    maxObj,
                    sizeDependent,  # size_dependent_detection_prob
                    float(median_obj_size[0]),  # median_object_size
                    float(maxDist),
                    withDivisions,
                    float(divThreshold),
                    "none",  # detection_rf_filename
                    fov,
                    "none",  # dump traxelstore,
                    pgmlink.ConsTrackingSolverType.CplexSolver,
                    ndim)
                hypothesesGraph = self.consTracker.buildGraph(
                    ts, new_max_nearest_neighbors)

                # Region features for all time steps (RegionCenter is used
                # below to locate annotated objects).
                self.features = self.ObjectFeatures(
                    range(0, self.LabelImage.meta.shape[0])).wait()

                foundAllArcs = True
                if trainingToHardConstraints:

                    logger.info(
                        "Tracking: Adding Training Annotations to Hypotheses Graph"
                    )

                    # could be merged with code in structuredTrackingGui
                    self.consTracker.addLabels()

                    # Walk all annotated crops and translate the manual track
                    # labels into (dis)appearance, division and arc labels on
                    # the graph.  Any addArcLabel failure means the graph is
                    # missing an annotated transition -> abort and retry with
                    # more nearest neighbors.
                    for cropKey in self.Annotations.value.keys():
                        if foundAllArcs:
                            crop = self.Annotations.value[cropKey]

                            if "labels" in crop.keys():
                                labels = crop["labels"]
                                for time in labels.keys():

                                    if not foundAllArcs:
                                        break

                                    for label in labels[time].keys():
                                        if not foundAllArcs:
                                            break

                                        trackSet = labels[time][label]
                                        center = self.features[time][
                                            'Default features'][
                                                'RegionCenter'][label]
                                        trackCount = len(trackSet)

                                        for track in trackSet:

                                            if not foundAllArcs:
                                                logger.info(
                                                    "[opStructuredTracking] Increasing max nearest neighbors!"
                                                )
                                                break

                                            # is this a FIRST, INTERMEDIATE, LAST, SINGLETON(FIRST_LAST) object of a track (or FALSE_DETECTION)
                                            # NOTE(review): `type` shadows the builtin; kept as-is.
                                            type = self._type(
                                                cropKey, time, track
                                            )  # returns [type, previous_label] if type=="LAST" or "INTERMEDIATE" (else [type])

                                            if type[0] == "LAST" or type[
                                                    0] == "INTERMEDIATE":
                                                previous_label = int(type[1])
                                                previousTrackSet = labels[
                                                    time - 1][previous_label]
                                                intersectionSet = trackSet.intersection(
                                                    previousTrackSet)
                                                trackCountIntersection = len(
                                                    intersectionSet)

                                                foundAllArcs &= self.consTracker.addArcLabel(
                                                    time - 1,
                                                    int(previous_label),
                                                    int(label),
                                                    float(
                                                        trackCountIntersection)
                                                )
                                                if not foundAllArcs:
                                                    logger.info(
                                                        "[opStructuredTracking] Increasing max nearest neighbors!"
                                                    )
                                                    break

                                        if type[0] == "FIRST":
                                            self.consTracker.addFirstLabels(
                                                time, int(label),
                                                float(trackCount))
                                            if time > self.Crops.value[
                                                    cropKey]["time"][0]:
                                                self.consTracker.addDisappearanceLabel(
                                                    time, int(label), 0.0)

                                        elif type[0] == "LAST":
                                            self.consTracker.addLastLabels(
                                                time, int(label),
                                                float(trackCount))
                                            if time < self.Crops.value[
                                                    cropKey]["time"][1]:
                                                self.consTracker.addAppearanceLabel(
                                                    time, int(label), 0.0)

                                        elif type[0] == "INTERMEDIATE":
                                            self.consTracker.addIntermediateLabels(
                                                time, int(label),
                                                float(trackCount))

                            if "divisions" in crop.keys():
                                divisions = crop["divisions"]
                                # division = [[child0_track, child1_track], time]
                                # -- inferred from the indexing below; confirm.
                                for track in divisions.keys():
                                    if not foundAllArcs:
                                        logger.info(
                                            "[opStructuredTracking] Increasing max nearest neighbors!"
                                        )
                                        break
                                    division = divisions[track]
                                    time = int(division[1])
                                    parent = int(
                                        self.getLabelInCrop(
                                            cropKey, time, track))

                                    if parent >= 0:
                                        self.consTracker.addDivisionLabel(
                                            time, parent, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time, parent, 1.0)
                                        self.consTracker.addDisappearanceLabel(
                                            time, parent, 1.0)

                                        child0 = int(
                                            self.getLabelInCrop(
                                                cropKey, time + 1,
                                                division[0][0]))
                                        self.consTracker.addDisappearanceLabel(
                                            time + 1, child0, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time + 1, child0, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(
                                            time, parent, child0, 1.0)
                                        if not foundAllArcs:
                                            logger.info(
                                                "[opStructuredTracking] Increasing max nearest neighbors!"
                                            )
                                            break

                                        child1 = int(
                                            self.getLabelInCrop(
                                                cropKey, time + 1,
                                                division[0][1]))
                                        self.consTracker.addDisappearanceLabel(
                                            time + 1, child1, 1.0)
                                        self.consTracker.addAppearanceLabel(
                                            time + 1, child1, 1.0)
                                        foundAllArcs &= self.consTracker.addArcLabel(
                                            time, parent, child1, 1.0)
                                        if not foundAllArcs:
                                            logger.info(
                                                "[opStructuredTracking] Increasing max nearest neighbors!"
                                            )
                                            break

                logger.info("max nearest neighbors={}".format(
                    new_max_nearest_neighbors))

        drawer = self.parent.parent.trackingApplet._gui.currentGui()._drawer
        # NOTE(review): new_max_nearest_neighbors is only defined inside the
        # rebuild branch above; if the graph was NOT rebuilt this line raises
        # NameError -- confirm whether the rebuild always happens in practice.
        if new_max_nearest_neighbors > max_nearest_neighbors:
            # The graph needed more neighbors than requested: push the grown
            # value back into the GUI spin box so the user sees it.
            max_nearest_neighbors = new_max_nearest_neighbors
            drawer.maxNearestNeighborsSpinBox.setValue(max_nearest_neighbors)
            self.parent.parent.trackingApplet._gui.currentGui(
            )._maxNearestNeighbors = max_nearest_neighbors

        # create dummy uncertainty parameter object with just one iteration, so no perturbations at all (iter=0 -> MAP)
        sigmas = pgmlink.VectorOfDouble()
        for i in range(5):
            sigmas.append(0.0)
        uncertaintyParams = pgmlink.UncertaintyParameter(
            1, pgmlink.DistrId.PerturbAndMAP, sigmas)

        # Solver weights come from the GUI spin boxes, not from the method
        # arguments of the same name.
        self.detectionWeight = drawer.detWeightBox.value()
        self.divisionWeight = drawer.divWeightBox.value()
        self.transitionWeight = drawer.transWeightBox.value()
        self.appearanceWeight = drawer.appearanceBox.value()
        self.disappearanceWeight = drawer.disappearanceBox.value()

        logger.info("detectionWeight= {}".format(self.detectionWeight))
        logger.info("divisionWeight={}".format(self.divisionWeight))
        logger.info("transitionWeight={}".format(self.transitionWeight))
        logger.info("appearanceWeight={}".format(self.appearanceWeight))
        logger.info("disappearanceWeight={}".format(self.disappearanceWeight))

        # NOTE(review): self.transition_parameter is not set in this method;
        # presumably initialized elsewhere on the operator -- confirm.
        consTrackerParameters = self.consTracker.get_conservation_tracking_parameters(
            0,  # forbidden_cost
            float(ep_gap),
            withTracklets,
            self.detectionWeight,
            self.divisionWeight,
            self.transitionWeight,
            self.disappearanceWeight,
            self.appearanceWeight,
            withMergerResolution,
            ndim,
            self.transition_parameter,
            borderAwareWidth,
            True,  #with_constraints
            uncertaintyParams,
            cplex_timeout,
            None,  # TransitionClassifier
            pgmlink.ConsTrackingSolverType.CplexSolver,
            trainingToHardConstraints,
            1)  # default: False

        # will be needed for python defined TRANSITION function
        # consTrackerParameters.register_transition_func(self.track_transition_func)

        fixLabeledNodes = False

        try:
            eventsVector = self.consTracker.track(consTrackerParameters,
                                                  fixLabeledNodes)

            eventsVector = eventsVector[
                0]  # we have a vector such that we could get a vector per perturbation

            if withMergerResolution:
                # Re-run a local optimization that splits merged detections
                # into individual objects, using their fitted coordinates.
                coordinate_map = pgmlink.TimestepIdCoordinateMap()
                self._get_merger_coordinates(coordinate_map, time_range,
                                             eventsVector)
                self.CoordinateMap.setValue(coordinate_map)

                eventsVector = self.consTracker.resolve_mergers(
                    eventsVector,
                    consTrackerParameters,
                    coordinate_map.get(),
                    float(ep_gap),
                    transWeight,
                    withTracklets,
                    ndim,
                    self.transition_parameter,
                    max_traxel_id_at,
                    True,  # with_constraints
                    None)  # TransitionClassifier
        except Exception as e:
            if trainingToHardConstraints:
                raise Exception, 'Tracking: Your training can not be extended to a feasible solution! ' + \
                                 'Turn training to hard constraints off or correct your tracking training. '
            else:
                raise Exception, 'Tracking terminated unsuccessfully: ' + str(
                    e)

        if len(eventsVector) == 0:
            raise Exception, 'Tracking terminated unsuccessfully: Events vector has zero length.'

        # Publish results to the downstream slots and refresh the GUI layers.
        events = get_events(eventsVector)
        self.Parameters.setValue(parameters, check_changed=False)
        self.EventsVector.setValue(events, check_changed=False)
        self.RelabeledImage.setDirty()

        # NOTE(review): tracking_layer_idx is computed but never used below.
        merger_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
        ).layerstack.findMatchingIndex(lambda x: x.name == "Merger")
        tracking_layer_idx = self.parent.parent.trackingApplet._gui.currentGui(
        ).layerstack.findMatchingIndex(lambda x: x.name == "Tracking")
        # Without merger resolution the "Merger" layer keeps its own color
        # table; with resolution it reuses the tracking color table.
        if 'withMergerResolution' in parameters.keys(
        ) and not parameters['withMergerResolution']:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().merger_colortable
        else:
            self.parent.parent.trackingApplet._gui.currentGui().layerstack[merger_layer_idx].colorTable = \
                self.parent.parent.trackingApplet._gui.currentGui().tracking_colortable