Example #1
    def _resolveMergers(self, hypothesesGraph, model):
        '''
        Run merger resolution on the hypotheses graph, which contains the current solution.
        '''
        logger.info("Resolving mergers.")
                
        parameters = self.Parameters.value
        withTracklets = parameters['withTracklets']
        originalGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
        resolvedMergersDict = {}
        
        # Enable full graph computation for animal tracking workflow
        withFullGraph = False
        if 'withAnimalTracking' in parameters and parameters['withAnimalTracking']: # TODO: Setting this parameter outside of the track() function (on AnimalConservationTrackingWorkflow) is not desirable 
            withFullGraph = True
            logger.info("Computing full graph on merger resolver (Only enabled on animal tracking workflow)")
        
        mergerResolver = IlastikMergerResolver(originalGraph, pluginPaths=self.pluginPaths, withFullGraph=withFullGraph)
        
        # Check if graph contains mergers, otherwise skip merger resolving
        if not mergerResolver.mergerNum:
            logger.info("Graph contains no mergers. Skipping merger resolving.")
        else:        
            # Fit and refine merger nodes using a GMM.
            # This has to be done per time step in order to avoid loading the whole video into RAM.
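            # traxelIdPerTimestepToUniqueIdMap maps timestep (as string) -> traxel ID -> graph UUID;
            # uuidToTraxelMap maps graph UUID -> list of (timestep, traxel ID) tuples.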
            traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = getMappingsBetweenUUIDsAndTraxels(model)
            timesteps = sorted(int(t) for t in traxelIdPerTimestepToUniqueIdMap.keys())
            
            timeIndex = self.LabelImage.meta.axistags.index('t')
            
            for timestep in timesteps:
                roi = [slice(None)] * len(self.LabelImage.meta.shape)
                roi[timeIndex] = slice(timestep, timestep+1)
                roi = tuple(roi)
                
                labelImage = self.LabelImage[roi].wait()
                
                # Get coordinates for object IDs in label image. Used by GMM merger fit.
                objectIds = vigra.analysis.unique(labelImage[0, ..., 0])
                maxObjectId = max(objectIds)
                
                coordinatesForIds = {}
                
                pool = RequestPool()
                for objectId in objectIds:
                    pool.add(Request(partial(mergerResolver.getCoordinatesForObjectId, coordinatesForIds, labelImage[0, ..., 0], timestep, objectId)))                 

                # Run requests to get object ID coordinates
                pool.wait()              
                
                # Fit mergers and store fit info in nodes  
                if coordinatesForIds:
                    mergerResolver.fitAndRefineNodesForTimestep(coordinatesForIds, maxObjectId, timestep)   
                
            # Compute object features, re-run flow solver, update model and result, and get merger dictionary
            resolvedMergersDict = mergerResolver.run()
        return resolvedMergersDict
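The per-timestep ROI built inside the loop above is a general pattern for slicing a single time frame out of a multi-dimensional volume. Below is a minimal standalone sketch of that pattern, using a hypothetical 5D txyzc shape in place of the lazyflow LabelImage slot used in the operator:

# Minimal sketch of the per-timestep ROI pattern (hypothetical shape and axis order).
shape = (10, 64, 64, 32, 1)   # t, x, y, z, c
timeIndex = 0                 # index of the 't' axis

for timestep in range(shape[timeIndex]):
    roi = [slice(None)] * len(shape)                 # keep every axis whole ...
    roi[timeIndex] = slice(timestep, timestep + 1)   # ... except a single time frame
    roi = tuple(roi)
    # labelImage = self.LabelImage[roi].wait()       # in the operator: one frame at a time keeps RAM usage low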
Example #2
    def _getEventsVector(self, result, model):        
        traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = getMappingsBetweenUUIDsAndTraxels(model)
        timesteps = list(traxelIdPerTimestepToUniqueIdMap.keys())
        
        mergers, detections, links, divisions = getMergersDetectionsLinksDivisions(result, uuidToTraxelMap)
        
        # Group by timestep for event creation
        mergersPerTimestep = getMergersPerTimestep(mergers, timesteps)
        linksPerTimestep = getLinksPerTimestep(links, timesteps)
        detectionsPerTimestep = getDetectionsPerTimestep(detections, timesteps)
        divisionsPerTimestep = getDivisionsPerTimestep(divisions, linksPerTimestep, timesteps)

        # Populate events dictionary
        events = {}
        
        # Save mergers, links, detections, and divisions
        for timestep in traxelIdPerTimestepToUniqueIdMap.keys():
            # We need to add an extra column with zeros in order to be backward compatible with the older version
            def stackExtraColumnWithZeros(array):
                return np.hstack((array, np.zeros((array.shape[0], 1), dtype=array.dtype)))
            
            dis = []
            app = []
            div = []
            mov = []
            mer = []
            mul = []
    
            dis = np.asarray(dis)
            app = np.asarray(app)
            div = np.asarray([[k, v[0], v[1]] for k, v in divisionsPerTimestep[timestep].items()])
            mov = np.asarray(linksPerTimestep[timestep])
            mer = np.asarray([[k, v] for k, v in mergersPerTimestep[timestep].items()])
            mul = np.asarray(mul)
            
            events[timestep] = {}
         
            if len(dis) > 0:
                events[timestep]['dis'] = dis
            if len(app) > 0:
                events[timestep]['app'] = app
            if len(div) > 0:
                events[timestep]['div'] = div
            if len(mov) > 0:
                events[timestep]['mov'] = mov
            if len(mer) > 0:
                events[timestep]['mer'] = mer
            if len(mul) > 0:
                events[timestep]['mul'] = mul

            # Write merger results dictionary
            resolvedMergersDict = self.ResolvedMergers.value
            
            if resolvedMergersDict:
                mergerRes = {}
                
                for idx in mergersPerTimestep[timestep]:
                    mergerRes[idx] = resolvedMergersDict[int(timestep)][idx]['newIds']
                    
                events[timestep]['res'] = mergerRes
                
            else:
                logger.info("Resolved merger dictionary not available. Please click the Track button.")
                
        return events
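A minimal sketch of how the returned dictionary could be consumed (hypothetical usage, not part of the operator), assuming `events` is the value returned by `_getEventsVector` and `logger` is the module-level logger used above; per-timestep keys are only present when the corresponding event type occurred:

# Hypothetical consumer of the dictionary returned by _getEventsVector.
# Per the construction above: 'mov' rows are (sourceId, destId), 'div' rows are
# (parentId, childId, childId), 'mer' rows are (objectId, objectCount), and
# 'res' maps a merger ID to its list of new IDs.
for timestep in sorted(events.keys(), key=int):
    timestepEvents = events[timestep]
    moves = timestepEvents.get('mov', [])
    divisions = timestepEvents.get('div', [])
    mergers = timestepEvents.get('mer', [])
    resolved = timestepEvents.get('res', {})
    logger.info("t=%s: %d moves, %d divisions, %d mergers (%d resolved)",
                timestep, len(moves), len(divisions), len(mergers), len(resolved))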
Example #3
def test_loading_no_divisions():
    model = return_example_model()
    result = return_example_result()

    # traxel <=> uuid mappings
    traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = jg.getMappingsBetweenUUIDsAndTraxels(
        model)
    assert (traxelIdPerTimestepToUniqueIdMap == {
        '0': {
            '1': 0,
            '2': 5
        },
        '1': {
            '1': 4
        },
        '2': {
            '1': 3
        },
        '3': {
            '1': 2,
            '2': 1
        }
    })
    assert (uuidToTraxelMap == {
        0: [(0, 1)],
        1: [(3, 2)],
        2: [(3, 1)],
        3: [(2, 1)],
        4: [(1, 1)],
        5: [(0, 2)]
    })

    # get lists
    mergers, detections, links, divisions = jg.getMergersDetectionsLinksDivisions(
        result, uuidToTraxelMap)
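    # mergers are (timestep, traxel ID, object count) triplets; detections and
    # the endpoints of links are (timestep, traxel ID) pairs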
    assert (divisions is None)
    assert (mergers == [(2, 1, 2), (1, 1, 2)])
    assert (detections == [(0, 1), (3, 2), (3, 1), (2, 1), (1, 1), (0, 2)])
    assert (links == [((0, 1), (1, 1)), ((2, 1), (3, 2)), ((2, 1), (3, 1)),
                      ((1, 1), (2, 1)), ((0, 2), (1, 1))])

    # events per timestep
    timesteps = traxelIdPerTimestepToUniqueIdMap.keys()
    mergersPerTimestep = jg.getMergersPerTimestep(mergers, timesteps)
    assert (mergersPerTimestep == {'0': {}, '1': {1: 2}, '2': {1: 2}, '3': {}})

    detectionsPerTimestep = jg.getDetectionsPerTimestep(detections, timesteps)
    assert (detectionsPerTimestep == {
        '0': [1, 2],
        '1': [1],
        '2': [1],
        '3': [2, 1]
    })

    linksPerTimestep = jg.getLinksPerTimestep(links, timesteps)
    assert (linksPerTimestep == {
        '0': [],
        '1': [(1, 1), (2, 1)],
        '2': [(1, 1)],
        '3': [(1, 2), (1, 1)]
    })

    # merger links as (timestep, (sourceId, destId)) pairs
    mergerLinks = jg.getMergerLinks(linksPerTimestep, mergersPerTimestep,
                                    timesteps)
    assert (mergerLinks == [('1', (1, 1)), ('1', (2, 1)), ('3', (1, 2)),
                            ('3', (1, 1)), ('2', (1, 1))])
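
For reference, a minimal re-implementation sketch of the per-timestep link grouping exercised above (not the hytra implementation behind jg.getLinksPerTimestep; it merely reproduces the asserted behaviour of grouping links under their destination timestep):

def group_links_per_timestep(links, timesteps):
    # Links are ((srcTimestep, srcId), (destTimestep, destId)) pairs; group them
    # under the destination timestep, keeping only the (srcId, destId) part.
    per_timestep = {str(t): [] for t in timesteps}
    for (_, src_id), (dest_t, dest_id) in links:
        per_timestep[str(dest_t)].append((src_id, dest_id))
    return per_timestep

# Reproduces the linksPerTimestep assertion above, e.g.
# group_links_per_timestep(links, timesteps)['1'] == [(1, 1), (2, 1)]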