def test2(self):
        superpixels = np.zeros((10, 10), dtype=np.uint32)
        superpixels[1:2] = 1
        superpixels = vigra.taggedView(superpixels, 'yx')

        rag = Rag( superpixels )

        feature_names = ['edgeregion_edge_regionradii', 'edgeregion_edge_regionaxes']
        acc = EdgeRegionEdgeAccumulator(rag, feature_names)

        features_df = rag.compute_features(None, feature_names, accumulator_set=[acc])
         
        # Just 1 edge in this rag (0 -> 1)
        assert len(features_df) == 1
        assert (features_df[['sp1', 'sp2']].values == [0,1]).all()

        # Manually compute the radius 
        x_coords = np.arange(0, 10)
        x_coord_mean = x_coords.mean()
        centralized_x_coords = x_coords - x_coord_mean
        x_coord_variance = centralized_x_coords.dot(centralized_x_coords)/len(x_coords)
        x_radius = np.sqrt(x_coord_variance).astype(np.float32)
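        # (Hand check: coords 0..9 have mean 4.5 and variance (10**2 - 1)/12 = 8.25,
        # so x_radius == sqrt(8.25) ~ 2.8723.)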
         
        assert features_df['edgeregion_edge_regionradii_0'].values[0] == x_radius        
 
        # Eigenvectors are just parallel to the axes
        assert features_df['edgeregion_edge_regionaxes_0x'].values[0] == 1.0
        assert features_df['edgeregion_edge_regionaxes_0y'].values[0] == 0.0
        
        assert features_df['edgeregion_edge_regionaxes_1x'].values[0] == 0.0
        assert features_df['edgeregion_edge_regionaxes_1y'].values[0] == 1.0
    def test_regionradii(self):
        # Create a volume of flat superpixels, where every slice
        # is the same (except for the actual sp ids)
        num_sp_per_slice = 200
        slice_superpixels = generate_random_voronoi((100,200), num_sp_per_slice)
        
        superpixels = np.zeros( shape=((10,) + slice_superpixels.shape), dtype=np.uint32 )
        for z in range(10):
            superpixels[z] = slice_superpixels + z*num_sp_per_slice
        superpixels = vigra.taggedView(superpixels, 'zyx')

        rag_flat = Rag( superpixels, flat_superpixels=True )
        
        values = np.random.random(size=(superpixels.shape)).astype(np.float32)
        values = vigra.taggedView(values, 'zyx')
        
        flat_features_df = rag_flat.compute_features(values, ['standard_flatedge_regionradii'], edge_group='z')

        # Now compute the radii using a normal 'dense' rag
        rag_dense = Rag( superpixels )
        dense_features_df = rag_dense.compute_features(values, ['edgeregion_edge_regionradii'])

        # Both methods should be reasonably close.
        combined_features_df = pd.merge(flat_features_df, dense_features_df, how='left', on=['sp1', 'sp2'])
        assert np.isclose(combined_features_df['standard_flatedge_regionradii_0'], combined_features_df['edgeregion_edge_regionradii_0'], atol=0.001).all()
        assert np.isclose(combined_features_df['standard_flatedge_regionradii_1'], combined_features_df['edgeregion_edge_regionradii_1'], atol=0.001).all()
    def test_volume(self):
        superpixels = generate_random_voronoi((100,200), 200)
        feature_names = ['edgeregion_edge_regionradii', 'edgeregion_edge_volume']
        rag = Rag( superpixels )
        
        try:
            rag.compute_features(None, feature_names)
        except AssertionError:
            pass
        else:
            assert False, "EdgeRegion accumulator should refuse to compute 'volume' for 2D images."

        superpixels = generate_random_voronoi((25,50,100), 200)
        feature_names = ['edgeregion_edge_regionradii', 'edgeregion_edge_area', 'edgeregion_edge_volume']

        rag = Rag( superpixels )
        features_df = rag.compute_features(None, feature_names)
        
        radii = features_df[['edgeregion_edge_regionradii_0', 'edgeregion_edge_regionradii_1', 'edgeregion_edge_regionradii_2']].values
        assert (radii[:,0] >= radii[:,1]).all() and (radii[:,1] >= radii[:,2]).all()
        
        volumes = features_df[['edgeregion_edge_volume']].values        
        assert ((radii[:,0] * radii[:,1] * radii[:,2]) == volumes[:,0]).all()

        areas = features_df[['edgeregion_edge_area']].values        
        assert ((radii[:,0] * radii[:,1]) == areas[:,0]).all()
    def test_edge_decisions_from_groundtruth(self):
        # 1 2
        # 3 4
        vol1 = np.zeros((20,20), dtype=np.uint32)
        vol1[ 0:10,  0:10] = 1
        vol1[ 0:10, 10:20] = 2
        vol1[10:20,  0:10] = 3
        vol1[10:20, 10:20] = 4
        
        vol1 = vigra.taggedView(vol1, 'yx')
        rag = Rag(vol1)
    
        # 2 3
        # 4 5
        vol2 = vol1.copy() + 1
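        # vol2 is a pure relabeling of vol1, so every edge still separates two
        # distinct groundtruth objects and every decision should be True.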

        decisions = rag.edge_decisions_from_groundtruth(vol2)
        assert decisions.all()

        # 7 7
        # 4 5
        vol2[( vol2 == 2 ).nonzero()] = 7
        vol2[( vol2 == 3 ).nonzero()] = 7
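        # Superpixels 1 and 2 now share groundtruth label 7, so edge (1,2)
        # should not be cut; the remaining edges still separate gt objects.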
        
        decision_dict = rag.edge_decisions_from_groundtruth(vol2, asdict=True)
        assert not decision_dict[(1,2)]
        assert decision_dict[(1,3)]
        assert decision_dict[(2,4)]
        assert decision_dict[(3,4)]
    def test_edge_features_no_histogram(self):
        """
        Make sure vigra edge filters still work even if no histogram features are selected.
        """
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)
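        # Edge values are sampled on the boundary between pixels (averaging the
        # two facing pixels), so every edge pixel here has value (sp1+sp2)/2.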

        feature_names = ['standard_edge_mean', 'standard_edge_minimum', 'standard_edge_maximum']

        features_df = rag.compute_features(values, feature_names)
        assert len(features_df) == len(rag.edge_ids)
        assert list(features_df.columns.values) == ['sp1', 'sp2'] + list(feature_names), \
            "Wrong output feature names: {}".format( features_df.columns.values )

        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        for row_tuple in features_df.itertuples():
            row = OrderedDict( zip(['index', 'sp1', 'sp2'] + list(feature_names),
                                   row_tuple) )
            sp1 = row['sp1']
            sp2 = row['sp2']
            # Values were identical to the superpixels, so this is boring...
            assert row['standard_edge_mean'] == (sp1+sp2)/2.
            assert row['standard_edge_minimum'] == (sp1+sp2)/2.
            assert row['standard_edge_maximum'] == (sp1+sp2)/2.
    def test_quantiles(self):
        # Create a volume of flat superpixels, where every slice 
        # is the same (except for the actual sp ids)
        num_sp_per_slice = 200
        slice_superpixels = generate_random_voronoi((100,200), num_sp_per_slice)
        
        superpixels = np.zeros( shape=((10,) + slice_superpixels.shape), dtype=np.uint32 )
        for z in range(10):
            superpixels[z] = slice_superpixels + z*num_sp_per_slice
        superpixels = vigra.taggedView(superpixels, 'zyx')

        rag_flat = Rag( superpixels, flat_superpixels=True )
        
        values = np.random.random(size=(superpixels.shape)).astype(np.float32)
        values = vigra.taggedView(values, 'zyx')
        
        flat_features_df = rag_flat.compute_features(values, ['standard_flatedge_quantiles'], edge_group='z')
        flat_features_df2 = rag_flat.compute_features(values, ['standard_edge_quantiles'], edge_group='yx')
        
        # Rename the 'yx' columns (computed under the standard_edge prefix) so
        # they line up with the flatedge columns before concatenating.
        flat_features_df2.columns = flat_features_df.columns.values
        flat_features_df = pd.concat( (flat_features_df, flat_features_df2), axis=0 )

        # Now compute the quantiles using a normal 'dense' rag
        rag_dense = Rag( superpixels )
        dense_features_df = rag_dense.compute_features(values, ['standard_edge_quantiles'])

        all_features_df = pd.merge(dense_features_df, flat_features_df, how='left', on=['sp1', 'sp2'])
        assert (all_features_df['standard_edge_quantiles_0'] == all_features_df['standard_flatedge_quantiles_0']).all()
        assert (all_features_df['standard_edge_quantiles_100'] == all_features_df['standard_flatedge_quantiles_100']).all()

        # Due to the way histogram ranges are computed, we can't expect quantiles_10 to match closely
        # ... but quantiles_50 seems to be good.
        assert np.isclose(all_features_df['standard_edge_quantiles_50'], all_features_df['standard_flatedge_quantiles_50']).all()
    def test_area(self):
        superpixels = generate_random_voronoi((100,200), 200)
        feature_names = ['edgeregion_edge_regionradii', 'edgeregion_edge_area']

        rag = Rag( superpixels )
        features_df = rag.compute_features(None, feature_names)
        
        radii = features_df[['edgeregion_edge_regionradii_0', 'edgeregion_edge_regionradii_1']].values
        assert (radii[:,0] >= radii[:,1]).all()

        areas = features_df[['edgeregion_edge_area']].values
        assert ((radii[:,0] * radii[:,1]) == areas[:,0]).all()
    def test_serialization_without_labels(self):
        """
        Users can opt to serialize the Rag without serializing the labels,
        but then they can't use superpixel features on the deserialized Rag.
        """
        import h5py
 
        superpixels = generate_random_voronoi((100,200), 200)
        original_rag = Rag( superpixels )
 
        tmp_dir = tempfile.mkdtemp()
        filepath = os.path.join(tmp_dir, 'test_rag.h5')
        rag_groupname = 'saved_rag'
 
        # Serialize without labels
        with h5py.File(filepath, 'w') as f:
            rag_group = f.create_group(rag_groupname)
            original_rag.serialize_hdf5(rag_group, store_labels=False) # Don't store
 
        # Deserialize
        with h5py.File(filepath, 'r') as f:
            rag_group = f[rag_groupname]
            deserialized_rag = Rag.deserialize_hdf5(rag_group)
 
        assert deserialized_rag.label_img.dtype == original_rag.label_img.dtype
        assert deserialized_rag.label_img.shape == original_rag.label_img.shape
        assert deserialized_rag.label_img.axistags == original_rag.label_img.axistags
        #assert (deserialized_rag.label_img == original_rag.label_img).all() # not stored
 
        assert (deserialized_rag.sp_ids == original_rag.sp_ids).all()
        assert deserialized_rag.max_sp == original_rag.max_sp
        assert deserialized_rag.num_sp == original_rag.num_sp
        assert deserialized_rag.num_edges == original_rag.num_edges
        assert (deserialized_rag.edge_ids == original_rag.edge_ids).all()
 
        # Check some features
        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)
        feature_names = ['standard_edge_mean', 'standard_edge_count']
        features_df_original = original_rag.compute_features(values, feature_names)
        features_df_deserialized = deserialized_rag.compute_features(values, feature_names)
        assert (features_df_original.values == features_df_deserialized.values).all()
 
        try:
            deserialized_rag.compute_features(values, ['standard_sp_count'])
        except NotImplementedError:
            pass
        else:
            assert False, "Shouldn't be able to use superpixels if labels weren't serialized/deserialized!"
    def test1(self):
        superpixels = generate_random_voronoi((100,200), 200)
        superpixels.axistags = vigra.defaultAxistags('yx')

        feature_names = ['edgeregion_edge_regionradii']

        rag = Rag( superpixels )
        acc = EdgeRegionEdgeAccumulator(rag, feature_names)
        features_df = rag.compute_features(None, feature_names, accumulator_set=[acc])
        radii = features_df[features_df.columns.values[2:]].values
        assert (radii[:,0] >= radii[:,1]).all()
 
        # Transpose superpixels and check again
        # Should match (radii are sorted by magnitude).
        superpixels.axistags = vigra.defaultAxistags('xy')
        rag = Rag( superpixels )
        acc = EdgeRegionEdgeAccumulator(rag, feature_names)

        transposed_features_df = rag.compute_features(None, feature_names, accumulator_set=[acc])
        transposed_radii = transposed_features_df[transposed_features_df.columns.values[2:]].values

        assert (transposed_features_df[['sp1', 'sp2']].values == features_df[['sp1', 'sp2']].values).all()
        
        DEBUG = False
        if DEBUG:
            count_features = rag.compute_features(None, ['standard_edge_count', 'standard_sp_count'])
    
            import pandas as pd
            combined_features_df = pd.merge(features_df, transposed_features_df, how='left', on=['sp1', 'sp2'], suffixes=('_orig', '_transposed'))
            combined_features_df = pd.merge(combined_features_df, count_features, how='left', on=['sp1', 'sp2'])
            
            problem_rows = np.logical_or(~np.isclose(radii[:, 0], transposed_radii[:, 0]),
                                         ~np.isclose(radii[:, 1], transposed_radii[:, 1]))
            problem_df = combined_features_df.loc[problem_rows][sorted(list(combined_features_df.columns))]
            print(problem_df.transpose())
            
            debug_sp = np.zeros_like(superpixels, dtype=np.uint8)
            for sp1 in problem_df['sp1'].values:
                debug_sp[superpixels == sp1] = 128
            for sp2 in problem_df['sp2'].values:
                debug_sp[superpixels == sp2] = 255
    
            vigra.impex.writeImage(debug_sp, '/tmp/debug_sp.png', dtype='NATIVE')
                
        # The first axes should all be close.
        # The second axes may differ somewhat in the case of purely linear edges,
        # so we allow a higher tolerance.
        assert np.isclose(radii[:,0], transposed_radii[:,0]).all()
        assert np.isclose(radii[:,1], transposed_radii[:,1], atol=0.001).all()
    def test_sp_features_with_histogram(self):
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)

        # Manually compute the sp counts
        sp_counts = np.bincount(superpixels.flat[:])

        # COUNT
        features_df = rag.compute_features(values, ['standard_sp_count', 'standard_sp_quantiles_25', 'standard_sp_quantiles_75'])
        assert len(features_df) == len(rag.edge_ids)
        assert (features_df.columns.values == ['sp1', 'sp2',
                                               'standard_sp_count_sum',
                                               'standard_sp_count_difference',
                                               'standard_sp_quantiles_25_sum',
                                               'standard_sp_quantiles_25_difference',
                                               'standard_sp_quantiles_75_sum',
                                               'standard_sp_quantiles_75_difference']).all()

        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # Check dtypes (pandas makes it too easy to get this wrong).
        dtypes = { colname: series.dtype for colname, series in features_df.items() }
        assert all(dtype != np.float64 for dtype in dtypes.values()), \
            "An accumulator returned float64 features. That's a waste of ram.\n"\
            "dtypes were: {}".format(dtypes)

        # sp count features are normalized, consistent with the multicut paper.
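        # (Here 'normalized' means raised to the power 1/ndim, so a 2D pixel
        # count scales like a length rather than an area.)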
        for _index, sp1, sp2, \
            sp_count_sum, sp_count_difference, \
            sp_quantiles_25_sum, sp_quantiles_25_difference, \
            sp_quantiles_75_sum, sp_quantiles_75_difference in features_df.itertuples():
            
            assert type(sp_quantiles_25_sum) == np.float32
            assert type(sp_quantiles_75_sum) == np.float32
            assert type(sp_quantiles_25_difference) == np.float32
            assert type(sp_quantiles_75_difference) == np.float32
            
            assert sp_count_sum == np.power(sp_counts[sp1] + sp_counts[sp2], 1./superpixels.ndim).astype(np.float32)
            assert sp_count_difference == np.power(np.abs(sp_counts[sp1] - sp_counts[sp2]), 1./superpixels.ndim).astype(np.float32)
            assert sp_quantiles_25_sum == float(sp1 + sp2), \
                "{} != {}".format( sp_quantiles_25_sum, float(sp1 + sp2) )
            assert sp_quantiles_75_sum == float(sp1 + sp2)
            assert sp_quantiles_25_difference == abs(float(sp1) - sp2)
            assert sp_quantiles_75_difference == abs(float(sp1) - sp2)
    def test_sp_features_no_histogram(self):
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)

        # Manually compute the sp counts
        sp_counts = np.bincount(superpixels.flat[:])

        # COUNT
        features_df = rag.compute_features(values, ['standard_sp_count'])
        assert len(features_df) == len(rag.edge_ids)
        assert (features_df.columns.values == ['sp1', 'sp2', 'standard_sp_count_sum', 'standard_sp_count_difference']).all()
        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()
        dtypes = { colname: series.dtype for colname, series in features_df.items() }
        assert all(dtype != np.float64 for dtype in dtypes.values()), \
            "An accumulator returned float64 features. That's a waste of ram.\n"\
            "dtypes were: {}".format(dtypes)

        # sp count features are normalized, consistent with the multicut paper.
        for _index, sp1, sp2, sp_count_sum, sp_count_difference in features_df.itertuples():
            assert sp_count_sum == np.power(sp_counts[sp1] + sp_counts[sp2], 1./superpixels.ndim).astype(np.float32)
            assert sp_count_difference == np.power(np.abs(sp_counts[sp1] - sp_counts[sp2]), 1./superpixels.ndim).astype(np.float32)

        # SUM
        features_df = rag.compute_features(values, ['standard_sp_sum'])
        assert len(features_df) == len(rag.edge_ids)
        assert (features_df.columns.values == ['sp1', 'sp2', 'standard_sp_sum_sum', 'standard_sp_sum_difference']).all()
        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # sp sum features ought to be normalized, too...
        for _index, sp1, sp2, sp_sum_sum, sp_sum_difference in features_df.itertuples():
            assert sp_sum_sum == np.power(sp1*sp_counts[sp1] + sp2*sp_counts[sp2], 1./superpixels.ndim).astype(np.float32)
            assert sp_sum_difference == np.power(np.abs(sp1*sp_counts[sp1] - sp2*sp_counts[sp2]), 1./superpixels.ndim).astype(np.float32)

        # MEAN
        features_df = rag.compute_features(values, ['standard_sp_mean'])
        assert len(features_df) == len(rag.edge_ids)
        assert (features_df.columns.values == ['sp1', 'sp2', 'standard_sp_mean_sum', 'standard_sp_mean_difference']).all()
        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # No normalization for other features...
        # Should there be?
        for _index, sp1, sp2, sp_mean_sum, sp_mean_difference in features_df.itertuples():
            assert sp_mean_sum == sp1 + sp2
            assert sp_mean_difference == np.abs(np.float32(sp1) - sp2)
    def test_serialization_with_external_labels(self):
        """
        Users can opt to serialize the Rag without serializing the labels,
        but then they can't use superpixel features on the deserialized Rag.
         
        When deserializing, they can provide the labels from an external source,
        as tested here.
        """
        import h5py
 
        superpixels = generate_random_voronoi((100,200), 200)
        original_rag = Rag( superpixels )
 
        tmp_dir = tempfile.mkdtemp()
        filepath = os.path.join(tmp_dir, 'test_rag.h5')
        rag_groupname = 'saved_rag'
 
        # Serialize without labels (they will be provided externally at load time)
        with h5py.File(filepath, 'w') as f:
            rag_group = f.create_group(rag_groupname)
            original_rag.serialize_hdf5(rag_group, store_labels=False)
 
        # Deserialize
        with h5py.File(filepath, 'r') as f:
            rag_group = f[rag_groupname]
            deserialized_rag = Rag.deserialize_hdf5(rag_group, label_img=superpixels) # Provide labels explicitly
 
        assert deserialized_rag.label_img.dtype == original_rag.label_img.dtype
        assert deserialized_rag.label_img.shape == original_rag.label_img.shape
        assert deserialized_rag.label_img.axistags == original_rag.label_img.axistags
        assert (deserialized_rag.label_img == original_rag.label_img).all()
 
        assert (deserialized_rag.sp_ids == original_rag.sp_ids).all()
        assert deserialized_rag.max_sp == original_rag.max_sp
        assert deserialized_rag.num_sp == original_rag.num_sp
        assert deserialized_rag.num_edges == original_rag.num_edges
        assert (deserialized_rag.edge_ids == original_rag.edge_ids).all()
 
        # Check some features
        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)
        feature_names = ['standard_edge_mean', 'standard_sp_count']
        features_df_original = original_rag.compute_features(values, feature_names)
        features_df_deserialized = deserialized_rag.compute_features(values, feature_names)
        
        assert (features_df_original.values == features_df_deserialized.values).all()
    def test_shorthand_names(self):
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)

        features_df = rag.compute_features(values, ['standard_edge_quantiles', 'standard_sp_quantiles'])
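        # Each shorthand name expands into the full set of quantile suffixes
        # (0, 10, 25, 50, 75, 90, 100) enumerated below.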
        
        quantile_names = ['quantiles_0', 'quantiles_10', 'quantiles_25', 'quantiles_50', 'quantiles_75', 'quantiles_90', 'quantiles_100']
        edge_feature_names = ['standard_edge_' + name for name in quantile_names]
        sp_features_names = ['standard_sp_' + name for name in quantile_names]
        
        sp_output_columns = []
        for name in sp_features_names:
            sp_output_columns.append( name + '_sum' )
            sp_output_columns.append( name + '_difference' )

        assert list(features_df.columns.values) == ['sp1', 'sp2'] + edge_feature_names + sp_output_columns
    def _deserialize(self, rags_group, slot):
        for lane_index, (_rag_groupname,
                         rag_group) in enumerate(sorted(rags_group.items())):
            label_img = self.labels_slot[lane_index][:].wait()
            label_img = vigra.taggedView(label_img,
                                         self.labels_slot.meta.axistags)
            label_img = label_img.dropChannelAxis()

            rag = Rag.deserialize_hdf5(rag_group, label_img)
            self.cache[lane_index].forceValue(rag)
    def test_serialization_with_labels(self):
        """
        Serialize the rag and labels to hdf5,
        then deserialize it and make sure nothing was lost.
        """
        import h5py
 
        superpixels = generate_random_voronoi((100,200), 200)
        original_rag = Rag( superpixels )
 
        tmp_dir = tempfile.mkdtemp()
        filepath = os.path.join(tmp_dir, 'test_rag.h5')
        rag_groupname = 'saved_rag'
 
        # Serialize with labels       
        with h5py.File(filepath, 'w') as f:
            rag_group = f.create_group(rag_groupname)
            original_rag.serialize_hdf5(rag_group, store_labels=True)
 
        # Deserialize
        with h5py.File(filepath, 'r') as f:
            rag_group = f[rag_groupname]
            deserialized_rag = Rag.deserialize_hdf5(rag_group)
 
        assert deserialized_rag.label_img.dtype == original_rag.label_img.dtype
        assert deserialized_rag.label_img.shape == original_rag.label_img.shape
        assert deserialized_rag.label_img.axistags == original_rag.label_img.axistags
        assert (deserialized_rag.label_img == original_rag.label_img).all()        
 
        assert (deserialized_rag.sp_ids == original_rag.sp_ids).all()
        assert deserialized_rag.max_sp == original_rag.max_sp
        assert deserialized_rag.num_sp == original_rag.num_sp
        assert deserialized_rag.num_edges == original_rag.num_edges
        assert (deserialized_rag.edge_ids == original_rag.edge_ids).all()
 
        # Check some features
        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)
        feature_names = ['standard_edge_mean', 'standard_sp_count']
        features_df_original = original_rag.compute_features(values, feature_names)
        features_df_deserialized = deserialized_rag.compute_features(values, feature_names)
        assert (features_df_original.values == features_df_deserialized.values).all()
    def test_naive_segmentation_from_edge_decisions(self):
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )
        
        # The 'groundtruth' is just divided into quadrants
        groundtruth = np.zeros_like(superpixels)
        groundtruth[0:50,   0:100] = 1
        groundtruth[50:100, 0:100] = 2
        groundtruth[0:50,   100:200] = 3
        groundtruth[50:100, 100:200] = 4

        decisions = rag.edge_decisions_from_groundtruth(groundtruth)
        segmentation = rag.naive_segmentation_from_edge_decisions(decisions)
        
        # We don't know where the exact boundary is, but pixels
        # near the corners should definitely be homogeneous
        assert (segmentation[:20,   :20] == segmentation[0,  0]).all()
        assert (segmentation[:20,  -20:] == segmentation[0, -1]).all()
        assert (segmentation[-20:,  :20] == segmentation[-1, 0]).all()
        assert (segmentation[-20:, -20:] == segmentation[-1,-1]).all()
    def test_count(self):
        # Create a volume of flat superpixels, where every slice 
        # is the same (except for the actual sp ids)
        num_sp_per_slice = 200
        slice_superpixels = generate_random_voronoi((100,200), num_sp_per_slice)
         
        superpixels = np.zeros( shape=((10,) + slice_superpixels.shape), dtype=np.uint32 )
        for z in range(10):
            superpixels[z] = slice_superpixels + z*num_sp_per_slice
        superpixels = vigra.taggedView(superpixels, 'zyx')
 
        rag = Rag( superpixels, flat_superpixels=True )
 
        # Manually compute the sp counts for the first N-1 slices,
        # which happens to be the same as the edge counts (since superpixels are identical on all slices)
        sp_counts = np.bincount(superpixels[:-1].flat[:])
        sp_counts = sp_counts[1:] # Drop zero sp (it doesn't exist)
         
        features_df = rag.compute_features(None, ['standard_flatedge_count'], edge_group='z')
        assert sorted(features_df['standard_flatedge_count'].values) == sorted(sp_counts)
    def test_edge_features_with_histogram_blank_data(self):
        """
        There was a bug related to histogram min/max if all edge pixels had the exact same value.
        In that case, min and max were identical and vigra complained.
        For example, if you give vigra histogramRange=(0.0, 0.0), that's a problem.
        This test verifies that no crashes occur in such cases.
        """
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        values = np.zeros_like(superpixels, dtype=np.float32)
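        # All-zero values give every edge an identical min and max -- exactly
        # the degenerate histogramRange=(0.0, 0.0) case described above.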

        feature_names = ['standard_edge_mean', 'standard_edge_minimum', 'standard_edge_maximum', 'standard_edge_variance',
                         'standard_edge_quantiles_25', 'standard_edge_quantiles_50', 'standard_edge_quantiles_75',
                         'standard_edge_count', 'standard_edge_sum']

        features_df = rag.compute_features(values, feature_names)
        assert len(features_df) == len(rag.edge_ids)
        assert list(features_df.columns.values) == ['sp1', 'sp2'] + list(feature_names), \
            "Wrong output feature names: {}".format( features_df.columns.values )

        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # Check dtypes (pandas makes it too easy to get this wrong).
        dtypes = { colname: series.dtype for colname, series in features_df.items() }
        assert all(dtype != np.float64 for dtype in list(dtypes.values())), \
            "An accumulator returned float64 features. That's a waste of ram.\n"\
            "dtypes were: {}".format(dtypes)

        for row_tuple in features_df.itertuples():
            row = OrderedDict( list(zip(['index', 'sp1', 'sp2'] + list(feature_names),
                                   row_tuple)) )

            assert row['standard_edge_mean'] == 0.0
            assert row['standard_edge_minimum'] == 0.0
            assert row['standard_edge_maximum'] == 0.0
            assert row['standard_edge_variance'] == 0.0
            assert row['standard_edge_quantiles_25'] == 0.0
            assert row['standard_edge_quantiles_75'] == 0.0
            assert row['standard_edge_count'] > 0
            assert row['standard_edge_sum'] == 0.0
    def test_correlation(self):
        # Create a volume of flat superpixels, where every slice 
        # is the same (except for the actual sp ids)
        num_sp_per_slice = 200
        slice_superpixels = generate_random_voronoi((100,200), num_sp_per_slice)
        
        superpixels = np.zeros( shape=((10,) + slice_superpixels.shape), dtype=np.uint32 )
        for z in range(10):
            superpixels[z] = slice_superpixels + z*num_sp_per_slice
        superpixels = vigra.taggedView(superpixels, 'zyx')

        rag = Rag( superpixels, flat_superpixels=True )

        # For simplicity, just make values identical to the first slice
        values = np.zeros_like(superpixels, dtype=np.float32)
        values[:] = slice_superpixels[None]
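        # Broadcasting one 2D slice across z makes adjacent slices perfectly
        # correlated, so every flat-edge correlation should be exactly 1.0.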

        features_df = rag.compute_features(values, ['similarity_flatedge_correlation'], edge_group='z')
        assert (features_df['similarity_flatedge_correlation'].values == 1.0).all()

        # Now add a little noise from one slice to the next.
        for z in range(10):
            if z == 0:
                continue
            if z == 1:
                values[z] += 0.001*np.random.random(size=(values[z].shape))
            else:
                values[z] += 1.0001*np.abs(values[z-1] - values[z-2])

        features_df = rag.compute_features(values, ['similarity_flatedge_correlation'], edge_group='z')        
        assert (features_df['similarity_flatedge_correlation'].values >= 0.7).all()
        assert (features_df['similarity_flatedge_correlation'].values <= 1.0).all()
        
        # Now use just noise
        values = np.random.random(size=values.shape).astype(np.float32)
        values = vigra.taggedView(values, 'zyx')
        features_df = rag.compute_features(values, ['similarity_flatedge_correlation'], edge_group='z')
        assert (features_df['similarity_flatedge_correlation'].values <= 1.0).all()
        assert (features_df['similarity_flatedge_correlation'].values >= -1.0).all()
    def test_edge_features_with_histogram(self):
        superpixels = generate_random_voronoi((100,200), 200)
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)

        feature_names = ['standard_edge_mean', 'standard_edge_minimum', 'standard_edge_maximum', 'standard_edge_variance',
                         'standard_edge_quantiles_25', 'standard_edge_quantiles_50', 'standard_edge_quantiles_75',
                         'standard_edge_count', 'standard_edge_sum']

        features_df = rag.compute_features(values, feature_names)
        assert len(features_df) == len(rag.edge_ids)
        assert list(features_df.columns.values) == ['sp1', 'sp2'] + list(feature_names), \
            "Wrong output feature names: {}".format( features_df.columns.values )

        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # Check dtypes (pandas makes it too easy to get this wrong).
        dtypes = { colname: series.dtype for colname, series in features_df.items() }
        assert all(dtype != np.float64 for dtype in dtypes.values()), \
            "An accumulator returned float64 features. That's a waste of ram.\n"\
            "dtypes were: {}".format(dtypes)

        for row_tuple in features_df.itertuples():
            row = OrderedDict( zip(['index', 'sp1', 'sp2'] + list(feature_names),
                                   row_tuple) )
            sp1 = row['sp1']
            sp2 = row['sp2']
            # Values were identical to the superpixels, so this is boring...
            assert row['standard_edge_mean'] == (sp1+sp2)/2.
            assert row['standard_edge_minimum'] == (sp1+sp2)/2.
            assert row['standard_edge_maximum'] == (sp1+sp2)/2.
            assert row['standard_edge_variance'] == 0.0
            assert row['standard_edge_quantiles_25'] == (sp1+sp2)/2.
            assert row['standard_edge_quantiles_75'] == (sp1+sp2)/2.
            assert row['standard_edge_count'] > 0
            assert row['standard_edge_sum'] == row['standard_edge_count'] * (sp1+sp2)/2.
    def test_construction(self):
        superpixels = generate_random_voronoi((100,200), 200)
        
        rag = Rag( superpixels )
        assert rag.num_sp == 200, "num_sp was: {}".format(rag.num_sp)
        assert rag.max_sp == 200
        assert (rag.sp_ids == np.arange(1,201)).all()
        assert rag.sp_ids.dtype == np.uint32
        assert isinstance(rag.max_sp, np.uint32)
        assert rag.edge_ids.dtype == np.uint32
        assert (rag.unique_edge_tables['yx'][['sp1', 'sp2', 'edge_label']].dtypes == np.uint32).all()
        for _axiskey, df in rag.dense_edge_tables.items():
            assert df['sp1'].dtype == df['sp2'].dtype == np.uint32
            assert df['forwardness'].dtype == bool
            assert df['edge_label'].dtype == np.uint32
            
            # The coordinate dtype can be uint16 or uint32,
            # depending on the size of the image
            assert df['y'].dtype == df['x'].dtype == np.uint16

        # Just check some basic invariants of the edge_ids
        assert rag.edge_ids.shape == (rag.num_edges, 2)

        # For all edges (u,v): u < v
        edge_ids_copy = rag.edge_ids.copy()
        edge_ids_copy.sort(axis=1)
        assert (rag.edge_ids == edge_ids_copy).all()

        # edge_ids should be sorted by u, then v.
        edge_df = pd.DataFrame(edge_ids_copy, columns=['sp1', 'sp2'])
        edge_df.sort_values(['sp1', 'sp2'], inplace=True)
        assert (rag.edge_ids == edge_df.values).all()

        # We're just using default features, so the supported features should match.
        default_features = [acc_cls.supported_features(rag) for acc_cls in Rag.DEFAULT_ACCUMULATOR_CLASSES.values()]
        default_features = itertools.chain(*default_features)
        assert set(rag.supported_features()) == set( default_features )
    def _deserialize(self, rags_group, slot):
        assert slot.level == 1
        # Pair stored indexes with their keys,
        # e.g. [("Rag_0000", 0), ("RAG_0002", 2), ("RAG_0003", 3)]
        keys_to_indexes = {k: int(k[4::]) for k in list(rags_group.keys())}
        # Ensure the slot is at least big enough to deserialize into.
        max_index = max([0] + list(keys_to_indexes.values()))

        if len(slot) < max_index + 1:
            slot.resize(max_index + 1)

        for rag_groupname, rag_group in rags_group.items():
            lane_index = keys_to_indexes[rag_groupname]
            label_img = self.labels_slot[lane_index][:].wait()
            label_img = vigra.taggedView(label_img,
                                         self.labels_slot.meta.axistags)
            label_img = label_img.dropChannelAxis()

            rag = Rag.deserialize_hdf5(rag_group, label_img)
            self.cache[lane_index].forceValue(rag)
def compute_dense_rag_table(volume):
    """
    Find all voxel-level adjacencies in the volume,
    except for adjacencies to ID 0.
    
    Returns:
        dataframe with columns:
        ['label_a', 'label_b', 'forwardness', 'z', 'y', 'x', 'axis']
        where ``label_a < label_b`` for all rows, and `forwardness` indicates whether
        label_a is on the 'left' (upper) or on the 'right' (lower) side
        of the voxel boundary.
    """
    assert volume.dtype == np.uint32
    rag = Rag(vigra.taggedView(volume, 'zyx'))

    # Edges are stored by axis -- concatenate them all.
    edges_z, edges_y, edges_x = rag.dense_edge_tables.values()

    del rag

    if len(edges_z) == len(edges_y) == len(edges_x) == 0:
        return None  # No edges detected

    edges_z['axis'] = 'z'
    edges_y['axis'] = 'y'
    edges_x['axis'] = 'x'

    all_edges = list(filter(len, [edges_z, edges_y, edges_x]))
    edges_df = pd.concat(all_edges, ignore_index=True)
    edges_df.rename(columns={'sp1': 'label_a', 'sp2': 'label_b'}, inplace=True)
    del edges_df['edge_label']

    # Filter: not interested in label 0
    edges_df.query("label_a != 0 and label_b != 0", inplace=True)

    return edges_df
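
# A minimal usage sketch (not part of the original tests) for
# compute_dense_rag_table, assuming the same numpy/vigra/pandas imports used
# above. The toy volume and the helper name are hypothetical; the sketch just
# illustrates the returned columns.
def example_compute_dense_rag_table():
    volume = np.ones((2, 4, 4), dtype=np.uint32)
    volume[:, :, 2:] = 2  # labels 1 and 2, adjacent only along the x axis
    edges_df = compute_dense_rag_table(volume)

    # Every detected adjacency crosses the x axis, with label_a < label_b.
    assert (edges_df['axis'] == 'x').all()
    assert (edges_df[['label_a', 'label_b']].values == [1, 2]).all()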
def rag(superpixels):
    return Rag(vigra.taggedView(superpixels, axistags="yxc").withAxes("yx"))
    def test_sp_region_features(self):
        # Create a superpixel for each y-column
        superpixels = np.zeros( (100, 200), dtype=np.uint32 )
        superpixels[:] = np.arange(200)[None, :]
        superpixels = vigra.taggedView(superpixels, 'yx')
        
        rag = Rag( superpixels )

        # For simplicity, just make values identical to superpixels
        values = superpixels.astype(np.float32)

        feature_names = ['standard_sp_regionradii_0',
                         'standard_sp_regionradii_1',
                         'standard_sp_regionaxes_0x',
                         'standard_sp_regionaxes_0y',
                         'standard_sp_regionaxes_1x',
                         'standard_sp_regionaxes_1y',
                         ]

        output_columns = ['sp1', 'sp2',
                          'standard_sp_regionradii_0_sum',
                          'standard_sp_regionradii_0_difference',
                          'standard_sp_regionradii_1_sum',
                          'standard_sp_regionradii_1_difference',
                        
                          'standard_sp_regionaxes_0x_sum',
                          'standard_sp_regionaxes_0x_difference',
                          'standard_sp_regionaxes_0y_sum',
                          'standard_sp_regionaxes_0y_difference',

                          'standard_sp_regionaxes_1x_sum',
                          'standard_sp_regionaxes_1x_difference',
                          'standard_sp_regionaxes_1y_sum',
                          'standard_sp_regionaxes_1y_difference' ]
        
        features_df = rag.compute_features(values, feature_names)
        assert len(features_df) == len(rag.edge_ids)
        assert list(features_df.columns.values) == output_columns, \
            "Wrong output feature names: {}".format( features_df.columns.values )

        # Using shorthand names should result in the same columns
        features_df = rag.compute_features(values, ['standard_sp_regionradii', 'standard_sp_regionaxes'])
        assert len(features_df) == len(rag.edge_ids)
        assert list(features_df.columns.values) == output_columns, \
            "Wrong output feature names: {}".format( features_df.columns.values )

        assert (features_df[['sp1', 'sp2']].values == rag.edge_ids).all()

        # Check dtypes (pandas makes it too easy to get this wrong).
        dtypes = { colname: series.dtype for colname, series in features_df.items() }
        assert all(dtype != np.float64 for dtype in dtypes.values()), \
            "An accumulator returned float64 features. That's a waste of ram.\n"\
            "dtypes were: {}".format(dtypes)

        # Manually compute the 'radius' of each superpixel
        # This is easy because each superpixel is just 1 column wide.
        col_coord_mean = np.arange(100).mean()
        centralized_col_coords = np.arange(100) - col_coord_mean
        col_coord_variance = centralized_col_coords.dot(centralized_col_coords)/100.
        col_radius = np.sqrt(col_coord_variance).astype(np.float32)
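        # (Hand check: coords 0..99 have variance (100**2 - 1)/12 = 833.25,
        # so col_radius == sqrt(833.25) ~ 28.866.)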

        for row_tuple in features_df.itertuples():
            row = OrderedDict( zip(['index'] + list(features_df.columns.values),
                                   row_tuple) )
            # The superpixels were just vertical columns
            assert row['standard_sp_regionradii_0_sum'] == 2*col_radius
            assert row['standard_sp_regionradii_0_difference'] == 0.0
            assert row['standard_sp_regionradii_1_sum'] == 0.0
            assert row['standard_sp_regionradii_1_difference'] == 0.0

            # Axes are just parallel to the coordinate axes, so this is boring.
            # The x_sum is 0.0 because all superpixels are only 1 pixel wide in that direction.
            assert row['standard_sp_regionaxes_0x_sum'] == 0.0
            assert row['standard_sp_regionaxes_0x_difference'] == 0.0
            assert row['standard_sp_regionaxes_0y_sum'] == 2.0
            assert row['standard_sp_regionaxes_0y_difference'] == 0.0

            # The second axis is the smaller one, so here the y_sum axes are non-zero
            assert row['standard_sp_regionaxes_1x_sum'] == 2.0
            assert row['standard_sp_regionaxes_1x_difference'] == 0.0
            assert row['standard_sp_regionaxes_1y_sum'] == 0.0
            assert row['standard_sp_regionaxes_1y_difference'] == 0.0