def createSignature(self, a, b):
    """Build a minimal two-feature Signatures object from the pair (a, b)."""
    sig = Signatures()
    # Two synthetic feature names; values are the raw inputs.
    sig.names = ['ft [0]', 'ft [1]']
    sig.values = [a, b]
    # Encode both inputs into the source-file tag so samples stay distinguishable.
    sig.source_file = '%s %s' % (a, b)
    sig.version = self.feature_vector_version
    return sig
def createSignature(self, a, b):
    """Return a Signatures instance holding the two feature values a and b."""
    result = Signatures()
    result.names = ["ft [0]", "ft [1]"]
    result.values = [a, b]
    # str() on each input, space-separated — identical to "%s %s" % (a, b).
    result.source_file = " ".join([str(a), str(b)])
    result.version = self.feature_vector_version
    return result
def test_LargeFeatureSetGrayscale(self):
    """Large feature set, grayscale image"""
    # Reference values were pre-calculated and stored in the .sig file;
    # recompute the same feature set from the image and compare element-wise.
    expected = Signatures.NewFromSigFile(
        self.sig_file_path, image_path=self.test_tif_path)
    computed = Signatures.LargeFeatureSet(self.test_tif_path)
    for expected_val, computed_val in zip(expected.values, computed.values):
        self.assertAlmostEqual(expected_val, computed_val, delta=self.epsilon)
def test_reduceFeatures(self):
    """FeatureReduce must return names/values reordered to match the request."""
    ftnames = ["c", "b"]
    s = Signatures()
    s.names = ["a", "b", "c"]
    s.values = [1.0, 2.0, 3.0]
    s.source_file = ""
    s1 = s.FeatureReduce(ftnames)
    self.assertEqual(s1.names, ftnames)
    # FIX: assertAlmostEqual does not support sequences — it only passed
    # before via the `first == second` early-exit, and any near-miss would
    # have raised TypeError (list - list) instead of a proper failure.
    # Compare the values element-wise instead.
    self.assertEqual(len(s1.values), 2)
    for actual, expected in zip(s1.values, [3.0, 2.0]):
        self.assertAlmostEqual(actual, expected)
def test_reduceFeatures(self):
    """FeatureReduce must return names/values reordered to match the request."""
    ftnames = ['c', 'b']
    s = Signatures()
    s.names = ['a', 'b', 'c']
    s.values = [1.0, 2.0, 3.0]
    s.source_file = ''
    s1 = s.FeatureReduce(ftnames)
    self.assertEqual(s1.names, ftnames)
    # FIX: assertAlmostEqual is defined for scalars, not lists; with lists
    # it only succeeds through the equality short-circuit and raises
    # TypeError on any genuine mismatch. Assert each value individually.
    expected_values = [3.0, 2.0]
    self.assertEqual(len(s1.values), len(expected_values))
    for got, want in zip(s1.values, expected_values):
        self.assertAlmostEqual(got, want)
def test_calculateLargeFeatureSet(self):
    """Compute the large feature set on two images and sanity-check the output.

    Verifies the expected feature count, the 'name [index]' naming scheme,
    uniqueness of names, name stability across images, and that no value
    is inf or NaN.
    """
    ft1 = Signatures.LargeFeatureSet(self.image1)
    self.assertEqual(len(ft1.names), self.LargeFSexpectedLen)
    # FIX: use a raw string — '\[' and '\d' are invalid escape sequences
    # in a plain string literal (SyntaxWarning since Python 3.12, slated
    # to become an error). Compile once instead of re-matching per name.
    name_pattern = re.compile(r'^.+ \[\d+\]$')
    self.assertTrue(all(name_pattern.match(n) for n in ft1.names))
    # Feature names must be unique.
    self.assertEqual(len(set(ft1.names)), self.LargeFSexpectedLen)
    self.assertEqual(len(ft1.values), self.LargeFSexpectedLen)
    self.assertFalse(any(np.isinf(ft1.values)))
    self.assertFalse(any(np.isnan(ft1.values)))
    ft2 = Signatures.LargeFeatureSet(self.image2)
    # Feature names must not depend on the input image.
    self.assertEqual(ft1.names, ft2.names)
    self.assertEqual(len(ft2.values), self.LargeFSexpectedLen)
    self.assertFalse(any(np.isinf(ft2.values)))
    self.assertFalse(any(np.isnan(ft2.values)))
def addToFeatureSet(ftb, ds, fts, classId, imagesOnly):
    """Load stored features for dataset ``ds`` and add them to feature set
    ``fts`` under class ``classId``.

    When ``imagesOnly`` is true, features are loaded image-by-image;
    otherwise a single bulk load is performed. Returns a progress/error
    message string.
    """
    message = ''
    tableId = WndcharmStorage.getAttachedTableFile(ftb.tc, ds)
    # Guard clause: without an attached table there is nothing to load.
    if not tableId:
        message += 'ERROR: Table not found for Dataset id:%d' % ds.getId()
        return message
    if not ftb.openTable(tableId):
        return message + '\nERROR: Table not opened'
    version = unwrap(ftb.versiontag.getTextValue())
    message += 'Opened table id:%d version:%s\n' % (tableId, version)

    def buildSignature(names, values, imageId):
        # Assemble one Signatures record for a single image.
        sig = Signatures()
        sig.names = names
        sig.values = values
        sig.source_file = str(imageId)
        sig.version = version
        return sig

    if imagesOnly:
        for image in ds.listChildren():
            imId = image.getId()
            message += '\tProcessing features for image id:%d\n' % imId
            names, values = ftb.loadFeatures(imId)
            fts.AddSignature(buildSignature(names, values, imId), classId)
    else:
        names, values, ids = ftb.bulkLoadFeatures()
        message += '\tProcessing all features for dataset id:%d\n' % ds.getId()
        for imId, vals in izip(ids, values):
            fts.AddSignature(buildSignature(names, vals, imId), classId)

    fts.classnames_list[classId] = ds.getName()
    return message
class TestWND5Classification(unittest.TestCase):
    """WND5 Classification"""

    epsilon = 0.00001

    # Paths to the original fixture files.
    test_sig_path = join(test_dir, 't1_s01_c05_ij-l_precalculated.sig')
    test_fit_path = join(test_dir, 'test-l.fit')
    test_feat_wght_path = join(test_dir, 'test_fit-l.weights')
    test_tif_path = join(test_dir, 't1_s01_c05_ij.tif')

    # Expected values the Python API must reproduce, keyed by feature count:
    # wndchrm classify -l -f1.0 test-l.fit t1_s01_c05_ij.tif
    #   t1_s01_c05_ij.tif 1.6e-27 0.083 0.917 * 4cell 3.835
    # wndchrm classify -l -f0.14765 test-l.fit t1_s01_c05_ij.tif
    #   t1_s01_c05_ij.tif 3.23e-27 0.076 0.924 * 4cell 3.848
    # wndchrm classify -l -f0.0685 test-l.fit t1_s01_c05_ij.tif
    #   t1_s01_c05_ij.tif 7.05e-27 0.069 0.931 * 4cell 3.862
    correct_marg_probs = {
        2919: [0.083, 0.917],
        431: [0.076, 0.924],
        # 200: [0.044, 0.956] — slight difference in marg probs due to
        # the use of round() in Check() below.
        200: [0.069, 0.931],
    }

    # Load the fixture files once and only once for all this class's tests
    # (runs at class-definition time).
    feature_set = FeatureSet_Discrete.NewFromFitFile(test_fit_path)
    feature_set.Normalize()
    test_sample = Signatures.NewFromSigFile(test_sig_path, test_tif_path)
    test_sample.Normalize(feature_set)
    all_weights = FisherFeatureWeights.NewFromFile(test_feat_wght_path)

    # --------------------------------------------------------------------------
    def Check(self, num_feats=None):
        """Classify the test sample at the given feature count and compare
        the rounded marginal probabilities against the known-good values."""
        weights = self.all_weights.Threshold(num_feats)
        reduced_set = self.feature_set.FeatureReduce(weights.names)
        reduced_sample = self.test_sample.FeatureReduce(weights.names)
        result = DiscreteImageClassificationResult.NewWND5(
            reduced_set, weights, reduced_sample)
        rounded_probs = [round(p, 3) for p in result.marginal_probabilities]
        self.assertSequenceEqual(self.correct_marg_probs[num_feats],
                                 rounded_probs)

    # --------------------------------------------------------------------------
    def test_WND5_all_features(self):
        """WND5 classification with entire large feature set (2919 features)"""
        self.Check(2919)

    # --------------------------------------------------------------------------
    def test_WND5_15percent_threshold(self):
        """WND5 classification with large feature set 15% threshold (431 features)"""
        self.Check(431)

    # --------------------------------------------------------------------------
    def test_WND5_200_feat_threshold(self):
        """WND5 classification with large feature set & 200 feature threshold"""
        self.Check(200)