Code example #1
0
 def run(self):
     """Score predicted synaptic partners against ground truth for each sample.

     For every sample in ``self.samples`` the predicted annotations are
     compared to the ground-truth file with ``SynapticPartners.fscore``.
     A JSON dict mapping sample name -> {fscore, precision, recall, fp,
     fn, filtered_matches} is written to this task's output target.
     """
     progress = 0.
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth_path = os.path.join(
             '/groups/saalfeld/saalfeldlab/larissa/data/cremieval/', self.de,
             s + '.' + self.m + '.h5')
         test_path = os.path.join(os.path.dirname(self.input()[0].fn),
                                  s + '.' + self.m + '.h5')
         # NOTE(review): files are only read here; 'a' mode kept for backward
         # compatibility with existing file permissions -- confirm 'r' is safe.
         truth = CremiFile(truth_path, 'a')
         test = CremiFile(test_path, 'a')
         try:
             synaptic_partners_eval = SynapticPartners()
             fscore, precision, recall, fp, fn, filtered_matches = \
                 synaptic_partners_eval.fscore(
                     test.read_annotations(),
                     truth.read_annotations(),
                     truth.read_neuron_ids(),
                     all_stats=True)
         finally:
             # Always release the HDF5 handles, even if scoring raises.
             truth.close()
             test.close()
         results[s] = {
             'fscore': fscore,
             'precision': precision,
             'recall': recall,
             'fp': fp,
             'fn': fn,
             'filtered_matches': filtered_matches,
         }
         progress += 100. / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # Progress reporting is best-effort; never fail the task on it.
             pass
     with self.output().open('w') as done:
         json.dump(results, done)
Code example #2
0
File: eval.py  Project: 2000ZRL/FFN-PyTorch
def main(argv):
    """Evaluate a saved FFN segmentation against CREMI ground truth.

    Loads the ``segmentation`` array from the model's .npz output, re-saves
    it as an HDF5 volume with a ``resolution`` attribute, computes VOI
    split/merge, adapted RAND and the combined CREMI score against the
    ground-truth neuron ids, prints them, and appends them to
    ``FLAGS.results_file``.
    """
    del argv  # unused (absl entry-point signature)

    npz_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.npz'
    hdf_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.hdf'
    gt_file = 'FIB-25/test_sample/groundtruth.h5'

    pred = np.load(npz_file)
    img = pred['segmentation']
    print(np.unique(img))
    print(img.shape)

    # Write the segmentation and its resolution attribute in one pass
    # (previously the file was opened twice: once 'w', then again 'r+').
    with h5py.File(hdf_file, 'w') as f:
        neuron_ids = f.create_dataset('volumes/labels/neuron_ids', data=img)
        neuron_ids.attrs.create('resolution', [8.0, 8.0, 8.0])

    test = CremiFile(hdf_file, 'r')
    truth = CremiFile(gt_file, 'r')
    try:
        neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())
        (voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
        adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())
    finally:
        # Release both HDF5 handles even if the evaluation raises.
        test.close()
        truth.close()

    print("Neuron IDs")
    print("==========")
    print("\tvoi split   : " + str(voi_split))
    print("\tvoi merge   : " + str(voi_merge))
    voi_total = voi_split + voi_merge
    print("\tvoi total   : " + str(voi_total))
    print("\tadapted RAND: " + str(adapted_rand))
    # CREMI score: geometric mean of total VOI and adapted RAND error.
    cremi_score = sqrt(voi_total * adapted_rand)
    print("\tcremi score : " + str(cremi_score))
    print("\tmodel name  : " + FLAGS.model_name)

    with open(FLAGS.results_file, 'a+') as f:
        f.write("\nvoi split   : " + str(voi_split)
                + "\nvoi merge   : " + str(voi_merge)
                + "\nvoi total   : " + str(voi_total)
                + "\nadapted RAND: " + str(adapted_rand)
                + "\ncremi score : " + str(cremi_score)
                + "\ntime cost   : " + FLAGS.time + '\n\n')
Code example #3
0
 def run(self):
     """Score predicted synaptic partners against ground truth for each sample.

     For every sample in ``self.samples`` the predicted annotations are
     compared to the ground-truth file with ``SynapticPartners.fscore``.
     A JSON dict mapping sample name -> {fscore, precision, recall, fp,
     fn, filtered_matches} is written to this task's output target.
     """
     progress = 0.0
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth_path = os.path.join(
             "/groups/saalfeld/saalfeldlab/larissa/data/cremieval/",
             self.de,
             s + "." + self.m + ".h5",
         )
         test_path = os.path.join(
             os.path.dirname(self.input()[0].fn), s + "." + self.m + ".h5"
         )
         # NOTE(review): files are only read here; "a" mode kept for backward
         # compatibility with existing file permissions -- confirm "r" is safe.
         truth = CremiFile(truth_path, "a")
         test = CremiFile(test_path, "a")
         try:
             synaptic_partners_eval = SynapticPartners()
             fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(
                 test.read_annotations(),
                 truth.read_annotations(),
                 truth.read_neuron_ids(),
                 all_stats=True,
             )
         finally:
             # Always release the HDF5 handles, even if scoring raises.
             truth.close()
             test.close()
         results[s] = {
             "fscore": fscore,
             "precision": precision,
             "recall": recall,
             "fp": fp,
             "fn": fn,
             "filtered_matches": filtered_matches,
         }
         progress += 100.0 / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # Progress reporting is best-effort; never fail the task on it.
             pass
     with self.output().open("w") as done:
         json.dump(results, done)
Code example #4
0
#!/usr/bin/python
"""Evaluate a CREMI submission: neuron-id scores (VOI split/merge, adapted
RAND) and cleft false-positive/negative statistics for test.hdf against
groundtruth.hdf."""

from cremi.io import CremiFile
from cremi.evaluation import NeuronIds, Clefts, SynapticPartners

# Open the predicted and ground-truth volumes read-only.
test = CremiFile('test.hdf', 'r')
truth = CremiFile('groundtruth.hdf', 'r')

neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

# Variation of information (split/merge components) and adapted RAND error.
(voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

print("Neuron IDs")
print("==========")
print("\tvoi split   : " + str(voi_split))
print("\tvoi merge   : " + str(voi_merge))
print("\tadapted RAND: " + str(adapted_rand))

clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print("Clefts")
print("======")

print("\tfalse positives: " + str(false_positive_count))
Code example #5
0
File: Data.py  Project: tmquan/cremi3d
def Reading(filename, isTest=False):
    """Load a CREMI HDF5 volume as float32 numpy arrays.

    Parameters
    ----------
    filename : str
        Path to a CREMI-format HDF5 file.
    isTest : bool
        When True, only the raw image volume is returned (test files carry
        no labels); when False, the neuron-id label volume is returned too.

    Returns
    -------
    imageDataSet : np.ndarray (float32)
    labelDataSet : np.ndarray (float32), only when ``isTest`` is False.
    """
    cremi_file = CremiFile(filename, "r")  # renamed: don't shadow builtin `file`
    print(filename)
    # Report what the file contains.
    print("Has raw			: " + str(cremi_file.has_raw()))
    print("Has neuron ids	: " + str(cremi_file.has_neuron_ids()))
    print("Has clefts		: " + str(cremi_file.has_clefts()))
    print("Has annotations	: " + str(cremi_file.has_annotations()))

    # If you are using the padded versions of the datasets (where raw is
    # larger to provide more context), the offsets of neuron_ids tell you
    # where they are placed in nm relative to (0,0,0) of the raw volume.
    # (clefts/annotations were read here before but never used; dropped.)
    raw = cremi_file.read_raw()
    if not isTest:
        neuron_ids = cremi_file.read_neuron_ids()

    print("Read raw: 	" + str(raw) +
          ", resolution " + str(raw.resolution) +
          ", offset 	" + str(raw.offset) +
          ("" if raw.comment is None else ", comment \"" + raw.comment + "\""))
    if not isTest:
        # neuron_ids.offset is the starting point of neuron_ids inside the
        # raw volume, in nm.
        print("Read neuron_ids: " + str(neuron_ids) +
              ", resolution " + str(neuron_ids.resolution) +
              ", offset " + str(neuron_ids.offset) +
              ("" if neuron_ids.comment is None
               else ", comment \"" + neuron_ids.comment + "\""))

    # Pull the actual voxel data out with h5py and convert to float32 once.
    with h5py.File(filename, 'r') as f:
        print(f["volumes"])
        imageDataSet = f["volumes/raw"][:].astype(np.float32)
        if not isTest:
            labelDataSet = f["volumes/labels/neuron_ids"][:].astype(np.float32)

    if isTest:
        return imageDataSet
    return imageDataSet, labelDataSet
Code example #6
0
File: example_read.py  Project: cremi/cremi_python
# NOTE(review): Python 2 fragment from cremi_python's example_read.py.
# It assumes `file` is an already-opened CremiFile (opened outside this
# excerpt), and the final print statement is cut off mid-expression here,
# so this fragment is not runnable as-is.
# Check the content of the datafile
print "Has raw: " + str(file.has_raw())
print "Has neuron ids: " + str(file.has_neuron_ids())
print "Has clefts: " + str(file.has_clefts())
print "Has annotations: " + str(file.has_annotations())

# Read everything there is.
#
# If you are using the padded versions of the datasets (where raw is larger to 
# provide more context), the offsets of neuron_ids, clefts, and annotations tell 
# you where they are placed in nm relative to (0,0,0) of the raw volume.
#
# In other words, neuron_ids, clefts, and annotations are exactly the same 
# between the padded and unpadded versions, except for the offset attribute.
raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
clefts = file.read_clefts()
annotations = file.read_annotations()

# Echo each volume with its resolution/offset and optional comment.
# (Style note: `comment == None` would be `comment is None` in modern code.)
print "Read raw: " + str(raw) + \
    ", resolution " + str(raw.resolution) + \
    ", offset " + str(raw.offset) + \
    ("" if raw.comment == None else ", comment \"" + raw.comment + "\"")

print "Read neuron_ids: " + str(neuron_ids) + \
    ", resolution " + str(neuron_ids.resolution) + \
    ", offset " + str(neuron_ids.offset) + \
    ("" if neuron_ids.comment == None else ", comment \"" + neuron_ids.comment + "\"")

print "Read clefts: " + str(clefts) + \
    ", resolution " + str(clefts.resolution) + \
Code example #7
0
# NOTE(review): duplicate Python 2 fragment of cremi_python's example_read.py.
# It assumes `file` is an already-opened CremiFile (opened outside this
# excerpt), and the final print statement is cut off mid-expression here,
# so this fragment is not runnable as-is.
# Check the content of the datafile
print "Has raw: " + str(file.has_raw())
print "Has neuron ids: " + str(file.has_neuron_ids())
print "Has clefts: " + str(file.has_clefts())
print "Has annotations: " + str(file.has_annotations())

# Read everything there is.
#
# If you are using the padded versions of the datasets (where raw is larger to
# provide more context), the offsets of neuron_ids, clefts, and annotations tell
# you where they are placed in nm relative to (0,0,0) of the raw volume.
#
# In other words, neuron_ids, clefts, and annotations are exactly the same
# between the padded and unpadded versions, except for the offset attribute.
raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
clefts = file.read_clefts()
annotations = file.read_annotations()

# Echo each volume with its resolution/offset and optional comment.
# (Style note: `comment == None` would be `comment is None` in modern code.)
print "Read raw: " + str(raw) + \
    ", resolution " + str(raw.resolution) + \
    ", offset " + str(raw.offset) + \
    ("" if raw.comment == None else ", comment \"" + raw.comment + "\"")

print "Read neuron_ids: " + str(neuron_ids) + \
    ", resolution " + str(neuron_ids.resolution) + \
    ", offset " + str(neuron_ids.offset) + \
    ("" if neuron_ids.comment == None else ", comment \"" + neuron_ids.comment + "\"")

print "Read clefts: " + str(clefts) + \
    ", resolution " + str(clefts.resolution) + \
Code example #8
0
#!/usr/bin/python
"""Evaluate a CREMI submission: neuron-id scores (VOI split/merge, adapted
RAND) and cleft false-positive/negative statistics for test.hdf against
groundtruth.hdf."""

from cremi.io import CremiFile
from cremi.evaluation import NeuronIds, Clefts, SynapticPartners

# Open the predicted and ground-truth volumes read-only.
test = CremiFile('test.hdf', 'r')
truth = CremiFile('groundtruth.hdf', 'r')

neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

# Variation of information (split/merge components) and adapted RAND error.
(voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

print("Neuron IDs")
print("==========")
print("\tvoi split   : " + str(voi_split))
print("\tvoi merge   : " + str(voi_merge))
print("\tadapted RAND: " + str(adapted_rand))

clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print("Clefts")
print("======")

print("\tfalse positives: " + str(false_positive_count))