예제 #1
0
 def run(self):
     """Evaluate synaptic-partner predictions against CREMI ground truth.

     For each sample, opens the prediction and ground-truth CREMI files,
     computes fscore/precision/recall plus raw fp/fn counts and the
     filtered matches, and dumps the per-sample results as JSON to the
     task output.
     """
     progress = 0.
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth = os.path.join('/groups/saalfeld/saalfeldlab/larissa/data/cremieval/', self.de,
                              s + '.' + self.m + '.h5')
         test = os.path.join(os.path.dirname(self.input()[0].fn), s+'.'+self.m+'.h5')
         truth = CremiFile(truth, 'a')
         test = CremiFile(test, 'a')
         synaptic_partners_eval = SynapticPartners()
         print(test.read_annotations())
         fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(
             test.read_annotations(), truth.read_annotations(), truth.read_neuron_ids(), all_stats=True)
         results[s] = dict()
         results[s]['fscore'] = fscore
         results[s]['precision'] = precision
         results[s]['recall'] = recall
         results[s]['fp'] = fp
         results[s]['fn'] = fn
         results[s]['filtered_matches'] = filtered_matches
         progress += 100. / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # progress reporting is best-effort; never abort the evaluation.
             # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
             # still propagate.
             pass
     with self.output().open('w') as done:
         json.dump(results, done)
예제 #2
0
 def run(self):
     """Evaluate synaptic-partner predictions against CREMI ground truth.

     For each sample, opens the prediction and ground-truth CREMI files,
     computes fscore/precision/recall plus raw fp/fn counts and the
     filtered matches, and dumps the per-sample results as JSON to the
     task output.
     """
     progress = 0.0
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth = os.path.join(
             "/groups/saalfeld/saalfeldlab/larissa/data/cremieval/",
             self.de,
             s + "." + self.m + ".h5",
         )
         test = os.path.join(
             os.path.dirname(self.input()[0].fn), s + "." + self.m + ".h5"
         )
         truth = CremiFile(truth, "a")
         test = CremiFile(test, "a")
         synaptic_partners_eval = SynapticPartners()
         print(test.read_annotations())
         fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(
             test.read_annotations(),
             truth.read_annotations(),
             truth.read_neuron_ids(),
             all_stats=True,
         )
         results[s] = dict()
         results[s]["fscore"] = fscore
         results[s]["precision"] = precision
         results[s]["recall"] = recall
         results[s]["fp"] = fp
         results[s]["fn"] = fn
         results[s]["filtered_matches"] = filtered_matches
         progress += 100.0 / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # progress reporting is best-effort; never abort the evaluation.
             # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
             # still propagate.
             pass
     with self.output().open("w") as done:
         json.dump(results, done)
예제 #3
0
 def run(self):
     """Strip annotations that fall outside a sample's mask volume.

     For each sample, reads the annotations from the input CREMI file,
     looks each annotation location up in the zarr mask dataset
     (converted from nm to voxel indices via the mask's offset and
     resolution attributes), removes annotations whose mask value is
     falsy, and writes the filtered annotations to a new ``<m>.h5`` file.
     """
     progress = 0.0
     self.set_progress_percentage(progress)
     for s in self.samples:
         print(s)
         filename = os.path.join(os.path.dirname(self.input()[0].fn),
                                 s + ".h5")
         mask_filename = os.path.join(
             config_loader.get_config()["synapses"]["cremieval_path"],
             self.de,
             s + ".n5",
         )
         mask_dataset = "volumes/masks/" + self.m
         filename_tgt = filename.replace("h5", self.m + ".h5")
         f = CremiFile(filename, "a")
         g = CremiFile(filename_tgt, "a")
         maskf = zarr.open(mask_filename, mode="r")
         mask = maskf[mask_dataset]
         off = mask.attrs["offset"]
         res = mask.attrs["resolution"]
         mask = np.array(mask[:])
         ann = f.read_annotations()
         shift = sub(ann.offset, off)
         ids = ann.ids()
         rmids = []
         for i in ids:
             t, loc = ann.get_annotation(i)
             # np.int was removed in NumPy 1.24; builtin int is the
             # documented drop-in replacement.
             vx_idx = (np.array(add(loc, shift)) / res).astype(int)
             if not mask[tuple(vx_idx)]:
                 rmids.append(i)
         print(rmids)
         for i in rmids:
             print("removing {0:}".format(i))
             ann.remove_annotation(i)
         print(ann.comments.keys())
         print(ann.pre_post_partners)
         g.write_annotations(ann)
         progress += 100.0 / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # progress reporting is best-effort; never abort the run.
             pass
     for o in self.output():
         done = o.open("w")
         done.close()
예제 #4
0
 def run(self):
     """Strip annotations that fall outside a sample's mask volume.

     For each sample, reads the annotations from the input CREMI file,
     looks each annotation location up in the n5 mask dataset
     (converted from nm to voxel indices via the mask's offset and
     resolution attributes), removes annotations whose mask value is
     falsy, and writes the filtered annotations to a new ``<m>.h5`` file.
     """
     progress = 0.
     self.set_progress_percentage(progress)
     for s in self.samples:
         print(s)
         filename = os.path.join(os.path.dirname(self.input()[0].fn),
                                 s + '.h5')
         mask_filename = os.path.join(
             '/groups/saalfeld/saalfeldlab/larissa/data/cremieval', self.de,
             s + '.n5')
         mask_dataset = 'volumes/masks/' + self.m
         filename_tgt = filename.replace('h5', self.m + '.h5')
         f = CremiFile(filename, 'a')
         g = CremiFile(filename_tgt, 'a')
         maskf = z5py.File(mask_filename, use_zarr_format=False)
         mask = maskf[mask_dataset]
         off = mask.attrs['offset']
         res = mask.attrs['resolution']
         mask = np.array(mask[:])
         ann = f.read_annotations()
         shift = sub(ann.offset, off)
         ids = ann.ids()
         rmids = []
         for i in ids:
             t, loc = ann.get_annotation(i)
             # np.int was removed in NumPy 1.24; builtin int is the
             # documented drop-in replacement.
             vx_idx = (np.array(add(loc, shift)) / res).astype(int)
             if not mask[tuple(vx_idx)]:
                 rmids.append(i)
         print(rmids)
         for i in rmids:
             print('removing {0:}'.format(i))
             ann.remove_annotation(i)
         print(ann.comments.keys())
         print(ann.pre_post_partners)
         g.write_annotations(ann)
         progress += 100. / len(self.samples)
         try:
             self.set_progress_percentage(progress)
         except Exception:
             # progress reporting is best-effort; never abort the run.
             pass
     for o in self.output():
         done = o.open('w')
         done.close()
예제 #5
0
def filter(h5filepath,
           csv_file_src,
           csv_file_tgt=None,
           cleft_ds_name="syncleft_dist_thr0.0_cc"):
    """Annotate clefts that have no verified pre/post partner match.

    Reads the cleft label volume ``cleft_ds_name`` from ``h5filepath``,
    compares its ids against the verified ids from ``csv_file_src``,
    optionally writes the unmatched ids to ``csv_file_tgt`` (one id per
    row), and adds a "synapse" annotation — commented with the cleft id —
    at the center coordinate of every unmatched cleft.

    NOTE(review): this shadows the builtin ``filter``; the name is kept
    for backward compatibility with existing callers.
    """
    logging.info("Filtering clefts in {0:}/{1:} with {2:}".format(
        h5filepath, cleft_ds_name, csv_file_src))
    cf = CremiFile(h5filepath, "r+")
    ann = cf.read_annotations()
    cleft_to_pre, cleft_to_post = make_cleft_to_prepostsyn_neuron_id_dict(
        csv_file_src)
    cleft_list_verified = cleft_to_pre.keys()
    logging.info("List of verified clefts:\n{0:}".format(cleft_list_verified))
    cleft_ds = np.array(cf.read_volume(cleft_ds_name).data)

    cleft_list_all = list(np.unique(cleft_ds))
    for bg_id in BG_IDS:
        # guard: list.remove raises ValueError if a background id is absent
        if bg_id in cleft_list_all:
            cleft_list_all.remove(bg_id)
    logging.info("List of all clefts:\n{0:}".format(cleft_list_all))
    cleft_list_unmatched = list(set(cleft_list_all) - set(cleft_list_verified))
    logging.info(
        "List of unmatched clefts:\n{0:}".format(cleft_list_unmatched))
    if csv_file_tgt is not None:
        with open(csv_file_tgt, "w") as f:
            writer = csv.writer(f)
            for i in cleft_list_unmatched:
                writer.writerow([i])
    # default=0 avoids a ValueError when the file has no annotations yet
    next_id = max(ann.ids(), default=0) + 1
    logging.info("Adding annotations...")
    for cleft_id in cleft_list_unmatched:
        logging.info("... for cleft {0:}".format(cleft_id))
        cleft_coords = np.where(cleft_ds == cleft_id)
        # median voxel of the cleft, scaled by (40, 4, 4) — presumably the
        # z/y/x voxel resolution in nm; TODO confirm against the dataset
        cleft_center = (
            40.0 * cleft_coords[0][int(len(cleft_coords[0]) / 2.0)],
            4.0 * cleft_coords[1][int(len(cleft_coords[1]) / 2.0)],
            4.0 * cleft_coords[2][int(len(cleft_coords[2]) / 2.0)],
        )
        ann.add_annotation(next_id, "synapse", cleft_center)
        ann.add_comment(next_id, str(cleft_id))
        next_id += 1
    logging.info("Saving annotations...")
    cf.write_annotations(ann)
    cf.close()
    logging.info("...done \n\n")
def remove_annotations_in_mask(filename, mask_filename, mask_ds):
    """Remove annotations located where the mask is falsy.

    Opens ``filename`` as a CremiFile, loads the mask dataset ``mask_ds``
    from ``mask_filename`` (HDF5 via h5py for ``.h5``/``.hdf``, otherwise
    n5 via z5py), converts each annotation location from nm to voxel
    indices using the mask's offset and resolution attributes, removes
    annotations whose mask value is falsy, and writes the result back.
    """
    fh = CremiFile(filename, 'a')
    # endswith accepts a tuple of suffixes — one call instead of an `or` chain
    if mask_filename.endswith(('.h5', '.hdf')):
        maskfh = h5py.File(mask_filename, 'r')
    else:
        maskfh = z5py.File(mask_filename, use_zarr_format=False)
    mask = maskfh[mask_ds]
    off = mask.attrs['offset']
    res = mask.attrs['resolution']
    mask = mask[:]
    ann = fh.read_annotations()
    shift = sub(ann.offset, off)

    ids = ann.ids()
    rmids = []
    for i in ids:
        t, loc = ann.get_annotation(i)
        # np.int was removed in NumPy 1.24; builtin int is the drop-in replacement
        vx_idx = (np.array(add(loc, shift)) / res).astype(int)
        if not mask[tuple(vx_idx)]:
            rmids.append(i)
    for i in rmids:
        print('removing {0:}'.format(i))
        ann.remove_annotation(i)
    fh.write_annotations(ann)
예제 #7
0
def remove_annotations_in_mask(filename, mask_filename, mask_ds):
    """Remove annotations located where the mask is falsy.

    Opens ``filename`` as a CremiFile, loads the mask dataset ``mask_ds``
    from ``mask_filename`` (HDF5 via h5py for ``.h5``/``.hdf``, otherwise
    zarr/n5 via zarr), converts each annotation location from nm to voxel
    indices using the mask's offset and resolution attributes, removes
    annotations whose mask value is falsy, and writes the result back.
    """
    fh = CremiFile(filename, "a")
    # endswith accepts a tuple of suffixes — one call instead of an `or` chain
    if mask_filename.endswith((".h5", ".hdf")):
        maskfh = h5py.File(mask_filename, "r")
    else:
        maskfh = zarr.open(mask_filename, mode="r")
    mask = maskfh[mask_ds]
    off = mask.attrs["offset"]
    res = mask.attrs["resolution"]
    mask = mask[:]
    ann = fh.read_annotations()
    shift = sub(ann.offset, off)

    ids = ann.ids()
    rmids = []
    for i in ids:
        t, loc = ann.get_annotation(i)
        # np.int was removed in NumPy 1.24; builtin int is the drop-in replacement
        vx_idx = (np.array(add(loc, shift)) / res).astype(int)
        if not mask[tuple(vx_idx)]:
            rmids.append(i)
    for i in rmids:
        print("removing {0:}".format(i))
        ann.remove_annotation(i)
    fh.write_annotations(ann)
예제 #8
0
# NOTE(review): Python 2 script (print statements). Reports CREMI evaluation
# metrics; voi_split/voi_merge/adapted_rand and the test/truth files are
# computed/opened above this excerpt.
print "Neuron IDs"
print "=========="
print "\tvoi split   : " + str(voi_split)
print "\tvoi merge   : " + str(voi_merge)
print "\tadapted RAND: " + str(adapted_rand)

# Cleft evaluation: compare predicted clefts against ground truth.
clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

# Accumulated distance statistics for the mismatches.
false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print "Clefts"
print "======"

print "\tfalse positives: " + str(false_positive_count)
print "\tfalse negatives: " + str(false_negative_count)

print "\tdistance to ground truth: " + str(false_positive_stats)
print "\tdistance to proposal    : " + str(false_negative_stats)

# Synaptic-partner evaluation: fscore of predicted vs. ground-truth partners.
synaptic_partners_evaluation = SynapticPartners()
fscore = synaptic_partners_evaluation.fscore(test.read_annotations(), truth.read_annotations(), truth.read_neuron_ids())

print "Synaptic partners"
print "================="
print "\tfscore: " + str(fscore)
예제 #9
0
파일: Data.py 프로젝트: tmquan/cremi3d
def Reading(filename, isTest=False):
    """Read a CREMI HDF5 file and return its volumes as float32 arrays.

    NOTE(review): Python 2 code (print statements).

    Prints which datasets the file contains, reads the CREMI volumes via
    CremiFile, then re-reads the raw (and, unless isTest, the neuron-id
    label) volume directly with h5py and casts both to float32.

    Returns (imageDataSet, labelDataSet) when isTest is False, otherwise
    just imageDataSet.
    """
    # # Read the data into dataset
    # print "Filename: ", filename
    # # with h5py.File('sample_A_20160501.hdf', 'r') as f:
    # with h5py.File(filename, 'r') as f:
    # print f["volumes"]
    # imageDataSet = f["volumes/raw"][:]
    # labelDataSet = f["volumes/labels/neuron_ids"][:]

    # imageDataSet = imageDataSet.astype(np.float32)
    # labelDataSet = labelDataSet.astype(np.float32)
    # return imageDataSet, labelDataSet
    file = CremiFile(filename, "r")
    print filename
    # Check the content of the datafile
    print "Has raw			: " + str(file.has_raw())
    print "Has neuron ids	: " + str(file.has_neuron_ids())
    print "Has clefts		: " + str(file.has_clefts())
    print "Has annotations	: " + str(file.has_annotations())

    # Read everything there is.
    #
    # If you are using the padded versions of the datasets (where raw is larger to
    # provide more context), the offsets of neuron_ids, clefts, and annotations tell
    # you where they are placed in nm relative to (0,0,0) of the raw volume.
    #
    # In other words, neuron_ids, clefts, and annotations are exactly the same
    # between the padded and unpadded versions, except for the offset attribute.
    raw = file.read_raw()
    if not isTest:
        # Ground-truth volumes only exist for training data.
        neuron_ids = file.read_neuron_ids()
        clefts = file.read_clefts()
        annotations = file.read_annotations()


    print "Read raw: 	"  + str(raw) + \
       ", resolution "  + str(raw.resolution) + \
       ", offset 	"  + str(raw.offset) + \
      ("" if raw.comment == None else ", comment \"" + raw.comment + "\"")
    if not isTest:
        print "Read neuron_ids: "  + str(neuron_ids) + \
           ", resolution "     + str(neuron_ids.resolution) + \
           ", offset " + str(neuron_ids.offset) + \
          ("" if neuron_ids.comment == None else ", comment \"" + neuron_ids.comment + "\"")
        # neuron_ids.offset will contain the starting point of neuron_ids inside the raw volume.
        # Note that these numbers are given in nm.

        # print "Read clefts: " + str(clefts) + \
        # ", resolution " + str(clefts.resolution) + \
        # ", offset " + str(clefts.offset) + \
        # ("" if clefts.comment == None else ", comment \"" + clefts.comment + "\"")

        # print "Read annotations:"
        # for (id, type, location) in zip(annotations.ids(), annotations.types(), annotations.locations()):
        # print str(id) + " of type " + type + " at " + str(np.array(location)+np.array(annotations.offset))
        # print "Pre- and post-synaptic partners:"
        # for (pre, post) in annotations.pre_post_partners:
        # print str(pre) + " -> " + str(post)
    # Re-read the arrays directly with h5py for the actual return values.
    with h5py.File(filename, 'r') as f:
        print f["volumes"]
        imageDataSet = f["volumes/raw"][:]
        if not isTest:
            labelDataSet = f["volumes/labels/neuron_ids"][:]
    imageDataSet = imageDataSet.astype(np.float32)
    if not isTest:
        labelDataSet = labelDataSet.astype(np.float32)
    if not isTest:
        return imageDataSet, labelDataSet
    return imageDataSet
예제 #10
0
# NOTE(review): Python 2 script (print statements). `file` is a previously
# opened CremiFile (opened above this excerpt); this prints what it contains
# and then reads and reports each volume.
print "Has neuron ids: " + str(file.has_neuron_ids())
print "Has clefts: " + str(file.has_clefts())
print "Has annotations: " + str(file.has_annotations())

# Read everything there is.
#
# If you are using the padded versions of the datasets (where raw is larger to 
# provide more context), the offsets of neuron_ids, clefts, and annotations tell 
# you where they are placed in nm relative to (0,0,0) of the raw volume.
#
# In other words, neuron_ids, clefts, and annotations are exactly the same 
# between the padded and unpadded versions, except for the offset attribute.
raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
clefts = file.read_clefts()
annotations = file.read_annotations()

print "Read raw: " + str(raw) + \
    ", resolution " + str(raw.resolution) + \
    ", offset " + str(raw.offset) + \
    ("" if raw.comment == None else ", comment \"" + raw.comment + "\"")

print "Read neuron_ids: " + str(neuron_ids) + \
    ", resolution " + str(neuron_ids.resolution) + \
    ", offset " + str(neuron_ids.offset) + \
    ("" if neuron_ids.comment == None else ", comment \"" + neuron_ids.comment + "\"")

print "Read clefts: " + str(clefts) + \
    ", resolution " + str(clefts.resolution) + \
    ", offset " + str(clefts.offset) + \
    ("" if clefts.comment == None else ", comment \"" + clefts.comment + "\"")
예제 #11
0
# NOTE(review): Python 2 script (print statements). `file` is a previously
# opened CremiFile (opened above this excerpt); this prints what it contains
# and then reads and reports each volume.
print "Has neuron ids: " + str(file.has_neuron_ids())
print "Has clefts: " + str(file.has_clefts())
print "Has annotations: " + str(file.has_annotations())

# Read everything there is.
#
# If you are using the padded versions of the datasets (where raw is larger to
# provide more context), the offsets of neuron_ids, clefts, and annotations tell
# you where they are placed in nm relative to (0,0,0) of the raw volume.
#
# In other words, neuron_ids, clefts, and annotations are exactly the same
# between the padded and unpadded versions, except for the offset attribute.
raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
clefts = file.read_clefts()
annotations = file.read_annotations()

print "Read raw: " + str(raw) + \
    ", resolution " + str(raw.resolution) + \
    ", offset " + str(raw.offset) + \
    ("" if raw.comment == None else ", comment \"" + raw.comment + "\"")

print "Read neuron_ids: " + str(neuron_ids) + \
    ", resolution " + str(neuron_ids.resolution) + \
    ", offset " + str(neuron_ids.offset) + \
    ("" if neuron_ids.comment == None else ", comment \"" + neuron_ids.comment + "\"")

print "Read clefts: " + str(clefts) + \
    ", resolution " + str(clefts.resolution) + \
    ", offset " + str(clefts.offset) + \
    ("" if clefts.comment == None else ", comment \"" + clefts.comment + "\"")
예제 #12
0
# NOTE(review): Python 2 script (print statements). Reports CREMI evaluation
# metrics; voi_split/voi_merge/adapted_rand and the test/truth files are
# computed/opened above this excerpt.
print "=========="
print "\tvoi split   : " + str(voi_split)
print "\tvoi merge   : " + str(voi_merge)
print "\tadapted RAND: " + str(adapted_rand)

# Cleft evaluation: compare predicted clefts against ground truth.
clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

# Accumulated distance statistics for the mismatches.
false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print "Clefts"
print "======"

print "\tfalse positives: " + str(false_positive_count)
print "\tfalse negatives: " + str(false_negative_count)

print "\tdistance to ground truth: " + str(false_positive_stats)
print "\tdistance to proposal    : " + str(false_negative_stats)

# Synaptic-partner evaluation: fscore of predicted vs. ground-truth partners.
synaptic_partners_evaluation = SynapticPartners()
fscore = synaptic_partners_evaluation.fscore(test.read_annotations(),
                                             truth.read_annotations(),
                                             truth.read_neuron_ids())

print "Synaptic partners"
print "================="
print "\tfscore: " + str(fscore)