Example #1
import os

import numpy as np
from cremi import Volume
from cremi.io import CremiFile


def make_seg_submission(seg_volume_dict):
    submission_folder = 'submission'
    if not os.path.exists(submission_folder):
        os.makedirs(submission_folder)
    for name, seg_v in seg_volume_dict.items():
        seg_v = seg_v.astype(np.uint64)
        neuron_ids = Volume(seg_v,
                            resolution=(40.0, 4.0, 4.0),
                            comment="Second submission in 2018")
        file = CremiFile(submission_folder + '/' + name + '.hdf', "w")
        file.write_neuron_ids(neuron_ids)
        file.close()
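
A minimal usage sketch (the sample name and dummy volume below are made up for illustration):

seg = np.ones((10, 100, 100), dtype=np.uint64)  # dummy label volume
make_seg_submission({'sample_A+': seg})         # writes submission/sample_A+.hdf
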
Example #2
 def run(self):
     progress = 0.
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth = os.path.join('/groups/saalfeld/saalfeldlab/larissa/data/cremieval/', self.de,
                              s + '.' + self.m + '.h5')
         test = os.path.join(os.path.dirname(self.input()[0].fn), s+'.'+self.m+'.h5')
         truth = CremiFile(truth, 'a')
         test = CremiFile(test, 'a')
         synaptic_partners_eval = SynapticPartners()
         print(test.read_annotations())
         fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(
             test.read_annotations(), truth.read_annotations(), truth.read_neuron_ids(), all_stats=True)
         results[s] = dict()
         results[s]['fscore'] = fscore
         results[s]['precision'] = precision
         results[s]['recall'] = recall
         results[s]['fp'] = fp
         results[s]['fn'] = fn
         results[s]['filtered_matches'] = filtered_matches
         progress += 100. / len(self.samples)
         # progress reporting is best-effort; ignore failures
         try:
             self.set_progress_percentage(progress)
         except Exception:
             pass
     with self.output().open('w') as done:
         json.dump(results, done)
Example #3
    def _write_cremi_data(self, cremi_data, path, mode="a", **kwargs):
        raw_vol = Volume(cremi_data.raw_data, resolution=cremi_data.res_list)
        clefts_vol = Volume(
            np.zeros(cremi_data.raw_data.shape, dtype=np.uint64),
            resolution=cremi_data.res_list,
        )

        annotations = Annotations()
        for annotation_tuple in cremi_data.annotation_tuples:
            annotations.add_annotation(*annotation_tuple)

        annotations.set_pre_post_partners(*cremi_data.annotation_partners)

        def key(id_, **kwargs):
            # sort by annotation type (descending, via reverse=True), then by ascending id
            return (kwargs["type"], -id_)

        annotations.sort(key_fn=key, reverse=True)

        with closing(CremiFile(path, mode)) as f:
            f.write_raw(raw_vol)
            f.write_clefts(clefts_vol)
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = cremi_data.offset_nm.to_list()
            f.h5file.attrs["stack_offset"] = cremi_data.offset_px.to_list()
            for attr_name, attr_value in kwargs.items():
                f.h5file.attrs[attr_name] = attr_value
Example #4
    def __init__(self, output_path, mode="r"):
        self.output_path = str(output_path)
        self._cremi_file = None
        self.resolution = None
        self.translation = None

        self.timestamp = datetime.now().astimezone().isoformat()
        CremiFile(self.output_path, mode).close()
        self.mode = mode if mode.startswith("r") else "a"
Example #5
def main(s, mode=0, data=None):
    # samples = ['A','B', 'C']
    samples = [(s.split('/')[-1]).split('_')[0]]
    for sample in samples:
        logging.info("evaluating synapse predictions for sample {0:}".format(sample))
        truth_fn = '/groups/saalfeld/saalfeldlab/larissa/data/cremi-2017/sample_{' \
                   '0:}_padded_20170424.aligned.hdf'.format(sample)
        if data is not None:
            logging.info('sample {0:} in mode {1:} using {2:}'.format(sample, mode, data))
        if data in ('val', 'validation', 'VAL', 'VALIDATION'):
            assert s.endswith('.hdf')
            test = CremiFile(s.replace('.hdf', '.validation.hdf'), 'a')
            truth = CremiFile(truth_fn.replace('.hdf', '.validation.hdf'), 'a')
        elif data in ('train', 'training', 'TRAIN', 'TRAINING'):
            assert s.endswith('.hdf')
            test = CremiFile(s.replace('.hdf', '.training.hdf'), 'a')
            truth = CremiFile(truth_fn.replace('.hdf', '.training.hdf'), 'a')
        else:
            test = CremiFile(s, 'a')
            truth = CremiFile(truth_fn, 'a')

        if mode == 0:
            evaluate(test, truth)
        elif mode == 1:
            evaluate_multrecgt(test, truth, add_in_file=True)
        elif mode == 2:
            evaluate_multrecgt(test, truth)
Example #6
def main(s, mode=0, data=None):
    # samples = ['A','B', 'C']
    samples = [(s.split("/")[-1]).split("_")[0]]
    for sample in samples:
        logging.info(
            "evaluating synapse predictions for sample {0:}".format(sample))
        truth_fn = (os.path.join(
            config_loader.get_config()["synapses"]["cremi17_data_path"],
            "sample_{0:}_padded_20170424.aligned.hdf".format(sample)))
        if data is not None:
            logging.info("sample {0:} in mode {1:} using {2:}".format(
                sample, mode, data))
        if (data == "val" or data == "validation" or data == "VAL"
                or data == "VALIDATION"):
            assert s.endswith(".hdf")
            test = CremiFile(s.replace(".hdf", ".validation.hdf"), "a")
            truth = CremiFile(truth_fn.replace(".hdf", ".validation.hdf"), "a")
        elif (data == "train" or data == "training" or data == "TRAIN"
              or data == "TRAINING"):
            assert s.endswith(".hdf")
            test = CremiFile(s.replace(".hdf", ".training.hdf"), "a")
            truth = CremiFile(truth_fn.replace(".hdf", ".training.hdf"), "a")
        else:
            test = CremiFile(s, "a")
            truth = CremiFile(truth_fn, "a")

        if mode == 0:
            evaluate(test, truth)
        elif mode == 1:
            evaluate_multrecgt(test, truth, add_in_file=True)
        elif mode == 2:
            evaluate_multrecgt(test, truth)
Example #7
def remove_annotations_in_mask(filename, mask_filename, mask_ds):
    fh = CremiFile(filename, "a")
    if mask_filename.endswith(".h5") or mask_filename.endswith(".hdf"):
        maskfh = h5py.File(mask_filename, "r")
    else:
        maskfh = zarr.open(mask_filename, mode="r")
    mask = maskfh[mask_ds]
    off = mask.attrs["offset"]
    res = mask.attrs["resolution"]
    mask = mask[:]
    ann = fh.read_annotations()
    shift = sub(ann.offset, off)

    ids = ann.ids()
    rmids = []
    for i in ids:
        t, loc = ann.get_annotation(i)
        vx_idx = (np.array(add(loc, shift)) / res).astype(int)
        if not mask[tuple(vx_idx)]:
            rmids.append(i)
    for i in rmids:
        print("removing {0:}".format(i))
        ann.remove_annotation(i)
    fh.write_annotations(ann)
Example #8
def remove_annotations_in_mask(filename, mask_filename, mask_ds):
    fh = CremiFile(filename, 'a')
    if mask_filename.endswith('.h5') or mask_filename.endswith('.hdf'):
        maskfh = h5py.File(mask_filename, 'r')
    else:
        maskfh = z5py.File(mask_filename, use_zarr_format=False)
    mask = maskfh[mask_ds]
    off = mask.attrs['offset']
    res = mask.attrs['resolution']
    mask = mask[:]
    ann = fh.read_annotations()
    shift = sub(ann.offset, off)

    ids = ann.ids()
    rmids = []
    for i in ids:
        t, loc = ann.get_annotation(i)
        vx_idx = (np.array(add(loc, shift)) / res).astype(int)
        if not mask[tuple(vx_idx)]:
            rmids.append(i)
    for i in rmids:
        print('removing {0:}'.format(i))
        ann.remove_annotation(i)
    fh.write_annotations(ann)
Example #9
def filter(h5filepath,
           csv_file_src,
           csv_file_tgt=None,
           cleft_ds_name="syncleft_dist_thr0.0_cc"):
    logging.info("Filtering clefts in {0:}/{1:} with {2:}".format(
        h5filepath, cleft_ds_name, csv_file_src))
    cf = CremiFile(h5filepath, "r+")
    ann = cf.read_annotations()
    cleft_to_pre, cleft_to_post = make_cleft_to_prepostsyn_neuron_id_dict(
        csv_file_src)
    cleft_list_verified = list(cleft_to_pre.keys())
    logging.info("List of verified clefts:\n{0:}".format(cleft_list_verified))
    cleft_ds = np.array(cf.read_volume(cleft_ds_name).data)

    cleft_list_all = list(np.unique(cleft_ds))
    for bg_id in BG_IDS:
        cleft_list_all.remove(bg_id)
    logging.info("List of all clefts:\n{0:}".format(cleft_list_all))
    cleft_list_unmatched = list(set(cleft_list_all) - set(cleft_list_verified))
    logging.info(
        "List of unmatched clefts:\n{0:}".format(cleft_list_unmatched))
    if csv_file_tgt is not None:
        with open(csv_file_tgt, "w") as f:
            writer = csv.writer(f)
            for i in cleft_list_unmatched:
                writer.writerow([i])
    next_id = max(ann.ids()) + 1
    logging.info("Adding annotations...")
    for cleft_id in cleft_list_unmatched:
        logging.info("... for cleft {0:}".format(cleft_id))
        cleft_coords = np.where(cleft_ds == cleft_id)
        cleft_center = (
            40.0 * cleft_coords[0][int(len(cleft_coords[0]) / 2.0)],
            4.0 * cleft_coords[1][int(len(cleft_coords[1]) / 2.0)],
            4.0 * cleft_coords[2][int(len(cleft_coords[2]) / 2.0)],
        )
        ann.add_annotation(next_id, "synapse", cleft_center)
        ann.add_comment(next_id, str(cleft_id))
        next_id += 1
    logging.info("Saving annotations...")
    cf.write_annotations(ann)
    cf.close()
    logging.info("...done \n\n")
Example #10
 def run(self):
     progress = 0.0
     self.set_progress_percentage(progress)
     for s in self.samples:
         print(s)
         filename = os.path.join(os.path.dirname(self.input()[0].fn),
                                 s + ".h5")
         mask_filename = os.path.join(
             config_loader.get_config()["synapses"]["cremieval_path"],
             self.de,
             s + ".n5",
         )
         mask_dataset = "volumes/masks/" + self.m
         filename_tgt = filename.replace("h5", self.m + ".h5")
         # shutil.copy(filename, filename_tgt)
         f = CremiFile(filename, "a")
         g = CremiFile(filename_tgt, "a")
         maskf = zarr.open(mask_filename, mode="r")
         mask = maskf[mask_dataset]
         off = mask.attrs["offset"]
         res = mask.attrs["resolution"]
         mask = np.array(mask[:])
         ann = f.read_annotations()
         shift = sub(ann.offset, off)
         ids = ann.ids()
         rmids = []
         for i in ids:
             t, loc = ann.get_annotation(i)
             vx_idx = (np.array(add(loc, shift)) / res).astype(int)
             if not mask[tuple(vx_idx)]:
                 rmids.append(i)
         print(rmids)
         for i in rmids:
             print("removing {0:}".format(i))
             ann.remove_annotation(i)
         print(ann.comments.keys())
         print(ann.pre_post_partners)
         g.write_annotations(ann)
         progress += 100.0 / len(self.samples)
         # progress reporting is best-effort; ignore failures
         try:
             self.set_progress_percentage(progress)
         except Exception:
             pass
     for o in self.output():
         done = o.open("w")
         done.close()
Example #11
def prepare_submission(sample,
                       path_segm,
                       inner_path_segm,
                       path_bbox_slice,
                       ds_factor=None):
    """

    :param path_segm:
    :param inner_path_segm:
    :param path_bbox_slice: path to the csv file
    :param ds_factor: for example (1, 2, 2)
    """

    segm = segm_utils.readHDF5(path_segm, inner_path_segm)

    bbox_data = np.genfromtxt(path_bbox_slice, delimiter=';', dtype='int')
    assert bbox_data.shape[0] == segm.ndim and bbox_data.shape[1] == 2
    # bbox_slice = tuple(slice(b_data[0], b_data[1]) for b_data in bbox_data)

    if ds_factor is not None:
        assert len(ds_factor) == segm.ndim
        segm = zoom(segm, ds_factor, order=0)

    padding = tuple(
        (slc[0], shp - slc[1])
        for slc, shp in zip(bbox_data, shape_padded_aligned_datasets[sample]))
    padded_segm = np.pad(segm, pad_width=padding, mode="constant")

    # Apply Constantin crop and then backalign:
    cropped_segm = padded_segm[magic_bboxes[sample]]
    tmp_file = path_segm.replace(".h5", "_submission_temp.hdf")
    backalign_segmentation(sample,
                           cropped_segm,
                           tmp_file,
                           key="temp_data",
                           postprocess=False)

    # Create a CREMI-style file ready to submit:
    final_submission_path = path_segm.replace(".h5", "_submission.hdf")
    file = CremiFile(final_submission_path, "w")

    # Write volumes representing the neuron and synaptic cleft segmentation.
    backaligned_segm = segm_utils.readHDF5(tmp_file, "temp_data")
    neuron_ids = Volume(backaligned_segm.astype('uint64'),
                        resolution=(40.0, 4.0, 4.0),
                        comment="Emb-submission")

    file.write_neuron_ids(neuron_ids)
    file.close()

    os.remove(tmp_file)
Example #12
 def run(self):
     progress = 0.
     self.set_progress_percentage(progress)
     for s in self.samples:
         print(s)
         filename = os.path.join(os.path.dirname(self.input()[0].fn),
                                 s + '.h5')
         mask_filename = os.path.join(
             '/groups/saalfeld/saalfeldlab/larissa/data/cremieval', self.de,
             s + '.n5')
         mask_dataset = 'volumes/masks/' + self.m
         filename_tgt = filename.replace('h5', self.m + '.h5')
         #shutil.copy(filename, filename_tgt)
         f = CremiFile(filename, 'a')
         g = CremiFile(filename_tgt, 'a')
         maskf = z5py.File(mask_filename, use_zarr_format=False)
         mask = maskf[mask_dataset]
         off = mask.attrs['offset']
         res = mask.attrs['resolution']
         mask = np.array(mask[:])
         ann = f.read_annotations()
         shift = sub(ann.offset, off)
         ids = ann.ids()
         rmids = []
         for i in ids:
             t, loc = ann.get_annotation(i)
             vx_idx = (np.array(add(loc, shift)) / res).astype(int)
             if not mask[tuple(vx_idx)]:
                 rmids.append(i)
         print(rmids)
         for i in rmids:
             print('removing {0:}'.format(i))
             ann.remove_annotation(i)
         print(ann.comments.keys())
         print(ann.pre_post_partners)
         g.write_annotations(ann)
         progress += 100. / len(self.samples)
         # progress reporting is best-effort; ignore failures
         try:
             self.set_progress_percentage(progress)
         except Exception:
             pass
     for o in self.output():
         done = o.open('w')
         done.close()
Example #13
 def run(self):
     progress = 0.0
     results = dict()
     self.set_progress_percentage(progress)
     for s in self.samples:
         truth = os.path.join(
             "/groups/saalfeld/saalfeldlab/larissa/data/cremieval/",
             self.de,
             s + "." + self.m + ".h5",
         )
         test = os.path.join(
             os.path.dirname(self.input()[0].fn), s + "." + self.m + ".h5"
         )
         truth = CremiFile(truth, "a")
         test = CremiFile(test, "a")
         synaptic_partners_eval = SynapticPartners()
         print(test.read_annotations())
         fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(
             test.read_annotations(),
             truth.read_annotations(),
             truth.read_neuron_ids(),
             all_stats=True,
         )
         results[s] = dict()
         results[s]["fscore"] = fscore
         results[s]["precision"] = precision
         results[s]["recall"] = recall
         results[s]["fp"] = fp
         results[s]["fn"] = fn
         results[s]["filtered_matches"] = filtered_matches
         progress += 100.0 / len(self.samples)
         # progress reporting is best-effort; ignore failures
         try:
             self.set_progress_percentage(progress)
         except Exception:
             pass
     with self.output().open("w") as done:
         json.dump(results, done)
Example #14
def main(argv):
    del argv
    npz_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.npz'
    hdf_file = FLAGS.base_dir + 'seg_000_' + FLAGS.model_name + '.hdf'
    gt_file = 'FIB-25/test_sample/groundtruth.h5'
    pred = np.load(npz_file)
    img = pred['segmentation']
    print(np.unique(img))
    print(img.shape)

    with h5py.File(hdf_file, 'w') as f:
        neuron_ids = f.create_dataset('volumes/labels/neuron_ids', data=img)

    with h5py.File(hdf_file, 'r+') as f1:
        f1['volumes/labels/neuron_ids'].attrs.create('resolution',
                                                     [8.0, 8.0, 8.0])

    test = CremiFile(hdf_file, 'r')
    truth = CremiFile(gt_file, 'r')

    neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

    (voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
    adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

    print("Neuron IDs")
    print("==========")
    print("\tvoi split   : " + str(voi_split))
    print("\tvoi merge   : " + str(voi_merge))
    voi_total = voi_split + voi_merge
    print("\tvoi total   : " + str(voi_total))
    print("\tadapted RAND: " + str(adapted_rand))
    cremi_score = sqrt(voi_total * adapted_rand)
    print("\tcremi score : " + str(cremi_score))
    print("\tmodel name  : " + FLAGS.model_name)

    with open(FLAGS.results_file, 'a+') as f:
        f.write("\nvoi split   : " + str(voi_split) +
                "\nvoi merge   : " + str(voi_merge) +
                "\nvoi total   : " + str(voi_total) +
                "\nadapted RAND: " + str(adapted_rand) +
                "\ncremi score : " + str(cremi_score) +
                "\ntime cost   : " + FLAGS.time + '\n\n')
Example #15
def find_padding(sample=default_sample):
    unpadded = CremiFile(cremi_path(sample=sample), "r")
    unpadded_raw = unpadded.read_raw()
    unpadded_shape_px = Coordinate(unpadded_raw.data.shape)

    padded = CremiFile(cremi_path(sample=sample, padded=True), "r")
    padded_raw = padded.read_raw()
    padded_shape_px = Coordinate(padded_raw.data.shape)

    fafb_res = Coordinate(unpadded_raw.resolution)

    data_shape_nm = unpadded_shape_px * fafb_res
    padding_px = math.ceil((padded_shape_px - unpadded_shape_px) / 2)

    padding_nm = padding_px * fafb_res

    print("shape (nm): {}".format(data_shape_nm))
    print("padding (nm): {}".format(padding_nm))

    print("l1 shape (px): {}".format(math.ceil(data_shape_nm / L1_RES)))
    print("l1 padding (px): {}".format(math.ceil(padding_nm / L1_RES)))
Example #16
# Create some dummy annotation data
annotations = Annotations()
for id in [0, 1, 2, 3]:
    location = (random.randint(0, 100), random.randint(0, 100),
                random.randint(0, 100))
    annotations.add_annotation(id, "presynaptic_site", location)
for id in [4, 5, 6, 7]:
    location = (random.randint(0, 100), random.randint(0, 100),
                random.randint(0, 100))
    annotations.add_annotation(id, "postsynaptic_site", location)
for (pre, post) in [(0, 4), (1, 5), (2, 6), (3, 7)]:
    annotations.set_pre_post_partners(pre, post)
annotations.add_comment(6, "unsure")

# Open a file for writing (deletes previous file, if exists)
file = CremiFile("example.hdf", "w")

# Write the raw volume. This is given here just for illustration. For your
# submission, you don't need to store the raw data. We have it already.
raw = Volume(np.zeros((10, 100, 100), dtype=np.uint8),
             resolution=(40.0, 4.0, 4.0))
file.write_raw(raw)

# Write volumes representing the neuron and synaptic cleft segmentation.
neuron_ids = Volume(np.ones((10, 100, 100), dtype=np.uint64),
                    resolution=(40.0, 4.0, 4.0),
                    comment="just ones")
clefts = Volume(np.zeros((10, 100, 100), dtype=np.uint64),
                resolution=(40.0, 4.0, 4.0),
                comment="just zeros")
file.write_neuron_ids(neuron_ids)
file.write_clefts(clefts)

# Write synaptic partner annotations.
file.write_annotations(annotations)

file.close()
Example #17
    def write_multicremi(self, rows, path, mode="w-"):
        offset_shapes = []

        for _, row in rows.iterrows():
            offset_px, shape_px = self.row_to_offset_shape_px(row)
            offset_shapes.append(
                (np.array(offset_px.to_list()), np.array(shape_px.to_list())))

        super_offset_px, super_shape_px = get_superroi(*offset_shapes)
        super_offset_nm = super_offset_px * RESOLUTION.to_list() + TRANSLATION.to_list()

        raw_data = np.zeros(super_shape_px, dtype=np.uint8)
        cleft_data = np.zeros(super_shape_px, dtype=np.uint64)
        res_list = RESOLUTION.to_list()

        id_gen = IdGenerator()
        for col in [
                "conn_id", "pre_tnid", "post_tnid", "pre_skid", "post_skid"
        ]:
            id_gen.exclude.update(int(item) for item in rows[col])

        annotations = Annotations()

        zipped = list(zip(rows.iterrows(), offset_shapes))
        pre_to_conn = dict()

        for (_, row), (offset_px, shape_px) in tqdm(zipped,
                                                    desc="fetching data"):

            raw_slicing = tuple(
                slice(o - sup_o, o - sup_o + s)
                for sup_o, o, s in zip(super_offset_px, offset_px, shape_px))

            raw_data[raw_slicing] = self.get_raw(CoordZYX(offset_px),
                                                 CoordZYX(shape_px))

            conn_zyx = -super_offset_nm + [row["conn_" + dim] for dim in "zyx"]
            post_zyx = -super_offset_nm + [
                row["post_tn_" + dim] for dim in "zyx"
            ]
            post_id = int(row["post_tnid"])

            pre_zyx = make_presynaptic_loc(conn_zyx, post_zyx,
                                           EXTRUSION_FACTOR)
            pre_id = id_gen.next()
            pre_to_conn[pre_id] = int(row["conn_id"])

            annotations.add_annotation(pre_id, "presynaptic_site",
                                       list(pre_zyx))
            annotations.add_annotation(post_id, "postsynaptic_site",
                                       list(post_zyx))
            annotations.set_pre_post_partners(pre_id, post_id)

        pre_to_conn_arr = np.array(sorted(pre_to_conn.items()),
                                   dtype=np.uint64)

        logger.info("writing data")
        with closing(CremiFile(path, mode)) as f:
            f.write_raw(Volume(raw_data, resolution=res_list))
            f.write_clefts(Volume(cleft_data, resolution=res_list))
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = list(super_offset_nm)
            f.h5file.attrs["stack_offset"] = list(super_offset_px)
            f.h5file.attrs["annotation_version"] = ANNOTATION_VERSION
            f.h5file.attrs["next_id"] = id_gen.next()
            ds = f.h5file.create_dataset(Dataset.PRE_TO_CONN,
                                         data=pre_to_conn_arr)
            ds.attrs["explanation"] = PRE_TO_CONN_EXPL

        rows.to_hdf(path, "tables/connectors")
Example #18
    def write_monolithic_cremi(self, df, path, mode="a"):
        # todo: unfinished
        df = df.copy()
        xmax, ymax = -1, -1
        z_total = 0
        offsets = dict()
        logger.info("calculating ROIs")
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            if z_total:
                z_total += 3
            offset_px, shape_px = self.row_to_offset_shape_px(row)
            offset_nm = offset_px * RESOLUTION + TRANSLATION
            ymax = max(shape_px["y"], ymax)
            xmax = max(shape_px["x"], xmax)
            z_total += shape_px["z"]
            offsets[idx] = {
                "offset_px": offset_px,
                "shape_px": shape_px,
                "offset_nm": offset_nm,
            }

        raw = np.zeros((z_total, ymax, xmax), dtype=np.uint8)

        divider = np.ones((3, ymax, xmax), dtype=np.uint8) * 255
        divider[1, :, :] = 0

        annotations = Annotations()

        z_offsets = []
        stack_offsets_rows = []
        px_shapes_rows = []
        project_offsets_rows = []

        last_z = 0
        logger.info("fetching and writing data")
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            this_offsets = offsets[idx]
            stack_offsets_rows.append(this_offsets["offset_px"].to_list())
            project_offsets_rows.append(this_offsets["offset_nm"].to_list())
            px_shapes_rows.append(this_offsets["shape_px"].to_list())
            z_offsets.append(last_z)

            for side in ["pre", "post"]:
                local_coords = (
                    CoordZYX({dim: row[side + "_tn_" + dim]
                              for dim in "zyx"}) -
                    this_offsets["offset_nm"]).to_list()
                local_coords[0] += last_z * RESOLUTION["z"]
                annotations.add_annotation(int(row[side + "_tnid"]),
                                           side + "synaptic_site",
                                           local_coords)

            annotations.set_pre_post_partners(int(row["pre_tnid"]),
                                              int(row["post_tnid"]))

            raw[last_z:last_z + this_offsets["shape_px"]["z"],
                0:this_offsets["shape_px"]["y"],
                0:this_offsets["shape_px"]["x"], ] = self.get_raw(
                    this_offsets["offset_px"], this_offsets["shape_px"])
            last_z += this_offsets["shape_px"]["z"]
            raw[last_z:last_z + 3, :, :] = divider
            last_z += 3

        clefts = np.zeros(raw.shape, dtype=np.uint64)
        res_list = RESOLUTION.to_list()

        with closing(CremiFile(path, mode)) as f:
            f.write_raw(Volume(raw, resolution=res_list))
            f.write_clefts(Volume(clefts, resolution=res_list))
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = offset_nm.to_list()
            f.h5file.attrs["stack_offset"] = offset_px.to_list()
            for key, value in row.items():
                f.h5file.attrs[key] = value

        df.to_hdf(path, "tables/connectors")
        for name, this_table in zip(
            ["stack_offset", "shape_px", "project_offset"],
            [stack_offsets_rows, px_shapes_rows, project_offsets_rows],
        ):
            this_df = pd.DataFrame(this_table,
                                   columns=["z", "y", "x"],
                                   index=df.index)
            this_df.to_hdf(path, "tables/" + name)

        z_df = pd.DataFrame(z_offsets, index=df.index, columns=["z"])
        z_df.to_hdf(path, "tables/z_offset")
Example #19
#!/usr/bin/python

from cremi.io import CremiFile
from cremi.evaluation import NeuronIds, Clefts, SynapticPartners

test = CremiFile('test.hdf', 'r')
truth = CremiFile('groundtruth.hdf', 'r')

neuron_ids_evaluation = NeuronIds(truth.read_neuron_ids())

(voi_split, voi_merge) = neuron_ids_evaluation.voi(test.read_neuron_ids())
adapted_rand = neuron_ids_evaluation.adapted_rand(test.read_neuron_ids())

print "Neuron IDs"
print "=========="
print "\tvoi split   : " + str(voi_split)
print "\tvoi merge   : " + str(voi_merge)
print "\tadapted RAND: " + str(adapted_rand)

clefts_evaluation = Clefts(test.read_clefts(), truth.read_clefts())

false_positive_count = clefts_evaluation.count_false_positives()
false_negative_count = clefts_evaluation.count_false_negatives()

false_positive_stats = clefts_evaluation.acc_false_positives()
false_negative_stats = clefts_evaluation.acc_false_negatives()

print "Clefts"
print "======"

print "\tfalse positives: " + str(false_positive_count)
Example #21

# Create some dummy annotation data
annotations = Annotations()
for id in [ 0, 1, 2, 3 ]:
    location = (random.randint(0, 100), random.randint(0, 100), random.randint(0, 100))
    annotations.add_annotation(id, "presynaptic_site", location)
for id in [ 4, 5, 6, 7 ]:
    location = (random.randint(0, 100), random.randint(0, 100), random.randint(0, 100))
    annotations.add_annotation(id, "postsynaptic_site", location)
for (pre, post) in [ (0, 4), (1, 5), (2, 6), (3, 7) ]:
    annotations.set_pre_post_partners(pre, post)
annotations.add_comment(6, "unsure")

# Open a file for writing (deletes previous file, if exists)
file = CremiFile("example.hdf", "w")

# Write the raw volume. This is given here just for illustration. For your 
# submission, you don't need to store the raw data. We have it already.
raw = Volume(np.zeros((10,100,100), dtype=np.uint8), resolution=(40.0, 4.0, 4.0))
file.write_raw(raw)

# Write volumes representing the neuron and synaptic cleft segmentation.
neuron_ids = Volume(np.ones((10,100,100), dtype=np.uint64), resolution=(40.0, 4.0, 4.0), comment="just ones")
clefts = Volume(np.zeros((10,100,100), dtype=np.uint64), resolution=(40.0, 4.0, 4.0), comment="just zeros")
file.write_neuron_ids(neuron_ids)
file.write_clefts(clefts)

# Write synaptic partner annotations.
file.write_annotations(annotations)
Example #22
def Reading(filename, isTest=False):
    file = CremiFile(filename, "r")
    print(filename)
    # Check the content of the datafile
    print("Has raw        : " + str(file.has_raw()))
    print("Has neuron ids : " + str(file.has_neuron_ids()))
    print("Has clefts     : " + str(file.has_clefts()))
    print("Has annotations: " + str(file.has_annotations()))

    # Read everything there is.
    #
    # If you are using the padded versions of the datasets (where raw is larger to
    # provide more context), the offsets of neuron_ids, clefts, and annotations tell
    # you where they are placed in nm relative to (0,0,0) of the raw volume.
    #
    # In other words, neuron_ids, clefts, and annotations are exactly the same
    # between the padded and unpadded versions, except for the offset attribute.
    raw = file.read_raw()
    if not isTest:
        neuron_ids = file.read_neuron_ids()
        clefts = file.read_clefts()
        annotations = file.read_annotations()

    print("Read raw: " + str(raw) +
          ", resolution " + str(raw.resolution) +
          ", offset " + str(raw.offset) +
          ("" if raw.comment is None else ", comment \"" + raw.comment + "\""))
    if not isTest:
        print("Read neuron_ids: " + str(neuron_ids) +
              ", resolution " + str(neuron_ids.resolution) +
              ", offset " + str(neuron_ids.offset) +
              ("" if neuron_ids.comment is None else
               ", comment \"" + neuron_ids.comment + "\""))
        # neuron_ids.offset contains the starting point of neuron_ids inside
        # the raw volume. Note that these numbers are given in nm.

        # print("Read clefts: " + str(clefts) +
        #       ", resolution " + str(clefts.resolution) +
        #       ", offset " + str(clefts.offset) +
        #       ("" if clefts.comment is None else ", comment \"" + clefts.comment + "\""))

        # print("Read annotations:")
        # for (id, type, location) in zip(annotations.ids(), annotations.types(), annotations.locations()):
        #     print(str(id) + " of type " + type + " at " + str(np.array(location) + np.array(annotations.offset)))
        # print("Pre- and post-synaptic partners:")
        # for (pre, post) in annotations.pre_post_partners:
        #     print(str(pre) + " -> " + str(post))

    # Raw h5py access to the same file, returning arrays for training
    with h5py.File(filename, 'r') as f:
        print(f["volumes"])
        imageDataSet = f["volumes/raw"][:]
        if not isTest:
            labelDataSet = f["volumes/labels/neuron_ids"][:]
    imageDataSet = imageDataSet.astype(np.float32)
    if not isTest:
        labelDataSet = labelDataSet.astype(np.float32)
        return imageDataSet, labelDataSet
    return imageDataSet
Example #23
#!/usr/bin/env python

import numpy as np
import skimage.io as io
from skimage.viewer import CollectionViewer

import cremi.evaluation as evaluation
from cremi.io import CremiFile


if __name__ == "__main__":
    img = [io.imread('example.png')]
    
    for w in (0, 2, 4, 10):
        target = np.copy(img[0])[...,np.newaxis]
        evaluation.create_border_mask(img[0][...,np.newaxis],target,w,105,axis=2)
        img.append(target[...,0])

    v = CollectionViewer(img)
    v.show()

    cfIn  = CremiFile('example.h5', 'r')
    cfOut = CremiFile('output.h5', 'a')

    evaluation.create_and_write_masked_neuron_ids(cfIn, cfOut, 3, 240, overwrite=True)
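
For reference, a tiny hedged sketch of create_border_mask's in-place behaviour (the toy array and the max_dist/background_label values are made up; arguments are passed positionally, as in the script above):

import numpy as np
import cremi.evaluation as evaluation

labels = np.array([[1, 1, 2, 2]] * 4, dtype=np.uint64)[np.newaxis, ...]  # 1x4x4 toy volume
masked = np.zeros_like(labels)
# voxels within distance 2 of the 1|2 label boundary are set to 105
evaluation.create_border_mask(labels, masked, 2, 105)
print(masked[0])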
Example #24
def prepare_submission():
    from cremi.io import CremiFile
    from cremi.Volume import Volume

    base = "/home/fabian/drives/datasets/results/nnUNet/test_sets/Task061_CREMI/"
    for sample in ("A", "B", "C"):
        pred = sitk.GetArrayFromImage(
            sitk.ReadImage(join(base, 'results_3d_fullres',
                                "sample_%s+.nii.gz" % sample.lower()))).astype(np.uint64)
        # CREMI uses the maximum uint64 value as the background label for clefts
        pred[pred == 0] = 0xffffffffffffffff
        out = CremiFile(join(base, 'sample_%s+_20160601.hdf' % sample), 'w')
        clefts = Volume(pred, (40., 4., 4.))
        out.write_clefts(clefts)
        out.close()
Example #25
 def _open(self, mode=None):
     mode = mode or self.mode
     self._cremi_file = CremiFile(self.output_path, mode)
     return self
Example #26
class AbstractCremiFactory(metaclass=ABCMeta):
    data_source = None

    def __init__(self, output_path, mode="r"):
        self.output_path = str(output_path)
        self._cremi_file = None
        self.resolution = None
        self.translation = None

        self.timestamp = datetime.now().astimezone().isoformat()
        CremiFile(self.output_path, mode).close()
        self.mode = mode if mode.startswith("r") else "a"

    @abstractmethod
    def get_raw(self, offset_px, shape_px):
        pass

    @abstractmethod
    def set_input_stack(self,
                        n5_ds,
                        resolution_nm_zyx=None,
                        translation_nm_zyx=None):
        pass

    def populate(
        self,
        offset_from_stack_px,
        shape_px,
        padding_low_px=0,
        padding_high_px=None,
        skip_if_exists=False,
    ):
        """

        Parameters
        ----------
        offset_from_stack_px : CoordZYX
            Offset of unpadded ROI from stack origin, in pixels
        shape_px : CoordZYX
            Shape of unpadded ROI
        padding_low_px : CoordZYX or Number
            Padding to add to lower corner of ROI
        padding_high_px : CoordZYX or Number
            Padding to add to higher corner of ROI
        skip_if_exists : bool
            If True, skip volumes that already exist instead of raising an error

        Returns
        -------

        """
        logger.info("populating cremi file")
        padding_low_px, padding_high_px = resolve_padding(
            padding_low_px, padding_high_px, math.ceil)

        padded_offset_from_stack_px = math.floor(offset_from_stack_px -
                                                 padding_low_px)
        padded_shape_px = math.ceil(shape_px + padding_low_px +
                                    padding_high_px)

        self._populate_raw(padded_offset_from_stack_px, padded_shape_px,
                           skip_if_exists)
        self._populate_clefts(shape_px, padding_low_px, skip_if_exists)
        self._populate_annotations(
            padded_offset_from_stack_px,
            padded_shape_px,
            padding_low_px,
            padding_high_px,
            skip_if_exists,
        )

    def has_raw(self):
        with self._open("r"):
            return self._cremi_file.has_raw()

    def has_clefts(self):
        with self._open("r"):
            return self._cremi_file.has_clefts()

    def has_annotations(self):
        with self._open("r"):
            return self._cremi_file.has_annotations()

    def _populate_raw(self, padded_offset_from_stack_px, padded_shape_px,
                      skip_if_exists):
        """

        Parameters
        ----------
        padded_offset_from_stack_px : CoordZYX
        padded_shape_px : CoordZYX
        skip_if_exists

        Returns
        -------

        """

        if self.has_raw():
            if skip_if_exists:
                logger.info("Raw data already exists, skipping")
                return
            else:
                raise RuntimeError("Raw data already exists")

        logger.debug("reading raw volume")
        raw_data = self.get_raw(padded_offset_from_stack_px, padded_shape_px)

        raw_volume = Volume(raw_data, resolution=self.resolution)

        logger.debug("writing raw volume")
        with self:
            self._cremi_file.write_raw(raw_volume)
            self._cremi_file.h5file["volumes/raw"].attrs[
                "data_source"] = self.data_source
            self._cremi_file.h5file["volumes/raw"].attrs[
                "populated_on"] = self.timestamp
            self._cremi_file.h5file.attrs["roi_offset_from_stack"] = (
                padded_offset_from_stack_px *
                CoordZYX(self.resolution)).to_list()

    def _populate_clefts(self, unpadded_shape_px, padding_low_px,
                         skip_if_exists):
        """

        Parameters
        ----------
        unpadded_shape_px : CoordZYX
        padding_low_px : CoordZYX
        skip_if_exists

        Returns
        -------

        """
        if self.has_clefts():
            if skip_if_exists:
                logger.info("Cleft data already exists, skipping")
                return
            else:
                raise RuntimeError("Cleft data already exists")

        logger.debug("generating cleft volume")
        cleft_volume = Volume(
            np.zeros(unpadded_shape_px.to_list(), dtype=np.uint64),
            resolution=self.resolution,
            offset=(padding_low_px * CoordZYX(self.resolution)).to_list(),
        )

        logger.debug("writing clefts")
        with self:
            self._cremi_file.write_clefts(cleft_volume)
            self._cremi_file.h5file["volumes/labels/clefts"].attrs[
                "refreshed_on"] = self.timestamp

    def _populate_annotations(
        self,
        padded_offset_from_stack_px,
        padded_shape_px,
        padding_low_px,
        padding_high_px,
        skip_if_exists,
    ):
        """

        Parameters
        ----------
        padded_offset_from_stack_px : CoordZYX
        padded_shape_px : CoordZYX
        padding_low_px : CoordZYX
        skip_if_exists

        Returns
        -------

        """
        if self.has_annotations():
            if skip_if_exists:
                logger.info("Annotation data already exists, skipping")
                return
            else:
                raise RuntimeError("Annotation data already exists")

        logger.debug("fetching and mangling annotations")
        resolution = CoordZYX(self.resolution)

        id_gen = IdGenerator.from_hdf(self.output_path)

        # annotations = catmaid_to_annotations_conn(
        # annotations = catmaid_to_annotations_conn_to_tn(
        # annotations = catmaid_to_annotations_tn_to_tn(
        annotations, pre_to_conn = catmaid_to_annotations_near_conn_to_tn(
            CoordZYX(self.translation) +
            padded_offset_from_stack_px * resolution,
            padded_shape_px * resolution,
            padding_low_px * resolution,
            padding_high_px * resolution,
            comment=True,
            id_generator=id_gen,
        )
        pre_to_conn_arr = np.array(sorted(pre_to_conn.items()),
                                   dtype=np.uint64)
        with self:
            logger.debug("writing annotations")
            self._cremi_file.write_annotations(annotations)
            f = self._cremi_file.h5file
            f["/annotations"].attrs["populated_on"] = self.timestamp
            ds = f.create_dataset("/annotations/presynaptic_site/pre_to_conn",
                                  data=pre_to_conn_arr)
            ds.attrs["explanation"] = (
                "BIGCAT only displays one edge per presynapse, so this format creates new presynapses near the "
                "connector node. This dataset maps these nodes to the connector IDs"
            )
            f.attrs["annotation_version"] = ANNOTATION_VERSION

    def _open(self, mode=None):
        mode = mode or self.mode
        self._cremi_file = CremiFile(self.output_path, mode)
        return self

    def close(self):
        try:
            self._cremi_file.close()
        except AttributeError:
            pass
        self._cremi_file = None

    def __enter__(self):
        if self._cremi_file is None:
            return self._open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
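
To show how this abstract factory is meant to be subclassed, here is a hedged minimal sketch (assuming the imports from the example above); the constant-gray get_raw and the fallback resolution are invented for illustration, and a real subclass would read from an actual image stack:

class DummyCremiFactory(AbstractCremiFactory):
    data_source = "dummy"

    def get_raw(self, offset_px, shape_px):
        # return a flat gray block instead of real image data
        return np.full(shape_px.to_list(), 128, dtype=np.uint8)

    def set_input_stack(self, n5_ds, resolution_nm_zyx=None, translation_nm_zyx=None):
        self.resolution = resolution_nm_zyx or (40.0, 4.0, 4.0)
        self.translation = translation_nm_zyx or (0.0, 0.0, 0.0)

With such a subclass, populate() can be called with CoordZYX offsets and shapes as documented in its docstring.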
Example #27
#!/usr/bin/python

from cremi import Annotations, Volume
from cremi.io import CremiFile
import numpy as np
import random

# Open a file for reading
file = CremiFile("example.hdf", "r")

# Check the content of the datafile
print "Has raw: " + str(file.has_raw())
print "Has neuron ids: " + str(file.has_neuron_ids())
print "Has clefts: " + str(file.has_clefts())
print "Has annotations: " + str(file.has_annotations())

# Read everything there is.
#
# If you are using the padded versions of the datasets (where raw is larger to
# provide more context), the offsets of neuron_ids, clefts, and annotations tell
# you where they are placed in nm relative to (0,0,0) of the raw volume.
#
# In other words, neuron_ids, clefts, and annotations are exactly the same
# between the padded and unpadded versions, except for the offset attribute.
raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
clefts = file.read_clefts()
annotations = file.read_annotations()

print "Read raw: " + str(raw) + \
    ", resolution " + str(raw.resolution) + \