Example #1
    def _write_cremi_data(self, cremi_data, path, mode="a", **kwargs):
        raw_vol = Volume(cremi_data.raw_data, resolution=cremi_data.res_list)
        clefts_vol = Volume(
            np.zeros(cremi_data.raw_data.shape, dtype=np.uint64),
            resolution=cremi_data.res_list,
        )

        annotations = Annotations()
        for annotation_tuple in cremi_data.annotation_tuples:
            annotations.add_annotation(*annotation_tuple)

        annotations.set_pre_post_partners(*cremi_data.annotation_partners)

        def sort_key(id_, **kwargs):
            return (kwargs["type"], -id_)

        annotations.sort(key_fn=sort_key, reverse=True)

        with closing(CremiFile(path, mode)) as f:
            f.write_raw(raw_vol)
            f.write_clefts(clefts_vol)
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = cremi_data.offset_nm.to_list()
            f.h5file.attrs["stack_offset"] = cremi_data.offset_px.to_list()
            for key, value in kwargs.items():
                f.h5file.attrs[key] = value
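The custom attributes written above sit on the root of the HDF5 file, so they can be read back with plain h5py. A minimal sketch, assuming a file produced by the method above (the path is hypothetical):

import h5py

# hypothetical path to a file written by _write_cremi_data
with h5py.File("cremi_data.hdf", "r") as f:
    print(f.attrs["project_offset"])  # offset in nm, as written above
    print(f.attrs["stack_offset"])    # offset in px, as written above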
Example #2
def cremi_score(gt, seg, return_all_scores=False, border_threshold=None):
    if cremi is None:
        raise ImportError("The cremi package is necessary to run cremi_score()")

    # the zeros must be kept in the gt since they are the ignore label
    gt = vigra.analysis.labelVolumeWithBackground(gt.astype(np.uint32))
    # seg = vigra.analysis.labelVolume(seg.astype(np.uint32))

    seg = np.array(seg)
    seg = np.require(seg, requirements=['C'])
    # Make sure that all labels are strictly positive:
    seg = seg.astype('uint32')
    # FIXME: it seems to have some trouble with label 0 in the segmentation:
    seg += 1

    gt = np.array(gt)
    gt = np.require(gt, requirements=['C'])
    # shift labels down; the background 0 wraps around to the max uint32 value
    gt = (gt - 1).astype('uint32')
    # assert gt.min() >= -1

    gt_ = Volume(gt)
    seg_ = Volume(seg)

    metrics = NeuronIds(gt_, border_threshold=border_threshold)
    arand = metrics.adapted_rand(seg_)

    vi_s, vi_m = metrics.voi(seg_)
    cs = np.sqrt(arand * (vi_s + vi_m))
    # cs = (vi_s + vi_m + arand) / 3.
    if return_all_scores:
        return {'cremi-score': cs.item(), 'vi-merge': vi_m.item(), 'vi-split': vi_s.item(), 'adapted-rand': arand.item()}
    else:
        return cs
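A minimal smoke test for cremi_score(), assuming numpy, vigra and the cremi package are importable; the label volumes are random and purely illustrative:

import numpy as np

# random label volumes, illustration only
gt = np.random.randint(0, 5, size=(8, 64, 64)).astype('uint32')
seg = np.random.randint(1, 5, size=(8, 64, 64)).astype('uint32')

scores = cremi_score(gt, seg, return_all_scores=True)
print(scores)  # dict with cremi-score, vi-merge, vi-split, adapted-rand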
Example #3
def check_ccs():
    binary = z5py.File('./binary_volume.n5')['data'][:]
    ccs_vi = vigra.analysis.labelVolumeWithBackground(binary)
    ccs = z5py.File('./ccs.n5')['data'][:]

    print("Start comparison")
    metric = NeuronIds(Volume(ccs_vi))
    print("Arand", metric.adapted_rand(Volume(ccs)))
Example #4
def eval_block(block_id, res_prefix):
    gt = Volume(vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                               'data'))
    res = Volume(vigra.readHDF5('%s_%i.h5' % (res_prefix, block_id), 'data'))
    metrics = NeuronIds(gt)
    are = metrics.adapted_rand(res)
    vi_s, vi_m = metrics.voi(res)
    return are, vi_s, vi_m
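A hypothetical driver for eval_block(), averaging the metrics over a few blocks (the block ids and result prefix are placeholders):

import numpy as np

# placeholder block ids and result prefix
results = [eval_block(block_id, '/path/to/results/mc') for block_id in (1, 2, 3)]
are, vi_split, vi_merge = np.mean(results, axis=0)
print("mean adapted rand:", are, "mean voi:", vi_split + vi_merge)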
Example #5
def evaluate(gt, segmentation):
    gt, _, _ = vigra.analysis.relabelConsecutive(gt, start_label=1)
    metrics = NeuronIds(Volume(gt))

    segmentation = Volume(segmentation)
    vi_split, vi_merge = metrics.voi(segmentation)
    ri = metrics.adapted_rand(segmentation)

    return vi_split, vi_merge, ri
Example #6
def cremi_scores(seg, gt):
    gt[gt == 0] = -1  # map background to the CREMI ignore label (wraps to uint max)
    seg = Volume(seg)
    metric = NeuronIds(Volume(gt))
    vis, vim = metric.voi(seg)
    are = metric.adapted_rand(seg)
    cs = (are + vis + vim) / 3
    return {
        'cremi-score': cs,
        'vi-merge': vim,
        'vi-split': vis,
        'adapted-rand': are
    }
Example #7
def cremi_scores(seg, gt):
    gt[gt == 0] = -1  # map background to the CREMI ignore label (wraps to uint max)
    seg = Volume(seg)
    metric = NeuronIds(Volume(gt))
    vis, vim = metric.voi(seg)
    are = metric.adapted_rand(seg)
    # cremi uses the geometric mean of rand and vi !
    cs = sqrt(are * (vis + vim))
    return {
        'cremi-score': cs,
        'vi-merge': vim,
        'vi-split': vis,
        'adapted-rand': are
    }
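The two variants above differ only in how the three metrics are combined: the first takes the arithmetic mean, while the second uses the geometric mean of adapted rand and total variation of information, which is the combination used on the CREMI leaderboard. Side by side, with are, vis and vim as computed above:

cs_arithmetic = (are + vis + vim) / 3    # first variant
cs_geometric = sqrt(are * (vis + vim))   # second variant, official CREMI combination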
Example #8
def make_groundtruth(correct_resolution=True):
    # fill gt with ignore labels (for whatever reason this is not 0...)
    gt = 0xffffffffffffffff * np.ones(shape, dtype='uint64')
    # make one gt synapse
    rr, cc = circle(20, 20, syn_radius)
    gt[0, rr, cc] = 1

    if correct_resolution:
        vol = Volume(gt, resolution=(40., 4., 4.))
        gt_name = 'gt_correct_res.h5'
    else:
        vol = Volume(gt)
        gt_name = 'gt_incorrect_res.h5'
    cremi_f = io.CremiFile('../data/%s' % gt_name, 'w')
    cremi_f.write_clefts(vol)
    return cremi_f
Example #9
    def _populate_clefts(self, unpadded_shape_px, padding_low_px,
                         skip_if_exists):
        """

        Parameters
        ----------
        unpadded_shape_px : CoordZYX
        padding_low_px : CoordZYX
        skip_if_exists

        Returns
        -------

        """
        if self.has_clefts():
            if skip_if_exists:
                logger.info("Cleft data already exists, skipping")
                return
            else:
                raise RuntimeError("Cleft data already exists")

        logger.debug("generating cleft volume")
        cleft_volume = Volume(
            np.zeros(unpadded_shape_px.to_list(), dtype=np.uint64),
            resolution=self.resolution,
            offset=(padding_low_px * CoordZYX(self.resolution)).to_list(),
        )

        logger.debug("writing clefts")
        with self:
            self._cremi_file.write_clefts(cleft_volume)
            self._cremi_file.h5file["volumes/labels/clefts"].attrs[
                "refreshed_on"] = self.timestamp
Example #10
def make_seg_submission(seg_volume_dict):
    submission_folder = 'submission'
    if not os.path.exists(submission_folder):
        os.makedirs(submission_folder)
    for name, seg_v in seg_volume_dict.items():
        seg_v = seg_v.astype(np.uint64)
        neuron_ids = Volume(seg_v,
                            resolution=(40.0, 4.0, 4.0),
                            comment="Second submission in 2018")
        file = CremiFile(submission_folder + '/' + name + '.hdf', "w")
        file.write_neuron_ids(neuron_ids)
        file.close()
Example #11
def make_predictions(fp_pos, correct_resolution=True):
    prediction = 0xffffffffffffffff * np.ones(shape, dtype='uint64')

    # make tp synapse prediction
    rr, cc = circle(22, 18, syn_radius)
    prediction[0, rr, cc] = 1

    # make fp synapse prediction
    rr, cc = circle(fp_pos[1], fp_pos[2], syn_radius)
    prediction[fp_pos[0], rr, cc] = 2

    if correct_resolution:
        vol = Volume(prediction, resolution=(40., 4., 4.))
        prediction_name = 'prediction_correct_res_%s.h5' % '_'.join(map(str, fp_pos))
    else:
        vol = Volume(prediction)
        prediction_name = 'prediction_incorrect_res_%s.h5' % '_'.join(map(str, fp_pos))
    cremi_f = io.CremiFile('../data/%s' % prediction_name, 'w')
    cremi_f.write_clefts(vol)
    return cremi_f
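The files produced by make_groundtruth() and make_predictions() can then be compared with the cremi package's cleft metrics. A sketch assuming both files were written with correct_resolution=True and fp_pos=(0, 20, 20) (hypothetical values):

from cremi import io
from cremi.evaluation import Clefts

truth = io.CremiFile('../data/gt_correct_res.h5', 'r').read_clefts()
test = io.CremiFile('../data/prediction_correct_res_0_20_20.h5', 'r').read_clefts()

clefts_eval = Clefts(test, truth)
print("false positives:", clefts_eval.count_false_positives())
print("false negatives:", clefts_eval.count_false_negatives())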
Example #12
def prepare_submission(sample,
                       path_segm,
                       inner_path_segm,
                       path_bbox_slice,
                       ds_factor=None):
    """

    :param path_segm:
    :param inner_path_segm:
    :param path_bbox_slice: path to the csv file
    :param ds_factor: for example (1, 2, 2)
    """

    segm = segm_utils.readHDF5(path_segm, inner_path_segm)

    bbox_data = np.genfromtxt(path_bbox_slice, delimiter=';', dtype='int')
    assert bbox_data.shape[0] == segm.ndim and bbox_data.shape[1] == 2
    # bbox_slice = tuple(slice(b_data[0], b_data[1]) for b_data in bbox_data)

    if ds_factor is not None:
        assert len(ds_factor) == segm.ndim
        segm = zoom(segm, ds_factor, order=0)

    padding = tuple(
        (slc[0], shp - slc[1])
        for slc, shp in zip(bbox_data, shape_padded_aligned_datasets[sample]))
    padded_segm = np.pad(segm, pad_width=padding, mode="constant")

    # Apply Constantin crop and then backalign:
    cropped_segm = padded_segm[magic_bboxes[sample]]
    tmp_file = path_segm.replace(".h5", "_submission_temp.hdf")
    backalign_segmentation(sample,
                           cropped_segm,
                           tmp_file,
                           key="temp_data",
                           postprocess=False)

    # Create a CREMI-style file ready to submit:
    final_submission_path = path_segm.replace(".h5", "_submission.hdf")
    file = CremiFile(final_submission_path, "w")

    # Write volumes representing the neuron and synaptic cleft segmentation.
    backaligned_segm = segm_utils.readHDF5(tmp_file, "temp_data")
    neuron_ids = Volume(backaligned_segm.astype('uint64'),
                        resolution=(40.0, 4.0, 4.0),
                        comment="Emb-submission")

    file.write_neuron_ids(neuron_ids)
    file.close()

    os.remove(tmp_file)
Example #13
def agglomerate_sp_eval(ws_path, gt_path, prob_path):

    probs = vigra.readHDF5(prob_path, 'data')

    ws = vigra.readHDF5(ws_path, 'data')
    n_nodes = int(ws.max()) + 1

    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    # _, node_sizes = np.unique(ws, return_counts=True)
    # edge_sizes = nrag.accumulateEdgeMeanAndLength(rag, np.zeros(rag.shape, dtype='float32'))[:, 1]
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(rag.uvIds())

    gt = Volume(vigra.readHDF5(gt_path, 'data'))

    # node_factor = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1][::-1]
    node_factor = [.025, .05, .075, .1, .15, .2, .25, .4, .5]

    for nf in node_factor:
        # FIXME agglomerative clustering segfaults
        # n_target_nodes = int(nf * n_nodes)
        # agglomerator = cseg.AgglomerativeClustering(n_target_nodes)
        # node_labeling = agglomerator(graph, probs, edge_sizes=edge_sizes, node_sizes=node_sizes)

        agglomerator = cseg.MalaClustering(nf)
        node_labeling = agglomerator(graph, probs)
        vigra.analysis.relabelConsecutive(node_labeling, out=node_labeling)

        seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
        seg = Volume(seg)
        metrics = NeuronIds(gt)
        vi_s, vi_m = metrics.voi(seg)
        are = metrics.adapted_rand(seg)
        print("Evaluation for reduction", nf)
        print("Voi - Split ", vi_s)
        print("Voi - Merge ", vi_m)
        print("Adapted Rand", are)
        print("N-Nodes:", int(node_labeling.max() + 1), '/', n_nodes)
Example #14
def gt_projection(block_id):
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_block%i.h5' % block_id
    ws = vigra.readHDF5(ws_path, 'data')
    ws = vigra.analysis.labelVolume(ws.astype('uint32'))
    gt = vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                        'data')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    labeling = nrag.gridRagAccumulateLabels(rag, gt)

    projected = Volume(nrag.projectScalarNodeDataToPixels(rag, labeling))

    metrics = NeuronIds(Volume(gt))
    vi_s, vi_m = metrics.voi(projected)
    are = metrics.adapted_rand(projected)

    print(vi_s)
    print(vi_m)
    print(are)
    print()

    os.remove(ws_path)
    vigra.writeHDF5(ws, ws_path, 'data', compression='gzip')
Example #15
def cremi_score(gt,
                seg,
                return_all_scores=True,
                b_thresh=2,
                data_resolution=(1.0, 1.0, 1.0)):
    """compute cremi scores from np.array"""

    if len(gt.shape) == 2:
        gt = gt[None, :, :]
        seg = seg[None, :, :]
    gt_ = Volume(gt, resolution=data_resolution)
    seg_ = Volume(seg, resolution=data_resolution)

    metrics = NeuronIds(gt_, b_thresh)
    arand = metrics.adapted_rand(seg_)

    vi_s, vi_m = metrics.voi(seg_)
    # official cremi score: geometric mean of adapted rand and summed voi
    cs = np.sqrt(arand * (vi_s + vi_m))

    if return_all_scores:
        return cs, vi_s, vi_m, arand
    else:
        return cs
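An illustrative call on random 2D label images, which the function above promotes to 3D internally:

import numpy as np

# random 2D label images, illustration only
gt2d = np.random.randint(0, 4, size=(128, 128)).astype('uint64')
seg2d = np.random.randint(1, 4, size=(128, 128)).astype('uint64')

cs, vi_s, vi_m, arand = cremi_score(gt2d, seg2d, b_thresh=2)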
Example #16
    def _populate_raw(self, padded_offset_from_stack_px, padded_shape_px,
                      skip_if_exists):
        """

        Parameters
        ----------
        padded_offset_from_stack_px : CoordZYX
        padded_shape_px : CoordZYX
        skip_if_exists

        Returns
        -------

        """

        if self.has_raw():
            if skip_if_exists:
                logger.info("Raw data already exists, skipping")
                return
            else:
                raise RuntimeError("Raw data already exists")

        logger.debug("reading raw volume")
        raw_data = self.get_raw(padded_offset_from_stack_px, padded_shape_px)

        raw_volume = Volume(raw_data, resolution=self.resolution)

        logger.debug("writing raw volume")
        with self:
            self._cremi_file.write_raw(raw_volume)
            self._cremi_file.h5file["volumes/raw"].attrs[
                "data_source"] = self.data_source
            self._cremi_file.h5file["volumes/raw"].attrs[
                "populated_on"] = self.timestamp
            self._cremi_file.h5file.attrs["roi_offset_from_stack"] = (
                padded_offset_from_stack_px *
                CoordZYX(self.resolution)).to_list()
Example #17
annotations = Annotations()
for id in [0, 1, 2, 3]:
    location = (random.randint(0, 100), random.randint(0, 100),
                random.randint(0, 100))
    annotations.add_annotation(id, "presynaptic_site", location)
for id in [4, 5, 6, 7]:
    location = (random.randint(0, 100), random.randint(0, 100),
                random.randint(0, 100))
    annotations.add_annotation(id, "postsynaptic_site", location)
for (pre, post) in [(0, 4), (1, 5), (2, 6), (3, 7)]:
    annotations.set_pre_post_partners(pre, post)
annotations.add_comment(6, "unsure")

# Open a file for writing (deletes previous file, if exists)
file = CremiFile("example.hdf", "w")

# Write the raw volume. This is given here just for illustration. For your
# submission, you don't need to store the raw data. We have it already.
raw = Volume(np.zeros((10, 100, 100), dtype=np.uint8),
             resolution=(40.0, 4.0, 4.0))
file.write_raw(raw)

# Write volumes representing the neuron and synaptic cleft segmentation.
neuron_ids = Volume(np.ones((10, 100, 100), dtype=np.uint64),
                    resolution=(40.0, 4.0, 4.0),
                    comment="just ones")
clefts = Volume(np.zeros((10, 100, 100), dtype=np.uint64),
                resolution=(40.0, 4.0, 4.0),
                comment="just zeros")
file.write_neuron_ids(neuron_ids)
file.write_clefts(clefts)

# Write synaptic partner annotations.
file.write_annotations(annotations)

file.close()
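The file written above can be read back through the same API; a short sketch based on the reading side of the CREMI example scripts:

# Open the file for reading
file = CremiFile("example.hdf", "r")

raw = file.read_raw()
neuron_ids = file.read_neuron_ids()
annotations = file.read_annotations()

print("raw resolution:", raw.resolution)
for (pre, post) in annotations.pre_post_partners:
    print(pre, "->", post)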
Example #18
def roi_and_rand_general(
        sample, half, defect_correct, project_folder,
        source_folder,
        result_file, caching=False, debug=False
):
    print('\nEvaluating spl{}_z{}'.format(sample, half))
    print('Result file: {}'.format(result_file))

    experiment_folder = os.path.join(project_folder, 'spl{}_z{}/'.format(sample, half))

    if caching:
        cache_filepath = os.path.join(
            experiment_folder,
            re.sub('.h5$', '', result_file) + '_roi_and_rand_cache.pkl'
        )
    else:
        cache_filepath = None

    if caching and os.path.isfile(cache_filepath):
        with open(cache_filepath, mode='rb') as f:
            voi_split, voi_merge, adapted_rand = pickle.load(f)

    else:

        if defect_correct:
            defect_correct_str = '_defect_correct'
        else:
            defect_correct_str = ''

        mc_result_key = 'z/{}/data'.format(half)

        # # Load stuff
        # source_folder = '/mnt/localdata02/jhennies/neuraldata/cremi_2016/170321_resolve_false_merges/'

        # # TODO: Change here when switching sample
        # ref_result_filepath = os.path.join(source_folder, 'cremi.spl{}.train.mcseg_betas.crop.axes_xyz.split_z.h5'.format(sample))
        # # TODO: Change here when switching half
        # ref_result_key = 'z/{}/beta_0.5'.format(half)

        gt_filepath = os.path.join(source_folder, 'cremi.spl{}.train.raw_neurons{}.crop.axes_xyz.split_z.h5'.format(sample, defect_correct_str))
        gt_key = 'z/{}/neuron_ids'.format(half)

        # ref_result, _, _ = vigra.analysis.relabelConsecutive(vigra.readHDF5(ref_result_filepath, ref_result_key), start_label=1, keep_zeros=True)
        if not debug:
            gt = vigra.readHDF5(gt_filepath, gt_key)
            vol_gt = Volume(gt)
            neuron_ids_evaluation = NeuronIds(vol_gt)

        mc_result_filepath = os.path.join(experiment_folder, result_file)

        if not debug:
            # Evaluate baseline
            mc_result = vigra.readHDF5(mc_result_filepath, mc_result_key)
            vol_mc_result = Volume(mc_result)
            (voi_split, voi_merge) = neuron_ids_evaluation.voi(vol_mc_result)
            adapted_rand = neuron_ids_evaluation.adapted_rand(vol_mc_result)
        else:
            voi_split = 1.09
            voi_merge = 0.70
            adapted_rand = 0.23

        if caching:
            with open(cache_filepath, mode='wb') as f:
                pickle.dump((voi_split, voi_merge, adapted_rand), f)

    print "\tvoi split   : " + str(voi_split)
    print "\tvoi merge   : " + str(voi_merge)
    print "\tadapted RAND: " + str(adapted_rand)

    return (voi_split, voi_merge, adapted_rand)
Example #19
    def write_multicremi(self, rows, path, mode="w-"):
        offset_shapes = []

        for _, row in rows.iterrows():
            offset_px, shape_px = self.row_to_offset_shape_px(row)
            offset_shapes.append(
                (np.array(offset_px.to_list()), np.array(shape_px.to_list())))

        super_offset_px, super_shape_px = get_superroi(*offset_shapes)
        super_offset_nm = (super_offset_px * RESOLUTION.to_list()
                           + TRANSLATION.to_list())

        raw_data = np.zeros(super_shape_px, dtype=np.uint8)
        cleft_data = np.zeros(super_shape_px, dtype=np.uint64)
        res_list = RESOLUTION.to_list()

        id_gen = IdGenerator()
        for col in [
                "conn_id", "pre_tnid", "post_tnid", "pre_skid", "post_skid"
        ]:
            id_gen.exclude.update(int(item) for item in rows[col])

        annotations = Annotations()

        zipped = list(zip(rows.iterrows(), offset_shapes))
        pre_to_conn = dict()

        for (_, row), (offset_px, shape_px) in tqdm(zipped,
                                                    desc="fetching data"):

            raw_slicing = tuple(
                slice(o - sup_o, o - sup_o + s)
                for sup_o, o, s in zip(super_offset_px, offset_px, shape_px))

            raw_data[raw_slicing] = self.get_raw(CoordZYX(offset_px),
                                                 CoordZYX(shape_px))

            conn_zyx = -super_offset_nm + [row["conn_" + dim] for dim in "zyx"]
            post_zyx = -super_offset_nm + [
                row["post_tn_" + dim] for dim in "zyx"
            ]
            post_id = int(row["post_tnid"])

            pre_zyx = make_presynaptic_loc(conn_zyx, post_zyx,
                                           EXTRUSION_FACTOR)
            pre_id = id_gen.next()
            pre_to_conn[pre_id] = int(row["conn_id"])

            annotations.add_annotation(pre_id, "presynaptic_site",
                                       list(pre_zyx))
            annotations.add_annotation(post_id, "postsynaptic_site",
                                       list(post_zyx))
            annotations.set_pre_post_partners(pre_id, post_id)

        pre_to_conn_arr = np.array(sorted(pre_to_conn.items()),
                                   dtype=np.uint64)

        logger.info("writing data")
        with closing(CremiFile(path, mode)) as f:
            f.write_raw(Volume(raw_data, resolution=res_list))
            f.write_clefts(Volume(cleft_data, resolution=res_list))
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = list(super_offset_nm)
            f.h5file.attrs["stack_offset"] = list(super_offset_px)
            f.h5file.attrs["annotation_version"] = ANNOTATION_VERSION
            f.h5file.attrs["next_id"] = id_gen.next()
            ds = f.h5file.create_dataset(Dataset.PRE_TO_CONN,
                                         data=pre_to_conn_arr)
            ds.attrs["explanation"] = PRE_TO_CONN_EXPL

        rows.to_hdf(path, "tables/connectors")
Example #20
    def write_monolithic_cremi(self, df, path, mode="a"):
        # todo: unfinished
        df = df.copy()
        xmax, ymax = -1, -1
        z_total = 0
        offsets = dict()
        logger.info("calculating ROIs")
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            if z_total:
                z_total += 3
            offset_px, shape_px = self.row_to_offset_shape_px(row)
            offset_nm = offset_px * RESOLUTION + TRANSLATION
            ymax = max(shape_px["y"], ymax)
            xmax = max(shape_px["x"], xmax)
            z_total += shape_px["z"]
            offsets[idx] = {
                "offset_px": offset_px,
                "shape_px": shape_px,
                "offset_nm": offset_nm,
            }

        raw = np.zeros((z_total, ymax, xmax), dtype=np.uint8)

        divider = np.ones((3, ymax, xmax), dtype=np.uint8) * 255
        divider[1, :, :] = 0

        annotations = Annotations()

        z_offsets = []
        stack_offsets_rows = []
        px_shapes_rows = []
        project_offsets_rows = []

        last_z = 0
        logger.info("fetching and writing data")
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            this_offsets = offsets[idx]
            stack_offsets_rows.append(this_offsets["offset_px"].to_list())
            project_offsets_rows.append(this_offsets["offset_nm"].to_list())
            px_shapes_rows.append(this_offsets["shape_px"].to_list())
            z_offsets.append(last_z)

            for side in ["pre", "post"]:
                local_coords = (
                    CoordZYX({dim: row[side + "_tn_" + dim]
                              for dim in "zyx"}) -
                    this_offsets["offset_nm"]).to_list()
                local_coords[0] += last_z * RESOLUTION["z"]
                annotations.add_annotation(int(row[side + "_tnid"]),
                                           side + "synaptic_site",
                                           local_coords)

            annotations.set_pre_post_partners(int(row["pre_tnid"]),
                                              int(row["post_tnid"]))

            raw[last_z:last_z + this_offsets["shape_px"]["z"],
                0:this_offsets["shape_px"]["y"],
                0:this_offsets["shape_px"]["x"], ] = self.get_raw(
                    this_offsets["offset_px"], this_offsets["shape_px"])
            last_z += this_offsets["shape_px"]["z"]
            raw[last_z:last_z + 3, :, :] = divider
            last_z += 3

        clefts = np.zeros(raw.shape, dtype=np.uint64)
        res_list = RESOLUTION.to_list()

        with closing(CremiFile(path, mode)) as f:
            f.write_raw(Volume(raw, resolution=res_list))
            f.write_clefts(Volume(clefts, resolution=res_list))
            f.write_annotations(annotations)

            f.h5file.attrs["project_offset"] = offset_nm.to_list()
            f.h5file.attrs["stack_offset"] = offset_px.to_list()
            for key, value in row.items():
                f.h5file.attrs[key] = value

        df.to_hdf(path, "tables/connectors")
        for name, this_table in zip(
            ["stack_offset", "shape_px", "project_offset"],
            [stack_offsets_rows, px_shapes_rows, project_offsets_rows],
        ):
            this_df = pd.DataFrame(this_table,
                                   columns=["z", "y", "x"],
                                   index=df.index)
            this_df.to_hdf(path, "tables/" + name)

        z_df = pd.DataFrame(z_offsets, index=df.index, columns=["z"])
        z_df.to_hdf(path, "tables/z_offset")