Code Example #1
File: ilp.py Project: dagophil/autocontext
    def extend_data_tzyxc(self, data_nr=None):
        """Extends the dimension of the dataset and its labels to tzyxc.

        If data_nr is None, all datasets are extended.
        :param data_nr: number of dataset
        """
        if data_nr is None:
            for i in range(self.data_count):
                self.extend_data_tzyxc(i)
        else:
            # Reshape the data with the correct axistags.
            data = self.get_data(data_nr)
            axisorder = self.get_axisorder(data_nr)
            if not hasattr(data, "axistags"):
                data = vigra.VigraArray(data, axistags=vigra.defaultAxistags(axisorder), dtype=data.dtype)
            new_data = reshape_tzyxc(data)

            # Save the reshaped dataset.
            output_folder, output_filename = os.path.split(self.get_cache_data_path(data_nr))
            output_path = os.path.join(output_folder, str(data_nr).zfill(4) + "_" + output_filename)
            if self.is_internal(data_nr):
                output_key = self.get_dataset_id(data_nr)
            else:
                output_key = self.get_data_key(data_nr)
            vigra.writeHDF5(new_data, output_path, output_key, compression=self._compression)

            # Update the project file.
            self.set_data_path_key(data_nr, output_path, output_key)
            self._set_internal(data_nr, False)
            self.set_axisorder(data_nr, "tzyxc")
            self._set_axistags_from_data(data_nr)

            # If the dataset has labels, reshape them.
            if self._label_block_count(data_nr) > 0:
                self._reshape_labels(data_nr, axisorder, "tzyxc")
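The caching step above reduces to writing a tagged array under an HDF5 key and then pointing the project file at that path/key pair. A minimal round-trip sketch, assuming hypothetical file and dataset names:

import numpy
import vigra

# build a small tzyxc-tagged volume (stand-in for the reshaped dataset)
data = numpy.zeros((1, 10, 256, 256, 1), dtype=numpy.float32)
tagged = vigra.VigraArray(data, axistags=vigra.defaultAxistags("tzyxc"), dtype=data.dtype)

# write it under a key and read it back, as the cache path/key mechanism does
vigra.writeHDF5(tagged, "/tmp/example_cache.h5", "exported_data", compression="gzip")
restored = vigra.readHDF5("/tmp/example_cache.h5", "exported_data")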
Code Example #2
    def testFromDataset2(self):
        shape = (500, 500, 500)

        vol = np.zeros(shape, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags='zxy')

        centers = [(45, 15), (45, 350), (360, 50)]
        extent = (10, 10)
        shift = (1, 1)
        zrange = np.arange(0, 20)
        zsteps = np.arange(5, 455, 50)

        for x, y in centers:
            for z in zsteps:
                for t in zrange:
                    sx = x + t * shift[0]
                    sy = y + t * shift[1]
                    vol[zsteps + t, sx - extent[0]:sx + extent[0],
                        sy - extent[0]:sy + extent[0]] = 255

        vol = vol.withAxes(*'xyz')

        # step by step
        op = OpLazyCC(graph=Graph())
        op.Input.setValue(vol)
        op.ChunkShape.setValue((64, 64, 64))
        out1 = np.zeros(op.Output.meta.shape, dtype=op.Output.meta.dtype)
        for z in reversed(range(500)):
            out1[..., z:z + 1] = op.Output[..., z:z + 1].wait()
        vigra.writeHDF5(out1, '/tmp/data.h5', 'data')
        out2 = vigra.analysis.labelVolumeWithBackground(vol)
        assertEquivalentLabeling(out1.view(np.ndarray), out2.view(np.ndarray))
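The test above checks the chunk-wise operator against vigra's plain connected-components labeling. A minimal sketch of that reference call on a toy volume (two separate foreground cubes give two labels, background stays 0):

import numpy as np
import vigra

vol = np.zeros((64, 64, 64), dtype=np.uint8)
vol[10:20, 10:20, 10:20] = 255
vol[40:50, 40:50, 40:50] = 255
labels = vigra.analysis.labelVolumeWithBackground(vol)
assert labels.max() == 2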
Code Example #3
def evaluate_mws(project_folder, sample, prediction, bb):
    stride = [2, 10, 10]
    with open('./mws_offsets.json', 'r') as f:
        affinity_offsets = json.load(f)
    mws = DamWatershed(affinity_offsets, stride, randomize_bounds=False)
    print('Predicting mws.. %s' % sample)
    mws_seg = mws(prediction).astype('int64')
    print('.. done, sample %s' % sample)

    gt_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi/sample%s/gt/sample%s_neurongt_automatically_realignedV2.h5' % (
        sample, sample)
    with h5py.File(gt_path, 'r') as f:
        gt = f['data'][bb].astype('int64')
    assert gt.shape == mws_seg.shape
    pred_path = os.path.join(project_folder, 'Predictions',
                             'prediction_sample%s.h5' % sample)
    vigra.writeHDF5(mws_seg, pred_path, 'mws', compression='gzip')
    # NOTE: this early quit() skips the evaluation below; drop it to compute the scores.
    quit()

    evals = cremi_scores(mws_seg, gt)
    with Lock():
        eval_file = os.path.join(project_folder, 'evaluation.json')
        if os.path.exists(eval_file):
            with open(eval_file, 'r') as f:
                res = json.load(f)
        else:
            res = {}

        res[sample] = evals
        with open(eval_file, 'w') as f:
            json.dump(res, f, indent=4, sort_keys=True)
Code Example #4
File: core.py Project: giserh/learnDistanceTransform
    def compute_distance_transform_on_gt(self, target="train"):
        """
        Compute the distance transform of the ground-truth edge image for the training or test data
        and save it to the cache folder.

        :param target: Either "train" or "test".
        """
        # Read the data.
        if target == "train":
            data = self.get_gt_train()
            file_name = os.path.join(self.cache_folder, "dists_train.h5")
            self.dists_train_path = file_name
        elif target == "test":
            data = self.get_gt_test()
            file_name = os.path.join(self.cache_folder, "dists_test.h5")
            self.dists_test_path = file_name
        else:
            raise Exception(
                'LPData.compute_distance_transform_on_gt(): Parameter "target" must be "train" or "test".'
            )

        # Compute the edge image and the distance transform.
        edges = skneuro.learning.regionToEdgeGt(data)
        edges[edges == 1] = 0
        edges[edges == 2] = 1
        dists = vigra.filters.distanceTransform3D(edges.astype(numpy.float32))

        # Save the result.
        vigra.writeHDF5(dists, file_name, self.dists_h5_key, compression="lzf")
Code Example #5
File: ilp.py Project: timoMa/autocontext
    def replace_labels(self,
                       data_nr,
                       blocks,
                       block_slices,
                       delete_old_blocks=True):
        """Replaces the labels and their block slices of the dataset.

        :param data_nr: number of dataset
        :param blocks: label blocks
        :param block_slices: block slices
        :param delete_old_blocks: whether the old blocks in the project file shall be deleted
        """
        if len(blocks) != len(block_slices):
            raise Exception(
                "The number of blocks and block slices must be the same.")
        if not delete_old_blocks:
            if len(blocks) != self._label_block_count(data_nr):
                raise Exception("Wrong number of label blocks to be inserted.")

        if delete_old_blocks:
            self.remove_labels(data_nr)

        proj = h5py.File(self.project_filename, "r+")
        for i in range(len(blocks)):
            vigra.writeHDF5(blocks[i], self.project_filename,
                            const.label_blocks(data_nr, i))
            h5_blocks = eval_h5(proj, const.label_blocks_list(data_nr, i))
            h5_blocks.attrs['blockSlice'] = block_slices[i]
        proj.close()
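Each label block is stored in two steps: the array goes in with vigra.writeHDF5, then its 'blockSlice' attribute is attached through h5py. A sketch with a hypothetical project file and dataset key:

import numpy
import h5py
import vigra

block = numpy.ones((1, 8, 64, 64, 1), dtype=numpy.uint8)
vigra.writeHDF5(block, "/tmp/project.ilp", "LabelSets/labels000/block0000")  # hypothetical key
with h5py.File("/tmp/project.ilp", "r+") as proj:
    proj["LabelSets/labels000/block0000"].attrs["blockSlice"] = "[0:1,0:8,0:64,0:64,0:1]"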
Code Example #6
def isbi12_multicut(ds_train_str, ds_test_str,
        seg_id_train, seg_id_test,
        local_feats_list, mc_params):

    meta.load()
    ds_train = meta.get_dataset(ds_train_str)
    ds_test = meta.get_dataset(ds_test_str)

    mc_node, mc_edges, mc_energy, t_inf = multicut_workflow(
            ds_train, ds_test,
            seg_id_train, seg_id_test,
            local_feats_list, mc_params)

    if ds_test_str == "isbi2012_train":
        return eval_lazy(mc_edges, ds_test._rag(seg_id_test) )

    else:
        assert ds_test_str == "isbi2012_test"
        res_folder = "/home/consti/Work/nature_experiments/results/isbi12"
        mc_seg = ds_test.project_mc_result( seg_id_test, mc_node )

        # save segmentation result
        seg_name = "_".join( ["mcresult", str(seg_id_test), "seg"] ) + ".h5"
        seg_path = os.path.join(res_folder, seg_name)
        vigra.writeHDF5(mc_seg, seg_path, "data")

        # save binary edges
        edge_name = "_".join( ["mcresult", str(seg_id_test), "edges"] ) + ".tif"
        edge_path = os.path.join(res_folder, edge_name)
        edge_vol = edges_to_binary(ds_test._rag(seg_id_test), mc_edges)
        vigra.impex.writeVolume(edge_vol, edge_path, '', dtype = np.uint8 )
        return 0, 0
Code Example #7
def evaluate_mc(project_folder, sample, prediction, bb):
    pred_path = os.path.join(project_folder, 'Predictions',
                             'prediction_sample%s_nnaffinities.h5' % sample)
    # prediction = vigra.readHDF5(pred_path, 'data')
    multicutter = local_affinity_multicut_from_wsdt2d(n_threads=12)
    mc_seg = multicutter(prediction).astype('int64')

    gt_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi/sample%s/gt/sample%s_neurongt_automatically_realignedV2.h5' % (
        sample, sample)
    with h5py.File(gt_path, 'r') as f:
        gt = f['data'][bb].astype('int64')
    assert gt.shape == mc_seg.shape
    vigra.writeHDF5(mc_seg, pred_path, 'multicut', compression='gzip')

    evals = cremi_scores(mc_seg, gt)

    eval_file = os.path.join(project_folder, 'evaluation.json')
    if os.path.exists(eval_file):
        with open(eval_file, 'r') as f:
            res = json.load(f)
    else:
        res = {}

    res[sample] = evals
    with open(eval_file, 'w') as f:
        json.dump(res, f, indent=4, sort_keys=True)
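The evaluation file is treated as a grow-only JSON dictionary keyed by sample: load it if it exists, update the entry, write it back. A stdlib-only sketch with a hypothetical path and placeholder scores:

import json
import os

eval_file = "/tmp/evaluation.json"                 # hypothetical path
res = {}
if os.path.exists(eval_file):
    with open(eval_file, "r") as f:
        res = json.load(f)
res["A"] = {"voi_split": 0.5, "voi_merge": 0.4}    # placeholder scores
with open(eval_file, "w") as f:
    json.dump(res, f, indent=4, sort_keys=True)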
Code Example #8
def run_mc(ds_train_name, ds_test_name, mc_params, save_path):

    assert os.path.exists(os.path.split(save_path)[0]), "Please choose an existing folder to save your results"

    # if you have added multiple segmentations, you can choose on which one to run
    # experiments with the seg_id
    seg_id = 0

    # these strings encode the features that are used for the local features
    feature_list = ['raw', 'prob', 'reg']

    meta.load()
    ds_train = meta.get_dataset(ds_train_name)
    ds_test  = meta.get_dataset(ds_test_name)

    # use this for running the mc without defected slices
    mc_nodes, _, _, _ = multicut_workflow(
            ds_train, ds_test,
            seg_id, seg_id,
            feature_list, mc_params)

    # use this for running the mc with defected slices
    #mc_nodes, _, _, _ = multicut_workflow_with_defect_correction(
    #        ds_train, ds_test,
    #        seg_id, seg_id,
    #        feature_list, mc_params)

    segmentation = ds_test.project_mc_result(seg_id, mc_nodes)
    vigra.writeHDF5(segmentation, save_path, 'data', compression = 'gzip')
Code Example #9
def make_superpix_from_intepolation(prob_path, prob_key, save_path, anisotropy):
    from wsDtSegmentation import wsDtSegmentation

    pmem = vigra.readHDF5(prob_path, prob_key)

    print pmem.shape
    print anisotropy

    # for some datasets, we have to invert the probabilities
    #probs = 1. - probs

    # interpolate the probability in z - direction
    print "doing spline interpolation"
    pmem_interpol = vigra.sampling.resize(pmem, shape=(pmem.shape[0], pmem.shape[1], anisotropy* pmem.shape[2]))
    pmem_interpol = np.array(pmem_interpol)
    print "Finished interpolation"

    superpix = wsDtSegmentation(pmem_interpol, 0.45, 20, 100, 1.6, 2.)[0]

    superpix = superpix[:,:,::anisotropy]

    #volumina_n_layer( [pmem, superpix.astype(np.uint32)] )

    assert superpix.shape == pmem.shape

    vigra.writeHDF5(superpix, save_path, "superpixel")
Code Example #10
    def testFromDataset2(self):
        shape = (500, 500, 500)

        vol = np.zeros(shape, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags="zxy")

        centers = [(45, 15), (45, 350), (360, 50)]
        extent = (10, 10)
        shift = (1, 1)
        zrange = np.arange(0, 20)
        zsteps = np.arange(5, 455, 50)

        for x, y in centers:
            for z in zsteps:
                for t in zrange:
                    sx = x + t * shift[0]
                    sy = y + t * shift[1]
                    vol[zsteps + t, sx - extent[0] : sx + extent[0], sy - extent[0] : sy + extent[0]] = 255

        vol = vol.withAxes(*"zyx")

        # step by step
        op = OpLazyCC(graph=Graph())
        op.Input.setValue(vol)
        op.ChunkShape.setValue((64, 64, 64))
        out1 = np.zeros(op.Output.meta.shape, dtype=op.Output.meta.dtype)
        for z in reversed(list(range(500))):
            out1[..., z : z + 1] = op.Output[..., z : z + 1].wait()
        vigra.writeHDF5(out1, "/tmp/data.h5", "data")
        out2 = vigra.analysis.labelVolumeWithBackground(vol)
        assertEquivalentLabeling(out1.view(np.ndarray), out2.view(np.ndarray))
Code Example #11
File: fib_blocks.py Project: weihuang527/cremi_tools
def segment_block(block_id, weight_edges=False, cached=False):
    import cremi_tools.segmentation as cseg
    raw_path = '/home/papec/Work/neurodata_hdd/fib25/raw/raw_block%i.h5' % block_id
    pmap_path = '/home/papec/Work/neurodata_hdd/fib25/pmaps/probs_squeezed_block%i.h5' % block_id
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_agglomerated_0.075000_block%i.h5' % block_id

    # load pmap and watersheds
    raw = vigra.readHDF5(raw_path, 'data').astype('float32')
    pmap = vigra.readHDF5(pmap_path, 'data')
    ws = vigra.readHDF5(ws_path, 'data')

    if cached:
        edge_probs = vigra.readHDF5('edge_probs_%i.h5' % block_id, 'data')
        rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
        # TODO edge sizes
    else:
        # feature extractor and multicut
        feature_extractor = cseg.RandomForestFeatures('./rf.pkl', True)
        # make graph and costs
        rag, edge_probs, _, edge_sizes = feature_extractor(pmap, ws, raw=raw)
        vigra.writeHDF5(edge_probs, 'edge_probs_%i.h5' % block_id, 'data')
    graph = nifty.graph.undirectedGraph(rag.numberOfNodes)
    graph.insertEdges(rag.uvIds())

    mc = cseg.Multicut('kernighan-lin', weight_edges=weight_edges)
    if weight_edges:
        costs = mc.probabilities_to_costs(edge_probs, edge_sizes)
    else:
        costs = mc.probabilities_to_costs(edge_probs)
    node_labels = mc(graph, costs)
    return nrag.projectScalarNodeDataToPixels(rag, node_labels)
Code Example #12
File: ilp.py Project: timoMa/autocontext
    def set_axisorder(self, data_nr, new_axisorder):
        """Sets the axisorder of the dataset.

        :param data_nr: number of dataset
        :param new_axisorder: new axisorder of dataset
        """
        h5_key = const.axisorder(data_nr)
        vigra.writeHDF5(new_axisorder, self.project_filename, h5_key)
Code Example #13
def convert_to_volume(options):
    # data = tifffile.imread(options.input_file)
    path, files = hack(options.input_file)
    os.chdir(path)
    data = tifffile.imread(files)
    reshapedData = hytra.util.axesconversion.adjustOrder(data, options.tif_input_axes, options.output_axes)
    logging.getLogger('stack_to_h5.py').info("Saving h5 volume of shape {}".format(data.shape))
    vigra.writeHDF5(reshapedData, options.output_file, options.output_path)
Code Example #14
File: ilp.py Project: timoMa/autocontext
    def set_axistags(self, data_nr, new_axistags):
        """Sets the axistags of the dataset (only in the project file, not in the dataset itself).

        :param data_nr: number of dataset
        :param new_axistags: new axistags of dataset
        """
        h5_key = const.axistags(data_nr)
        vigra.writeHDF5(new_axistags, self.project_filename, h5_key)
Code Example #15
File: ilp.py Project: dagophil/autocontext
    def set_axisorder(self, data_nr, new_axisorder):
        """Sets the axisorder of the dataset.

        :param data_nr: number of dataset
        :param new_axisorder: new axisorder of dataset
        """
        h5_key = const.axisorder(data_nr)
        vigra.writeHDF5(new_axisorder, self.project_filename, h5_key)
Code Example #16
File: ilp.py Project: dagophil/autocontext
    def set_axistags(self, data_nr, new_axistags):
        """Sets the axistags of the dataset (only in the project file, not in the dataset itself).

        :param data_nr: number of dataset
        :param new_axistags: new axistags of dataset
        """
        h5_key = const.axistags(data_nr)
        vigra.writeHDF5(new_axistags, self.project_filename, h5_key)
Code Example #17
def grow_WS(json_filename, config_dict, project_directory, experiment_name):
    if config_dict["WS_growing"]:
        return

    experiment_dir_path = os.path.join(project_directory, experiment_name)
    export_file = os.path.join(experiment_dir_path, 'out_segms',
                               json_filename.replace('.json', '.h5'))
    if not os.path.exists(export_file):
        return

    print(json_filename)

    post_proc_config = config_dict['postproc_config']
    offsets = get_dataset_offsets("CREMI")

    # Load affinities:
    print("Loading ", json_filename)
    affinities, _ = get_dataset_data("CREMI",
                                     config_dict["sample"],
                                     config_dict["crop"],
                                     run_connected_components=False)
    pred_segm = vigra.readHDF5(export_file, "segm")

    grow = SizeThreshAndGrowWithWS(
        post_proc_config['thresh_segm_size'],
        offsets,
        hmap_kwargs=post_proc_config['prob_map_kwargs'],
        apply_WS_growing=True,
        size_of_2d_slices=True,
        with_background=True)

    print("Computing WS ")
    try:
        pred_segm_WS = grow(affinities, pred_segm)
    except MemoryError:
        print("Memory error on ", json_filename)
        return

    # TODO: add option to compute scores (but check if test)
    # evals_WS = cremi_score(GT, pred_segm_WS, border_threshold=None, return_all_scores=True)
    # print("Scores achieved ({} - {} - {}): ".format(agglo_type, non_link, noise_factor), evals_WS)
    # new_results.update(
    #     {'energy': np.asscalar(MC_energy), 'score': evals, 'score_WS': evals_WS, 'runtime': out_dict['runtime']})

    # ------------------------------
    # SAVING RESULTS:
    # ------------------------------
    # TODO: save_growing True, delete old segm, update scores

    vigra.writeHDF5(pred_segm_WS.astype('uint64'), export_file, 'segm_WS')

    # Save config setup:
    config_dict["WS_growing"] = True

    json_file_path = os.path.join(experiment_dir_path, 'scores', json_filename)
    with open(json_file_path, 'w') as f:
        json.dump(config_dict, f, indent=4, sort_keys=True)
Code Example #18
File: stack_to_h5.py Project: tomldh/hytra
def convert_to_volume(options):
    # data = tifffile.imread(options.input_file)
    path, files = hack(options.input_file)
    os.chdir(path)
    data = tifffile.imread(files)
    reshapedData = hytra.util.axesconversion.adjustOrder(
        data, options.tif_input_axes, options.output_axes)
    logging.getLogger('stack_to_h5.py').info(
        "Saving h5 volume of shape {}".format(data.shape))
    vigra.writeHDF5(reshapedData, options.output_file, options.output_path)
Code Example #19
File: ilp.py Project: dagophil/autocontext
    def set_data_path_key(self, data_nr, new_path, new_key):
        """Sets file path and h5 key of the dataset.

        :param data_nr: number of dataset
        :param new_path: new file path
        :param new_key: new h5 key
        """
        rel_path = os.path.relpath(os.path.abspath(new_path), self.project_dir) + "/" + new_key
        h5_key = const.filepath(data_nr)
        vigra.writeHDF5(rel_path, self.project_filename, h5_key)
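The stored value is a single string: the HDF5 file path relative to the project directory, followed by the internal dataset key. A sketch with hypothetical paths:

import os

# hypothetical project directory and cache file
rel_path = os.path.relpath(os.path.abspath("/data/cache/0000_raw.h5"), "/data/project") + "/" + "exported_data"
# rel_path == "../cache/0000_raw.h5/exported_data"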
Code Example #20
    def cc_block(block_id):
        # get all the relevant blocks
        block = blocking.getBlockWithHalo(block_id, halo)
        inner_block, outer_block, local_block = block.innerBlock, block.outerBlock, block.innerBlockLocal

        # we offset with the coordinate of the leftmost pixel
        offset = sum(e * s for e, s in zip(inner_block.begin, shape))

        # get all bounding boxes
        bb_outer = tuple(
            slice(b, e) for b, e in zip(outer_block.begin, outer_block.end))
        begin, end = inner_block.begin, inner_block.end
        bb_inner = tuple(slice(b, e) for b, e in zip(begin, end))
        bb_local = tuple(
            slice(b, e) for b, e in zip(local_block.begin, local_block.end))
        outer_shape = outer_block.shape

        # get the subvolume, find connected components and write non-overlapping part to file
        subvolume = ds[bb_outer]
        cc = vigra.analysis.labelVolumeWithBackground(subvolume).astype(
            'uint64')
        cc[cc != 0] += offset
        ds_out[bb_inner] = cc[bb_local]

        # serialize all the overlaps
        overlap_ids = []
        for ii in range(6):
            axis = ii // 2
            to_lower = ii % 2
            neighbor_id = blocking.getNeighborId(block_id,
                                                 axis=axis,
                                                 lower=to_lower)

            if neighbor_id != -1:
                overlap_bb = tuple(
                    slice(None) if i != axis else
                    slice(0, 2) if to_lower else slice(outer_shape[i] -
                                                       2, outer_shape[i])
                    for i in range(3))

                overlap = cc[overlap_bb]

                vigra.writeHDF5(
                    overlap,
                    os.path.join(tmp_folder,
                                 'block_%i_%i.h5' % (block_id, neighbor_id)),
                    'data',
                    compression='gzip')

                # we only return the overlap ids, if the block id is smaller than the neighbor id,
                # to keep the pairs unique
                if block_id < neighbor_id:
                    overlap_ids.append((block_id, neighbor_id))
        max_id = int(cc.max())
        return overlap_ids, max_id
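The per-block offset is what keeps component labels unique across blocks once the pieces are written back. A simplified sketch of the same idea, using a running offset from the previous block's maximum label instead of the coordinate-derived offset above:

import numpy as np
import vigra

blocks = [np.random.randint(0, 2, (32, 32, 32)).astype(np.uint8) for _ in range(2)]
offset = 0
labeled = []
for block in blocks:
    cc = vigra.analysis.labelVolumeWithBackground(block).astype('uint64')
    cc[cc != 0] += offset          # shift this block's labels past all previous ones
    offset = int(cc.max())
    labeled.append(cc)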
Code Example #21
File: ilp.py Project: timoMa/autocontext
    def set_data_path_key(self, data_nr, new_path, new_key):
        """Sets file path and h5 key of the dataset.

        :param data_nr: number of dataset
        :param new_path: new file path
        :param new_key: new h5 key
        """
        rel_path = os.path.relpath(os.path.abspath(new_path),
                                   self.project_dir) + "/" + new_key
        h5_key = const.filepath(data_nr)
        vigra.writeHDF5(rel_path, self.project_filename, h5_key)
Code Example #22
File: ilp.py Project: dagophil/autocontext
    def _set_internal(self, data_nr, val):
        """Sets the ilp flag of the given dataset to "ProjectInternal" if val is True, else to "FileSystem".

        :param data_nr: number of dataset
        :param val: whether to set the flag to "ProjectInternal" or "FileSystem"
        """
        h5_key = const.datalocation(data_nr)
        if val:
            vigra.writeHDF5("ProjectInternal", self.project_filename, h5_key)
        else:
            vigra.writeHDF5("FileSystem", self.project_filename, h5_key)
Code Example #23
File: ilp.py Project: timoMa/autocontext
    def _set_internal(self, data_nr, val):
        """Sets the ilp flag of the given dataset to "ProjectInternal" if val is True, else to "FileSystem".

        :param data_nr: number of dataset
        :param val: whether to set the flag to "ProjectInternal" or "FileSystem"
        """
        h5_key = const.datalocation(data_nr)
        if val:
            vigra.writeHDF5("ProjectInternal", self.project_filename, h5_key)
        else:
            vigra.writeHDF5("FileSystem", self.project_filename, h5_key)
Code Example #24
def extract_scale_leve(scale):
    assert scale >= 6
    sys.path.append('/home/papec/Work/my_projects/z5/bld/python')
    import z5py
    path = '/home/papec/mnt/saalfeldlab/FAFB00/v14_align_tps_20170818_dmg.n5/volumes/raw'
    key = 's%i' % scale
    data = z5py.File(path, use_zarr_format=False)[key][:]
    out_path = '/home/papec/Work/neurodata_hdd/fafb/raw.h5'
    vigra.writeHDF5(data,
                    out_path,
                    key,
                    compression='gzip',
                    chunks=(64, 64, 64))
Code Example #25
def project_new_result(ds_name, meta_folder, new_nodes_filepath, save_path,
                       results_name):

    ds = load_dataset(meta_folder, ds_name)
    seg_id = 0

    # Load resolving result
    with open(new_nodes_filepath) as f:
        new_node_labels = pickle.load(f)

    # project the result back to the volume
    mc_seg = ds.project_mc_result(seg_id, new_node_labels)

    # Write the result
    vigra.writeHDF5(mc_seg, save_path, results_name, compression='gzip')
Code Example #26
def project_gt_isbi2012():
    labels_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-labels.h5"
    gt_path     = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc.h5"
    raw_path    = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-volume.h5"

    labels  = vigra.readHDF5(labels_path, "labels")
    gt      = vigra.readHDF5(gt_path, "gt")
    raw     = vigra.readHDF5(raw_path, "data")

    gt = project_gt(labels, gt)

    save_path     = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc_bkg.h5"

    volumina_n_layer( (raw, gt, labels) )

    vigra.writeHDF5(gt, save_path, "gt")
Code Example #27
def compute_disc_rank_order(raw, save_path):

    print "Computing DiscRankOrder"

    list_path    = save_path + "feats"
    if os.path.exists(list_path):
        feature_file = open(list_path, 'a')
    else:
        feature_file = open(list_path, 'w')

    feat_array = np.zeros(raw.shape)

    for z in range(raw.shape[2]):
        feat_array[:,:,z] = vigra.filters.discRankOrderFilter(raw[:,:,z])

    path = save_path + "DiscRankOrder" + ".h5"
    vigra.writeHDF5(feat_array.astype(np.float32), path, "data")
Code Example #28
def agglomerate_sp(ws_path, prob_path, out_path, threshold):
    probs = vigra.readHDF5(prob_path, 'data')

    ws = vigra.readHDF5(ws_path, 'data')
    n_nodes = int(ws.max()) + 1

    rag = nrag.gridRag(ws, numberOfLabels=n_nodes)
    graph = nifty.graph.undirectedGraph(n_nodes)
    graph.insertEdges(rag.uvIds())

    agglomerator = cseg.MalaClustering(threshold)
    node_labeling = agglomerator(graph, probs)
    vigra.analysis.relabelConsecutive(node_labeling, out=node_labeling)
    seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
    view([ws, seg])

    vigra.writeHDF5(seg, out_path, 'data', compression='gzip')
Code Example #29
def compute_nonlinear_diffusion(raw, save_path, edge_threshold, scale):

    print "Computing NonLinearDiffusion"

    list_path    = save_path + "feats"
    if os.path.exists(list_path):
        feature_file = open(list_path, 'a')
    else:
        feature_file = open(list_path, 'w')

    feat_array = np.zeros(raw.shape)

    for z in range(raw.shape[2]):
        feat_array[:,:,z] = vigra.filters.nonlinearDiffusion(raw[:,:,z], edge_threshold, scale)

    path = save_path + "NonLinearDiffusion" + ".h5"
    vigra.writeHDF5(feat_array.astype(np.float32), path, "data")
Code Example #30
def segment_sample(sample):

    aff_path = '%s' % sample

    print("Load affinities")
    affs = 1. - z5py.File(aff_path)['predictions/full_affs'][:]
    # affs = 1. - vigra.readHDF5('./sampleB+_affs_cut.h5', 'data')
    print("done")

    # TODO multi-threaded
    print("making oversegmentation")
    seg = make_oversegmentation(affs, 8)
    print("done")

    # for z in range(seg.shape[0]):
    #     print(seg[z].min(), seg[z].max(), seg[z].max() - seg[z].min())
    # quit()

    print("computing features")
    rag, lr_uvs, local_prob, lr_prob = compute_features(seg, affs)
    print("done")
    assert rag.numberOfEdges == len(local_prob)
    assert len(lr_uvs) == len(lr_prob)

    uvs = rag.uvIds()
    n_nodes = rag.numberOfNodes
    assert lr_uvs.max() + 1 == n_nodes

    print("compute mutex clustering")
    # TODO do I need to invert the lr weights ?!
    lr_prob = 1. - lr_prob
    t0 = time.time()
    node_labeling = nmws.computeMwsClustering(n_nodes, uvs.astype('uint32'),
                                              lr_uvs.astype('uint32'),
                                              local_prob, lr_prob)
    assert len(node_labeling) == n_nodes
    print("done in", time.time() - t0, "s")

    # get segmentation
    mws_seg = nrag.projectScalarNodeDataToPixels(rag, node_labeling)
    out_path = '' % sample
    vigra.writeHDF5(mws_seg,
                    out_path,
                    'volumes/labels/neuron_ids',
                    compression='gzip')
Code Example #31
File: ia_05_02.py Project: tikhoncheva/unigit
def ex2():
    import matplotlib.image as mpimg
    import os.path
    L = mpimg.imread('scene1_row3_col1.png')
    R = mpimg.imread('scene1_row3_col3.png')
    GT = mpimg.imread('truedisp_row3_col3.png')
    
    P = PatchMaker()
    
    fn = 'cost_volume.h5'
    
    if not os.path.exists(fn):
        cost = P.computeCostVolume(L,R)
        vigra.writeHDF5(cost, fn, 'data')
    else:
        cost = vigra.readHDF5(fn, 'data')
        cost = cost.view(np.ndarray)
        
    scost = vigra.filters.gaussianSmoothing(\
        vigra.taggedView(cost.astype(np.float32),\
        axistags=vigra.defaultAxistags('yxc')),3.5)
    SD = normalize(np.argmax(\
        np.flipud(scost.swapaxes(0,2)).swapaxes(0,2), axis=2))
    D = normalize(np.argmax(\
        np.flipud(cost.swapaxes(0,2)).swapaxes(0,2), axis=2))

    plot.subplot(2,2,1)
    p = plot.imshow(L)
    plot.title('Left Image')
    plot.subplot(2,2,2)
    plot.imshow(R)
    plot.title('Right Image')
    plot.subplot(2,2,3)
    p = plot.imshow(D)
    p.set_cmap('hot')
    plot.title('Disparity Map')
    plot.subplot(2,2,4)
    p = plot.imshow(SD)
    p.set_cmap('hot')
    plot.title('Smoothened Disparity Map')
    
    plot.show()
Code Example #32
def postprocess_mask(prediction_path, raw_path, out_path):

    # get rid of axis tags
    prediction = vigra.readHDF5(prediction_path,
                                'exported_data').view(np.ndarray)
    # TODO transpose ?!
    prediction = prediction.transpose((2, 1, 0, 3))

    mask = postprocess_ilastik_predictions(prediction[..., 2])

    print("Percentage in mask:", np.sum(mask) / mask.size)

    # save mask
    vigra.writeHDF5(mask, out_path, 'data', compression='gzip')

    # I screwed that one up...
    raw = vigra.readHDF5(raw_path, 'key')
    view([raw, prediction, mask])
Code Example #33
    def create_simple_input(cls, name, data):
        """
        Creates a file named '{name}_{input_axes}' from 'data' with axis order 'input_axes'


        Args:
            name (str): first part of the name for created h5 file (without '.h5')
            data (numpy.ndarray, Vigra.VigraArray): data (for numpy axis order is 'tczyx')
            input_axes (str): desired axis order. Is added to the name.
        """
        write_at = os.path.join(cls.PROJECT_FILE_BASE, 'inputdata', name + '_' + 'simple.h5')
        if not os.path.exists(write_at):
            assert isinstance(data, vigra.VigraArray)

            vigra.writeHDF5(data, write_at, 'simple')
            cls.created_data.append(write_at)

        return write_at
Code Example #34
File: get_gt.py Project: constantinpape/stuff_master
def gt_pedunculus():
    labels_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401_pedunculus_membrane_labeling.tif"
    raw_path    = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"

    labels = vigra.readVolume(labels_path)

    labels = np.squeeze(labels)
    labels = np.delete(labels, 6, axis = 2)

    raw = vigra.readHDF5(raw_path, "data")

    labels = preprocess_for_bgsmoothing_pedunculus(labels)
    gt = smooth_background(labels).astype(np.uint32)

    volumina_n_layer( (raw, labels, gt) )

    gt_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/ground_truth_seg.h5"
    vigra.writeHDF5(gt, gt_path, "gt")
Code Example #35
def predict_rf(rfs, n_threads=2, save=False, save_name="prediction.h5"):

    with futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
        tasks = []
        for t in xrange(n_threads):
            tasks.append(executor.submit(rfs[t].predictProbabilities, X))
        sub_probs = [
            rfs[ii].treeCount() * tt.result() for ii, tt in enumerate(tasks)
        ]
        probs = np.sum(sub_probs, axis=0)
        probs /= n_trees

        if save:
            if not os.path.exists('../results'):
                os.mkdir('../results')
            vigra.writeHDF5(probs.reshape((shape[0], shape[1], shape[2], 4)),
                            '../results/' + save_name, 'data')
    return probs
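The averaging above weights each forest's class probabilities by its tree count before normalizing. A self-contained sketch of that scheme on toy data, assuming vigra's RandomForest bindings (learnRF / predictProbabilities / treeCount):

import numpy as np
import vigra

X = np.random.rand(100, 5).astype(np.float32)
X[:, 0] = np.linspace(0, 1, 100, dtype=np.float32)
y = (X[:, 0] > 0.5).astype(np.uint32).reshape(-1, 1)

rfs = [vigra.learning.RandomForest(treeCount=10) for _ in range(2)]
for rf in rfs:
    rf.learnRF(X, y)

# weight each forest's probabilities by its tree count, then normalize
probs = sum(rf.treeCount() * rf.predictProbabilities(X) for rf in rfs)
probs /= sum(rf.treeCount() for rf in rfs)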
Code Example #36
    def create_simple_input(cls, name, data):
        """
        Creates a file named '{name}_{input_axes}' from 'data' with axis order 'input_axes'


        Args:
            name (str): first part of the name for created h5 file (without '.h5')
            data (numpy.ndarray, Vigra.VigraArray): data (for numpy axis order is 'tczyx')
            input_axes (str): desired axis order. Is added to the name.
        """
        write_at = os.path.join(cls.PROJECT_FILE_BASE, "inputdata", name + "_" + "simple.h5")
        if not os.path.exists(write_at):
            assert isinstance(data, vigra.VigraArray)

            vigra.writeHDF5(data, write_at, "simple")
            cls.created_data.append(write_at)

        return write_at
Code Example #37
File: ilp.py Project: timoMa/autocontext
    def extend_data_tzyxc(self, data_nr=None):
        """Extends the dimension of the dataset and its labels to tzyxc.

        If data_nr is None, all datasets are extended.
        :param data_nr: number of dataset
        """
        if data_nr is None:
            for i in range(self.data_count):
                self.extend_data_tzyxc(i)
        else:
            # Reshape the data with the correct axistags.
            data = self.get_data(data_nr)
            axisorder = self.get_axisorder(data_nr)
            if not hasattr(data, "axistags"):
                data = vigra.VigraArray(
                    data,
                    axistags=vigra.defaultAxistags(axisorder),
                    dtype=data.dtype)
            new_data = reshape_tzyxc(data)

            # Save the reshaped dataset.
            output_folder, output_filename = os.path.split(
                self.get_cache_data_path(data_nr))
            output_path = os.path.join(
                output_folder,
                str(data_nr).zfill(4) + "_" + output_filename)
            if self.is_internal(data_nr):
                output_key = self.get_dataset_id(data_nr)
            else:
                output_key = self.get_data_key(data_nr)
            vigra.writeHDF5(new_data,
                            output_path,
                            output_key,
                            compression=self._compression)

            # Update the project file.
            self.set_data_path_key(data_nr, output_path, output_key)
            self._set_internal(data_nr, False)
            self.set_axisorder(data_nr, "tzyxc")
            self._set_axistags_from_data(data_nr)

            # If the dataset has labels, reshape them.
            if self._label_block_count(data_nr) > 0:
                self._reshape_labels(data_nr, axisorder, "tzyxc")
Code Example #38
def stitch_fib_blocks(block_folder, out_path, ovlp_threshold=.01):
    # example file path:
    # 'result_x_5000_5520_y_2480_3000_z_3480_4000.h5'

    block_files = os.listdir(block_folder)

    # get the block coordinates
    block_coordinates = []
    for f in block_files:
        split = f.split('_')[1:]
        # get rid of the .h5 ending
        split[-1] = split[-1][:-3]
        split = [int(sp) for sp in split if sp.isdigit()]
        coords = np.s_[split[0]:split[1],
                       split[2]:split[3],
                       split[4]:split[5]]
        block_coordinates.append(coords)

    # load all the blocks
    print("Loading blocks")
    blocks = [vigra.readHDF5(os.path.join(block_folder, f), 'data') for f in block_files]

    # get the overlaps
    print("Extracting overlaps")
    overlaps = {}
    for id_a, id_b in combinations(range(len(blocks)), 2):
        get_ovlp(id_a, id_b, block_coordinates, blocks, overlaps)

    # we expect this to be 12
    print("Number of overlaps:", len(overlaps))

    # run stitching
    print("Running stitcher")
    segmentation = stitch_segmentations_by_overlap(blocks, block_coordinates,
                                                   overlaps,
                                                   ovlp_threshold=ovlp_threshold)

    if HAVE_VIEWER:
        view([segmentation])
    print("Save segmentation")
    # TODO add coordinate offsets to save name
    # out_path += 'x_%i_%i_y_%i_%i_z_%i_%i' % zip()
    vigra.writeHDF5(segmentation, out_path, 'data', compression='gzip')
Code Example #39
def project_gt_pedunculus():
    labels_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401_pedunculus_membrane_labeling.tif"
    gt_path     = "/home/constantin/Work/data_ssd/data_080515/pedunculus/gt_mc.h5"
    raw_path    = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"

    labels = vigra.readVolume(labels_path)
    labels = np.squeeze(labels)
    labels = np.delete(labels, 6, axis = 2)

    gt = vigra.readHDF5(gt_path, "gt")
    raw = vigra.readHDF5(raw_path, "data")

    gt = project_gt(labels, gt)

    save_path     = "/home/constantin/Work/data_ssd/data_080515/pedunculus/gt_mc_bkg.h5"

    volumina_n_layer( (raw, gt, labels) )

    vigra.writeHDF5(gt, save_path, "gt")
Code Example #40
def superpixels(pmaps, outfile=None):
    import wsdt
    from wsdt import wsDtSegmentation
    # 2d distance transform superpixel for the probability maps
    #pmap_path = "/path/to/neurocut_examples/probability_map.h5"
    #pmap_key  = "data"
    #pmaps = vigra.readHDF5(pmap_path, pmap_key)

    # parameters for the watershed on distance trafo

    # threshold for computing the distance trafo
    threshold = 0.5
    # minimal size of connected components that are taken into account
    # for the distance trafo
    min_mem = 50
    # minimal size of segments in the result
    min_seg = 75
    # sigma for smoothing the seed map
    sig_seeds = 1.6
    # sigma for smoothing the weight map
    sig_weights = 2.0

    segmentation = numpy.zeros_like(pmaps, dtype = numpy.uint32)
    # we need an offset for each slice, because we need distinct ids in each slice
    offset = 0
    # iterate over the z-slices and perform the wsdt in each
    #for z in xrange(segmentation.shape[2]):
    segmentation[:,:] = wsDtSegmentation(
        pmaps[:,:], threshold,
        min_mem, min_seg,
        sig_seeds, sig_weights)
    # add the offset
    #segmentation[:,:] += offset
    # get the new offset
    #offset = numpy.max(segmentation)

    # save the result
    if outfile is not None:
        save_key  = "superpixel"

        vigra.writeHDF5(segmentation, outfile, save_key)

    return segmentation
Code Example #41
def run_lmc(ds_train_name, ds_test_name, mc_params, save_path):

    assert os.path.exists(os.path.split(save_path)[0]), "Please choose an existing folder to save your results"

    # if you have added multiple segmentations, you can choose on which one to run
    # experiments with the seg_id
    seg_id = 0

    # these strings encode the features that are used for the local features
    feature_list = ['raw', 'prob', 'reg']

    # these strings encode the features that will be used for the lifted edges
    feature_list_lifted = ['cluster', 'reg']

    # this factor determines the weighting of lifted vs. local edge costs
    gamma = 2.

    meta.load()
    ds_train = meta.get_dataset(ds_train_name)
    ds_test  = meta.get_dataset(ds_test_name)

    # need to make filters for the trainset beforehand
    ds_train.make_filters(0, mc_params.anisotropy_factor)
    ds_train.make_filters(1, mc_params.anisotropy_factor)

    # use this for running the mc without defected slices
    mc_nodes, _, _, _ = lifted_multicut_workflow(
            ds_train, ds_test,
            seg_id, seg_id,
            feature_list, feature_list_lifted,
            mc_params, gamma = gamma)

    # use this for running the mc with defected slices
    #mc_nodes, _, _, _ = lifted_multicut_workflow_with_defect_correction(
    #        ds_train, ds_test,
    #        seg_id, seg_id,
    #        feature_list, feature_list_lifted,
    #        mc_params, gamma = gamma)

    segmentation = ds_test.project_mc_result(seg_id, mc_nodes)
    vigra.writeHDF5(segmentation, save_path, 'data', compression = 'gzip')
Code Example #42
File: get_gt.py Project: constantinpape/stuff_master
def gt_sopnetcompare( gt_path ):

    gt = vigra.readHDF5(gt_path, "gt")

    gt = vigra.analysis.labelVolumeWithBackground(gt)

    unique_labs = np.unique(gt)

    i = 0
    for l in unique_labs:
        if l != i:
            print "Non-consecutive labeling:"
            print l, i
            quit()
        i += 1

    print "Consecutive labeling"

    save_path = gt_path[:-3] + "_smoothed.h5"

    vigra.writeHDF5(gt, save_path, "gt")
Code Example #43
File: core.py Project: giserh/learnDistanceTransform
    def predict(self, file_name=None, invert_gt=False, lam=0.1):
        """Predict the test data and [optional] save the predicted labels.

        :param file_name: output file name, if file_name is None, no output file will be produced
        :param invert_gt: whether the ground truth values were modified by exp(-lam * gt)
        :param lam: the value of lam used for the inversion
        :return: predicted labels of the test data
        """
        log.info("Predicting.")
        pred = self.rf_regressor.predict(self.get_test_x())
        log.info("... done with predicting.")

        # Revert the values.
        if invert_gt:
            pred = LPData.e_power_inv(pred, lam)

        # Save the output.
        if file_name is not None:
            vigra.writeHDF5(pred, file_name, self.pred_h5_key)
            self.pred_path = file_name

        return pred
Code Example #44
def save_weights_as_h5():

    e_opengm = vigra.readHDF5("edge_weights_opengm.h5", "energy")

    #f = open("ccc3dprobs.txt", 'r')

    #e_ccc3d = []
    #for line in f:
    #    num = float(line[:-1])
    #    e_ccc3d.append(num)

    #e_ccc3d = np.array(e_ccc3d)

    e_ccc3d = np.loadtxt("ccc3dweights.txt")

    print "opengm:"
    print e_opengm.shape

    print "ccc3d:"
    print e_ccc3d.shape

    vigra.writeHDF5(e_ccc3d, "edge_weights_ccc3d.h5", "energy")
Code Example #45
File: ilp.py Project: dagophil/autocontext
    def replace_labels(self, data_nr, blocks, block_slices, delete_old_blocks=True):
        """Replaces the labels and their block slices of the dataset.

        :param data_nr: number of dataset
        :param blocks: label blocks
        :param block_slices: block slices
        :param delete_old_blocks: whether the old blocks in the project file shall be deleted
        """
        if len(blocks) != len(block_slices):
            raise Exception("The number of blocks and block slices must be the same.")
        if not delete_old_blocks:
            if len(blocks) != self._label_block_count(data_nr):
                raise Exception("Wrong number of label blocks to be inserted.")

        if delete_old_blocks:
            self.remove_labels(data_nr)

        proj = h5py.File(self.project_filename, "r+")
        for i in range(len(blocks)):
            vigra.writeHDF5(blocks[i], self.project_filename, const.label_blocks(data_nr, i))
            h5_blocks = eval_h5(proj, const.label_blocks_list(data_nr, i))
            h5_blocks.attrs['blockSlice'] = block_slices[i]
        proj.close()
Code Example #46
File: evaluate.py Project: weihuang527/cremi_tools
def gt_projection(block_id):
    ws_path = '/home/papec/Work/neurodata_hdd/fib25/watersheds/watershed_block%i.h5' % block_id
    ws = vigra.readHDF5(ws_path, 'data')
    ws = vigra.analysis.labelVolume(ws.astype('uint32'))
    gt = vigra.readHDF5('/home/papec/Work/neurodata_hdd/fib25/gt/gt_block%i.h5' % block_id,
                        'data')

    rag = nrag.gridRag(ws, numberOfLabels=int(ws.max()) + 1)
    labeling = nrag.gridRagAccumulateLabels(rag, gt)

    projected = Volume(nrag.projectScalarNodeDataToPixels(rag, labeling))

    metrics = NeuronIds(Volume(gt))
    vi_s, vi_m = metrics.voi(projected)
    are = metrics.adapted_rand(projected)

    print(vi_s)
    print(vi_m)
    print(are)
    print()

    os.remove(ws_path)
    vigra.writeHDF5(ws, ws_path, 'data', compression='gzip')
Code Example #47
def inference(project_folder,
              sample,
              gpu,
              prefix=None,
              only_nn_channels=False):

    data_config_file = './template_config/prediction_configs/sample%s.yml' % sample
    cremi = RawVolumeWithDefectAugmentation.from_config(data_config_file)

    # Load model
    checkpoint = os.path.join(project_folder, 'Weights')
    trainer = Trainer().load(from_directory=checkpoint, best=True)
    model = trainer.model.cuda(gpu)

    print("[*] Start inference on gpu:", gpu)
    inference_config_file = './template_config/prediction_configs/inference_config.yml'
    inference_engine = SimpleInferenceEngine.from_config(
        inference_config_file, model, gpu)
    output = inference_engine.infer(cremi).astype('float32')

    print("[*] Output has shape {}".format(str(output.shape)))
    save_folder = os.path.join(project_folder, 'Predictions')
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    if prefix is None:
        save_path = os.path.join(save_folder,
                                 'prediction_sample%s.h5' % sample)
    else:
        save_path = os.path.join(
            save_folder, 'prediction_%s_sample%s.h5' % (prefix, sample))

    if only_nn_channels:
        output = output[:3]
        save_path = save_path[:-3] + '_nnaffinities.h5'

    vigra.writeHDF5(output, save_path, 'data', compression='gzip')
    return output
Code Example #48
def find_overlaps(block_id, blocking, ws, inner_block, outer_block,
                  local_block, halo, tmp_folder):
    # serialize the overlaps
    overlap_ids = []
    for ii in range(6):
        axis = ii // 2
        to_lower = ii % 2
        neighbor_id = blocking.getNeighborId(block_id,
                                             axis=axis,
                                             lower=to_lower)

        if neighbor_id != -1:
            overlap_bb = tuple(
                slice(None) if i != axis else slice(0, 2 * halo[i]) if to_lower
                else slice(inner_block.end[i] - halo[i] -
                           outer_block.begin[i], outer_block.end[i] -
                           outer_block.begin[i]) for i in range(3))

            overlap = ws[overlap_bb]

            ovlp_path = os.path.join(
                tmp_folder, 'block_%i_%i.h5' % (block_id, neighbor_id))
            vigra.writeHDF5(overlap, ovlp_path, 'data', compression='gzip')

            with h5py.File(ovlp_path, 'a') as f:
                attrs = f['data'].attrs
                attrs['overlap_dimension'] = axis
                attrs['overlap_begin'] = tuple(local_block.begin[i]
                                               for i in range(3))
                attrs['overlap_end'] = tuple(local_block.end[i]
                                             for i in range(3))

            # we only return the overlap ids, if the block id is smaller than the neighbor id,
            # to keep the pairs unique
            if block_id < neighbor_id:
                overlap_ids.append((block_id, neighbor_id))
    return overlap_ids
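The attributes written above make each overlap file self-describing, so a later stitching step can recover the axis and local extent without re-deriving the blocking. A sketch of reading one back, assuming a hypothetical overlap file for blocks 0 and 1:

import h5py
import vigra

ovlp_path = '/tmp/block_0_1.h5'               # hypothetical path
overlap = vigra.readHDF5(ovlp_path, 'data')
with h5py.File(ovlp_path, 'r') as f:
    attrs = f['data'].attrs
    axis = attrs['overlap_dimension']
    begin, end = attrs['overlap_begin'], attrs['overlap_end']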
Code Example #49
pred = vigra.taggedView(pred, axistags='zxyc')

i = 0
for x, y in centers:
    i += 1
    for t in zrange:
        sx = x+t*shift[0]
        sy = y+t*shift[1]
        pred[t, sx-extent[0]:sx+extent[0], sy-extent[0]:sy+extent[0], i%2 +1] += .8

predNull = pred[..., 0]
predNull[np.logical_and(pred[..., 1] < .5, pred[..., 2] < .5)] += .8

vol = vol.withAxes(*'xyz')
pred = pred.withAxes(*'xyzc')
vigra.writeHDF5(vol, directory + 'data.h5', '/data')
vigra.writeHDF5(pred, directory + 'pred.h5', '/data')


vol = vol.withAxes(*'xyzt')
pred = pred.withAxes(*'xyzct')

volflipped = np.flipud(vol)
predflipped = np.flipud(pred)

vol = np.concatenate((vol, volflipped), axis=3)
assert len(vol.shape) == 4
vol = vigra.taggedView(vol, axistags='xyzt')

pred = np.concatenate((pred, predflipped), axis=4)
assert len(pred.shape) == 5
Code Example #50
import numpy as np
import vigra

directory = '/home/burger/hci/hci-data/test/'

shape = (500, 500, 500)

vol = np.zeros(shape, dtype=np.uint8)
vol = vigra.taggedView(vol, axistags='zxy')

centers = [(45, 15), (45, 350), (360, 50)]
extent = (10, 10)
shift = (1, 1)
zrange = np.arange(0, 20)
zsteps = np.arange(5, 455, 50)

for x, y in centers:
    for z in zsteps:
        for t in zrange:
            sx = x+t*shift[0]
            sy = y+t*shift[1]
            vol[zsteps + t, sx-extent[0]:sx+extent[0], sy-extent[0]:sy+extent[0]] = 255

vol = vol.withAxes(*'xyz')
vigra.writeHDF5(vol, directory + 'huge_segmentation.h5', '/data', compression='gzip')
Code Example #51
File: get_gt.py Project: constantinpape/stuff_master
def gt_isbi2013():
    labels_path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/ground_truth/ground-truth.h5"
    raw_path    = "/home/constantin/Work/data_ssd/data_150615/isbi2013/train-input.h5"

    gt_ignore   = vigra.readHDF5(labels_path, "gt")
    raw         = vigra.readHDF5(raw_path, "data")

    # smooth the gt to get rid of the ignorelabel
    gt = np.zeros_like( gt_ignore )

    for z in range(gt_ignore.shape[2]):

        print "processing slice", z, "of", gt_ignore.shape[2]

        gt_ignore_z = gt_ignore[:,:,z]

        binary = np.zeros_like(gt_ignore_z)
        binary[gt_ignore_z != 0] = 255

        close = vigra.filters.discClosing( binary.astype(np.uint8), 4 )
        cc = vigra.analysis.labelImageWithBackground(close.astype(np.uint32), background_value = 255)

        # find the largest cc (except for zero)
        counts = np.bincount(cc.flatten())
        counts_sort = np.sort(counts)[::-1]

        mask_myelin = np.zeros_like( cc )

        for c in counts_sort:
            # magic threshold!
            if c > 4000:
                id = np.where(counts == c)[0][0]
                if id != 0:
                    #print "Heureka!", c, id
                    #print np.unique(cc)[id]
                    mask_myelin[cc == id] = 1
            else:
                break

        #volumina_n_layer( [ raw[:,:,z], gt_ignore_z.astype(np.uint32), mask_myelin ]  )
        #quit()

        derivative_filter = vigra.filters.gaussianGradientMagnitude( gt_ignore_z, 0.5 )
        derivative_thresh = np.zeros_like( derivative_filter )
        derivative_thresh[derivative_filter > 0.1] = 1.

        dt = vigra.filters.distanceTransform2D(derivative_thresh)
        dt = np.array(dt)

        dtInv = vigra.filters.distanceTransform2D(derivative_thresh, background = False)
        dtInv = np.array(dtInv)
        dtInv[dtInv >0 ] -= 1

        dtSigned = dt.max() - dt + dtInv

        smoothed, maxRegionLabel = vigra.analysis.watersheds(
                    dtSigned.astype(np.float32),
                    neighborhood = 8,
                    seeds = gt_ignore_z.astype(np.uint32) )

        smoothed[ mask_myelin == 1] = 0

        #volumina_n_layer( [ raw[:,:,z], gt_ignore_z.astype(np.uint32),  smoothed.astype(np.uint32)] )
        #quit()

        gt[:,:,z] =  smoothed

    #gt = gt.transpose(1,0,2)

    gt_path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/ground_truth/ground-truth_nobg.h5"
    vigra.writeHDF5(gt, gt_path, "gt")

    volumina_n_layer( [ raw, gt_ignore.astype(np.uint32), gt.astype(np.uint32) ] )
Code Example #52
    print "loading trainingset"
    images_train, labels_train =  read( wanted_digits,
                                        dataset="training",
                                        path = mnist_path )
    images_train = np.reshape(images_train, (np.shape(images_train)[0], 28, 28))

    print "loading testset"
    images_test, labels_test =  read(   wanted_digits,
                                        dataset="testing",
                                        path = mnist_path )
    images_test = np.reshape(images_test, (np.shape(images_test)[0], 28, 28))

    print "downsample train"
    images_train_new, labels_train_new = downsapmple(images_train, labels_train[:,0], 2)

    print "downsample test"
    images_test_new,  labels_test_new  = downsapmple(images_test, labels_test[:,0], 10)

    labels_train = np.array(labels_train)
    labels_test = np.array(labels_test)

    path_train_new = "small/train.h5"
    path_test_new = "small/test.h5"

    save = 0
    if save:
        vigra.writeHDF5(images_train_new, path_train_new, "images")
        vigra.writeHDF5(labels_train_new, path_train_new, "labels")

        vigra.writeHDF5(images_test_new, path_test_new, "images")
        vigra.writeHDF5(labels_test_new, path_test_new, "labels")
Code Example #53
import vigra
import pdb
import numpy

test = vigra.readHDF5("labels_big_stuff_classifier.h5", "data", order = 'C')
print numpy.unique(test)
for i in [3, 2, 1, 0]:
    test[test == i] = i + 1
print numpy.unique(test)
pdb.set_trace()
vigra.writeHDF5(test, "labels_big_stuff_classifier_no_zero.h5", "data")
Code Example #54
    op_detect.InputVolume.setValue(volume)
    op_detect.HaloSize.setValue(100)
    op_detect.DetectionMethod.setValue('svm')
    op_detect.NHistogramBins.setValue(30)
    op_detect.positive_TrainingHistograms.setValue(False)
    op_detect.negative_TrainingHistograms.setValue(False)

    op_detect.PatchSize.setValue(150)
    op_detect.OverloadDetector.setValue("2016-06-29_19.58_detector_150_30_30.pkl")
    op_view_test_results = _opDetectMissing(graph = Graph())
    op_view_test_results.input_volume.connect(op_detect.Output)
    result = op_view_test_results.input_volume[:].wait()

    cut_out_volume = result * volume

    vigra.writeHDF5(result, "after_tuning_150_cremi_test_A_prediction_150_30_30.h5", "data")
    vigra.writeHDF5(cut_out_volume, "after_tuning_150_cremi_test_A_cutout_150_30_30.h5", "data")
    '''
    # for viewing results directly
    n = 4
    plt.imshow(result[n, :, :])
    plt.show()
    plt.savefig("patch_" + str(op_detect.PatchSize.value) +
                "_halo_" + str(op_detect.HaloSize.value) +
                "_bins_" + str(op_detect.NHistogramBins.value) +
                "_z" + str(n) +
                "_test_good_volume.png")
    plt.close()
    '''
    t_stop = time.time()
Code Example #55
import sys
import vigra
import numpy as np

if __name__ == '__main__':
    path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30.tif"
    data_in = vigra.impex.readVolume(path_in)
    print data_in.shape
    data_in = np.squeeze(data_in)


    file_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30.h5"
    label  = "data"
    vigra.writeHDF5(data_in, file_out, label)
Code Example #56
import vigra
import pdb
import numpy as np

# lsvm_mask = vigra.readHDF5("full_set_defected_prediction_result_250_v3_chosen_bg_v3.h5", "data", order = 'C')
lsvm_mask = vigra.readHDF5("thesis_defect_types_LSVM_prediction_150_30_30.h5", "data", order = 'C')
#pdb.set_trace()
segmentation_volume = vigra.readHDF5("thesis_defect_types_segmentation_full_volume.h5", "data", order = 'C')
segmentation_volume = vigra.dropChannelAxis(segmentation_volume)
# sets real background from LSVM-result to 0

for i in reversed(np.unique(segmentation_volume)):
    segmentation_volume[segmentation_volume == i] = i + 1
segmentation_volume[segmentation_volume == 4] = 0
segmentation_volume *= lsvm_mask

vigra.writeHDF5(segmentation_volume, "thesis_defect_types_segmentation_full_volume.h5", "data")
Code Example #57
import sys
import vigra
import numpy as np
if __name__ == '__main__':

    path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30.h5"
    path_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
    key = "data"

    data_in = vigra.readHDF5(path_in,key)

    slice_to_remove = 6
    data_in = np.delete(data_in, slice_to_remove, axis = 2)

    vigra.writeHDF5(data_in, path_out, key)
Code Example #58
import numpy as np
import vigra

from volumina_viewer import volumina_double_layer

def normalize(dat):
    dat = np.squeeze(dat)
    dat = np.array(dat)
    #dat = dat.transpose((1,0,2))
    means = np.mean(dat, axis = (0,1) )
    assert means.shape[0] == dat.shape[2]
    for z in range(dat.shape[2]):
        dat[:,:,z] -= means[z]
    dat -= dat.min()
    dat /= dat.max()
    return dat


if __name__ == '__main__':
    #f = "/home/constantin/raw_stack1.h5"
    f = "/media/constantin/4c03279b-1283-477d-a03e-440898f78d6f/constantin_projects/data/data_131115/Sample_A_gt/raw_data.h5"
    dat = vigra.readHDF5(f,"data")
    dat_norm = normalize(dat)

    #volumina_double_layer( dat, dat_norm )

    save_f = "/media/constantin/4c03279b-1283-477d-a03e-440898f78d6f/constantin_projects/data/data_131115/Sample_A_gt/raw_data_norm.h5"
    vigra.writeHDF5(dat_norm, save_f, "data")
Code Example #59
    return mc_seg


if __name__ == '__main__':

    # parameters for the Multicut
    mc_params = ExperimentSettings()
    mc_params.set_rfcache(os.path.join(meta.meta_folder, "rf_cache"))
    mc_params.set_nthreads(20)

    mc_params.set_anisotropy(5.)
    mc_params.set_use2d(True)
    mc_params.set_ignore_mask(True)

    mc_params.set_ntrees(1000)
    mc_params.set_weighting_scheme("all")

    mc_params.set_seed_fraction(0.05)
    mc_params.set_solver("opengm_exact")

    mc_params.set_verbose(True)

    local_feats_list = ("raw", "prob", "reg", "topo")
    seg_id = 0

    mc_seg = snemi3d_mc("snemi3d_train", "snemi3d_test", seg_id, seg_id,
                        local_feats_list, mc_params)

    vigra.writeHDF5(mc_seg, "snemi_result_train.h5", "data")
Code Example #60
import vigra
import numpy
import pdb

object_prediction = vigra.readHDF5("cremi_test_C_objects_png.h5", "data", order = 'C')
for i in reversed(numpy.unique(object_prediction)):
    object_prediction[object_prediction == i] = i + 1
print numpy.unique(object_prediction)
object_prediction[object_prediction == 4] = 0


pdb.set_trace()

vigra.writeHDF5(object_prediction, "cremi_test_C_objects_png_corrected.h5", "data")