Code example #1
def regression_test_isbi(cache_folder, data_folder):

    # if the cache does not exist, create it
    if not os.path.exists(os.path.join(cache_folder, 'isbi_train')):
        meta = init(cache_folder, data_folder, 'isbi')
    else:
        meta = MetaSet(cache_folder)
        meta.load()

    # isbi params
    params = ExperimentSettings()
    params.rf_cache_folder = os.path.join(cache_folder, "rf_cache")
    params.use_2d = True
    params.anisotropy_factor = 25.
    params.learn_2d = True
    params.ignore_mask = False
    params.n_trees = 500
    params.weighting_scheme = "z"
    params.solver = "multicut_fusionmoves"

    local_feats_list = ("raw", "prob", "reg", "topo")
    lifted_feats_list = ("mc", "cluster", "reg")

    ds_train = meta.get_dataset('isbi_train')
    ds_test = meta.get_dataset('isbi_test')
    mc_seg = run_mc(ds_train, ds_test, local_feats_list, params)
    lmc_seg = run_lmc(ds_train, ds_test, local_feats_list, lifted_feats_list,
                      params, 2.)

    #vigra.writeHDF5(mc_seg, './cache_isbi/isbi_test/mc_seg.h5', 'data', compression = 'gzip')
    #vigra.writeHDF5(lmc_seg, './cache_isbi/isbi_test/lmc_seg.h5', 'data', compression = 'gzip')

    print "Regression Test MC..."
    # Eval differences with the same parameters and the corresponding regression thresholds
    # vi-split:   0.0718660622942 -> 0.1
    vi_split_ref = 0.1
    # vi-merge:   0.0811051987574 -> 0.1
    vi_merge_ref = 0.1
    # adapted-ri: 0.0218391269081 -> 0.05
    adapted_ri_ref = 0.05
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'mc_seg.h5'), 'data'), mc_seg,
        vi_split_ref, vi_merge_ref, adapted_ri_ref)
    print "... passed"

    print "Regression Test LMC..."
    # Eval differences with the same parameters and the corresponding regression thresholds
    # vi-split: 0.161923549092 -> 0.2
    vi_split_ref = 0.2
    # vi-merge: 0.0792288680404 -> 0.1
    vi_merge_ref = 0.1
    # adapted-ri: 0.0334914933439 -> 0.05
    adapted_ri_ref = 0.05
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'lmc_seg.h5'), 'data'),
        lmc_seg, vi_split_ref, vi_merge_ref, adapted_ri_ref)
    print "... passed"
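
For orientation, here is a minimal numpy sketch of the split/merge variation-of-information terms that these thresholds refer to; the pipeline's actual regression_test is not shown in this listing and may assign the two conditional entropies the other way around:

import numpy as np

def vi_split_merge(seg_gt, seg):
    # contingency distribution via a combined label image
    gt = seg_gt.ravel().astype(np.int64)
    sg = seg.ravel().astype(np.int64)
    n = float(gt.size)
    joint = gt * (sg.max() + 1) + sg
    p_joint = np.unique(joint, return_counts=True)[1] / n
    p_gt = np.unique(gt, return_counts=True)[1] / n
    p_sg = np.unique(sg, return_counts=True)[1] / n

    def entropy(p):
        return -np.sum(p * np.log2(p))

    # H(seg|gt) = H(joint) - H(gt) grows with false splits,
    # H(gt|seg) = H(joint) - H(seg) grows with false merges
    vi_split = entropy(p_joint) - entropy(p_gt)
    vi_merge = entropy(p_joint) - entropy(p_sg)
    return vi_split, vi_merge
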
Code example #2
def regression_test_nproof(cache_folder, data_folder):

    # if the cache does not exist, create it
    if not os.path.exists(os.path.join(cache_folder, 'nproof_train')):
        meta = init(cache_folder, data_folder, 'nproof')
    else:
        meta = MetaSet(cache_folder)
        meta.load()

    # nproof params
    params = ExperimentSettings()
    params.rf_cache_folder = os.path.join(cache_folder, "rf_cache")
    params.use_2d = False
    params.anisotropy_factor = 1.
    params.ignore_mask = False
    params.n_trees = 500
    params.solver = "multicut_fusionmoves"
    params.lifted_neighborhood = 2

    local_feats_list  = ("raw", "prob", "reg", "topo")
    lifted_feats_list = ("cluster", "reg")

    ds_train = meta.get_dataset('nproof_train')
    ds_test = meta.get_dataset('nproof_test')
    mc_seg = run_mc(ds_train, ds_test, local_feats_list, params)
    lmc_seg = run_lmc(ds_train, ds_test, local_feats_list, lifted_feats_list,
                      params, 2.)

    print "Regression Test MC..."
    # Eval differences with the same parameters and the corresponding regression thresholds
    # vi-split: 0.31985479849 -> 0.35
    vi_split_ref = 0.35
    # vi-merge: 0.402968960935 -> 0.45
    vi_merge_ref = 0.45
    # adapted-ri: 0.122123986224 -> 0.15
    adapted_ri_ref = 0.15
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'gt_test.h5'), 'data'),
        mc_seg, vi_split_ref, vi_merge_ref, adapted_ri_ref)
    print "... passed"

    print "Regression Test LMC..."
    # Eval differences with the same parameters and the corresponding regression thresholds
    # vi-split: 0.332745302066 -> 0.4
    vi_split_ref = 0.4
    # vi-merge: 0.332349723508 -> 0.4
    vi_merge_ref = 0.4
    # adapted-ri: 0.0942531472586 -> 0.12
    adapted_ri_ref = 0.12
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'gt_test.h5'), 'data'),
        lmc_seg, vi_split_ref, vi_merge_ref, adapted_ri_ref)
    print "... passed"
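
Likewise, a minimal sketch of the adapted Rand error (one minus the Rand F-score, as used in the SNEMI3D challenge); the pipeline's implementation is not shown in this listing and may, for instance, treat a background label specially:

import numpy as np

def adapted_rand_error(seg_gt, seg):
    gt = seg_gt.ravel().astype(np.int64)
    sg = seg.ravel().astype(np.int64)
    # contingency counts n_ij between ground-truth and predicted segments
    joint = gt * (sg.max() + 1) + sg
    n_ij = np.unique(joint, return_counts=True)[1].astype(np.float64)
    a_i = np.unique(gt, return_counts=True)[1].astype(np.float64)
    b_j = np.unique(sg, return_counts=True)[1].astype(np.float64)
    sum_ij = np.sum(n_ij ** 2)
    precision = sum_ij / np.sum(b_j ** 2)
    recall = sum_ij / np.sum(a_i ** 2)
    # adapted Rand error = 1 - F-score of Rand precision and recall
    return 1. - 2. * precision * recall / (precision + recall)
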
Code example #3
def project_resolved_objects_to_segmentation(meta_folder, ds_name,
                                             mc_seg_filepath, mc_seg_key,
                                             new_nodes_filepath, save_path,
                                             results_name):

    ds = load_dataset(meta_folder, ds_name)
    seg_id = 0

    mc_segmentation = vigra.readHDF5(mc_seg_filepath, mc_seg_key)

    # Load resolving result
    with open(new_nodes_filepath) as f:
        resolved_objs = pickle.load(f)

    rag = ds.rag(seg_id)
    mc_labeling = nrag.gridRagAccumulateLabels(rag, mc_segmentation)
    new_label_offset = np.max(mc_labeling) + 1
    # relabel the nodes of each resolved object, shifting each object's new
    # labels past everything assigned so far to keep the labeling unique
    for obj in resolved_objs:
        resolved_nodes = resolved_objs[obj]
        for node_id in resolved_nodes:
            mc_labeling[node_id] = new_label_offset + resolved_nodes[node_id]
        new_label_offset += np.max(np.array(resolved_nodes.values())) + 1
    mc_segmentation = nrag.projectScalarNodeDataToPixels(
        rag, mc_labeling,
        ExperimentSettings().n_threads)

    # Write the result
    vigra.writeHDF5(mc_segmentation,
                    save_path,
                    results_name,
                    compression='gzip')
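
The final projection step is conceptually just an array lookup; a toy illustration of what nrag.projectScalarNodeDataToPixels computes (the real call is multithreaded, hence the n_threads argument):

import numpy as np

# toy oversegmentation: pixel -> superpixel (= RAG node) id
superpixels = np.array([[0, 0, 1],
                        [2, 2, 1]])
# one label per RAG node, e.g. the relabeled multicut result from above
node_labels = np.array([5, 7, 5])
print(node_labels[superpixels])
# [[5 5 7]
#  [5 5 7]]
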
Code example #4
                          'z/1/beta_0.5', 'z/1/beta_0.45', 'z/1/beta_0.55',
                          'z/1/beta_0.4', 'z/1/beta_0.6', 'z/1/beta_0.35',
                          'z/1/beta_0.65', 'z/1/beta_0.3', 'z/1/beta_0.7'
                      ],
                      [
                          'z/0/beta_0.5', 'z/0/beta_0.45', 'z/0/beta_0.55',
                          'z/0/beta_0.4', 'z/0/beta_0.6', 'z/0/beta_0.35',
                          'z/0/beta_0.65', 'z/0/beta_0.3', 'z/0/beta_0.7'
                      ],
                      [
                          'z/1/beta_0.5', 'z/1/beta_0.45', 'z/1/beta_0.55',
                          'z/1/beta_0.4', 'z/1/beta_0.6', 'z/1/beta_0.35',
                          'z/1/beta_0.65', 'z/1/beta_0.3', 'z/1/beta_0.7'
                      ]]

    ExperimentSettings().anisotropy_factor = 10.
    ExperimentSettings().n_threads = computer_cores
    ExperimentSettings().n_trees = 500

    for ds_id in experiment_ids:

        result_key = result_keys[ds_id]
        ds_name = ds_names[ds_id]

        train_segs_paths = np.delete(all_train_segs, ds_id, axis=0).tolist()
        train_segs_keys = np.delete(all_train_keys, ds_id, axis=0).tolist()

        test_seg_path = os.path.join(project_folder, ds_name, 'result.h5')
        test_seg_key = result_keys[ds_id]

        find_false_merges(ds_name, ds_names, meta_folder, rf_cache_folder,
Code example #5
        [source_folder + 'cremi.splA.train.mcseg_betas.crop.axes_xyz.split_z.h5'] * 9,
        [source_folder + 'cremi.splB.train.mcseg_betas.crop.axes_xyz.split_z.h5'] * 9,
        [source_folder + 'cremi.splB.train.mcseg_betas.crop.axes_xyz.split_z.h5'] * 9,
        [source_folder + 'cremi.splC.train.mcseg_betas.crop.axes_xyz.split_z.h5'] * 9,
        [source_folder + 'cremi.splC.train.mcseg_betas.crop.axes_xyz.split_z.h5'] * 9
    ]
    all_train_keys = [
        ['z/0/beta_0.5', 'z/0/beta_0.45', 'z/0/beta_0.55', 'z/0/beta_0.4', 'z/0/beta_0.6', 'z/0/beta_0.35', 'z/0/beta_0.65', 'z/0/beta_0.3', 'z/0/beta_0.7'],
        ['z/1/beta_0.5', 'z/1/beta_0.45', 'z/1/beta_0.55', 'z/1/beta_0.4', 'z/1/beta_0.6', 'z/1/beta_0.35', 'z/1/beta_0.65', 'z/1/beta_0.3', 'z/1/beta_0.7'],
        ['z/0/beta_0.5', 'z/0/beta_0.45', 'z/0/beta_0.55', 'z/0/beta_0.4', 'z/0/beta_0.6', 'z/0/beta_0.35', 'z/0/beta_0.65', 'z/0/beta_0.3', 'z/0/beta_0.7'],
        ['z/1/beta_0.5', 'z/1/beta_0.45', 'z/1/beta_0.55', 'z/1/beta_0.4', 'z/1/beta_0.6', 'z/1/beta_0.35', 'z/1/beta_0.65', 'z/1/beta_0.3', 'z/1/beta_0.7'],
        ['z/0/beta_0.5', 'z/0/beta_0.45', 'z/0/beta_0.55', 'z/0/beta_0.4', 'z/0/beta_0.6', 'z/0/beta_0.35', 'z/0/beta_0.65', 'z/0/beta_0.3', 'z/0/beta_0.7'],
        ['z/1/beta_0.5', 'z/1/beta_0.45', 'z/1/beta_0.55', 'z/1/beta_0.4', 'z/1/beta_0.6', 'z/1/beta_0.35', 'z/1/beta_0.65', 'z/1/beta_0.3', 'z/1/beta_0.7']
    ]

    ExperimentSettings().anisotropy_factor = 10.
    ExperimentSettings().n_threads = computer_cores
    ExperimentSettings().n_trees = 500
    ExperimentSettings().pruning_factor = pruning_factor
    ExperimentSettings().border_distance = border_distance
    for ds_id in experiment_ids:
        ds_name = ds_names[ds_id]

        train_segs_paths = np.delete(all_train_segs, ds_id, axis=0).tolist()
        train_segs_keys = np.delete(all_train_keys, ds_id, axis=0).tolist()

        test_seg_path = os.path.join(project_folder, ds_name, 'result.h5')
        test_seg_key = result_keys[ds_id]

        # logger.info('Starting find_false_merges...')
Code example #6
sys.path.append(
    '/export/home/amatskev/Bachelor/nature_methods_multicut_pipeline/software/'
)

from multicut_src import ExperimentSettings, load_dataset

from pipeline import resolve_false_merges, project_new_result, project_resolved_objects_to_segmentation

if __name__ == '__main__':

    from init_datasets import meta_folder, project_folder
    from init_datasets import ds_names, result_keys, experiment_ids, computer_cores
    from run_mc_all import rf_cache_folder
    # These are the parameters as used for the initial mc
    ExperimentSettings().rf_cache_folder = rf_cache_folder
    ExperimentSettings().anisotropy_factor = 10.
    ExperimentSettings().use_2d = False
    ExperimentSettings().n_threads = computer_cores
    ExperimentSettings().n_trees = 500
    ExperimentSettings().solver = 'multicut_fusionmoves'
    ExperimentSettings().verbose = True
    ExperimentSettings().weighting_scheme = 'z'
    ExperimentSettings().lifted_neighborhood = 3

    # Parameters for the resolving algorithm
    ExperimentSettings().min_nh_range = 5
    ExperimentSettings().max_sample_size = 3

    # Parameters deciding which objects to resolve
    min_prob_thresh = 0.3
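
Note that ExperimentSettings() is constructed anew on every line here, yet the assignments accumulate; that only works if the class behaves as a singleton. A minimal sketch of that pattern (the pipeline's actual implementation is not shown in this listing):

class ExperimentSettings(object):
    # every call to ExperimentSettings() returns the same instance,
    # so attribute assignments on separate calls accumulate
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(ExperimentSettings, cls).__new__(cls)
        return cls._instance

ExperimentSettings().n_threads = 8
assert ExperimentSettings().n_threads == 8  # same object every time
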
Code example #7
def resolve_false_merges(ds_name,
                         ds_names,
                         meta_folder,
                         rf_cache_folder,
                         new_nodes_filepath,
                         pre_seg_filepath,
                         pre_seg_key,
                         min_prob_thresh,
                         max_prob_thresh,
                         exclude_objs_with_larger_thresh,
                         global_resolve=True):

    # Path folders
    paths_cache_folder = os.path.join(meta_folder, ds_name, 'path_data')

    # TODO Change here
    weight_filepath = os.path.join(
        meta_folder, ds_name, 'probs_to_energies_0_z_16.0_0.5_rawprobreg.h5')
    lifted_filepath = os.path.join(meta_folder, ds_name,
                                   'lifted_probs_to_energies_0_3_0.5_2.0.h5')

    ds_train = [
        load_dataset(meta_folder, name) for name in ds_names if name != ds_name
    ]
    rf_cache_name = 'rf_merges_%s' % '_'.join([ds.ds_name for ds in ds_train])

    ds = load_dataset(meta_folder, ds_name)
    seg_id = 0

    path_data_filepath = os.path.join(paths_cache_folder,
                                      'paths_ds_{}.h5'.format(ds_name))
    # with open(os.path.join(paths_cache_folder, 'paths_ds_{}.pkl'.format(ds_name))) as f:
    #     path_data = pickle.load(f)
    paths = vigra.readHDF5(path_data_filepath, 'all_paths')
    if paths.size:
        # each path is stored as a flat coordinate array; reshape to (n_points, 3)
        paths = np.array([path.reshape((len(path) / 3, 3)) for path in paths])
    paths_to_objs = vigra.readHDF5(path_data_filepath, 'paths_to_objs')
    with open(os.path.join(paths_cache_folder,
                           'false_paths_predictions.pkl')) as f:
        false_merge_probs = pickle.load(f)

    # Find objects where probability >= min_prob_thresh and <= max_prob_thresh
    objs_with_prob_greater_thresh = np.unique(
        np.array(paths_to_objs)[np.logical_and(
            false_merge_probs >= min_prob_thresh,
            false_merge_probs <= max_prob_thresh)])
    if exclude_objs_with_larger_thresh:
        objs_to_exclude = np.unique(
            np.array(paths_to_objs)[false_merge_probs > max_prob_thresh])
        objs_with_prob_greater_thresh = np.setdiff1d(
            objs_with_prob_greater_thresh, objs_to_exclude)

    # Extract all paths for each of the found objects
    false_paths = {}
    for obj in objs_with_prob_greater_thresh:
        # print paths_to_objs == obj
        false_paths[obj] = np.array(paths)[paths_to_objs == obj]

    rf_filepath = os.path.join(rf_cache_folder, rf_cache_name)
    # with open(rf_filepath) as f:
    #     path_rf = pickle.load(f)
    path_rf = RandomForest.load_from_file(rf_filepath, 'rf',
                                          ExperimentSettings().n_threads)

    mc_segmentation = vigra.readHDF5(pre_seg_filepath, pre_seg_key)
    mc_weights_all = vigra.readHDF5(weight_filepath, "data")
    lifted_weights_all = vigra.readHDF5(lifted_filepath, "data")

    if global_resolve:
        new_node_labels = resolve_merges_with_lifted_edges_global(
            ds,
            seg_id,
            false_paths,
            path_rf,
            mc_segmentation,
            mc_weights_all,
            paths_cache_folder=paths_cache_folder,
            lifted_weights_all=lifted_weights_all)
    else:
        new_node_labels = resolve_merges_with_lifted_edges(
            ds,
            ds_train,
            seg_id,
            false_paths,
            path_rf,
            mc_segmentation,
            mc_weights_all,
            paths_cache_folder=paths_cache_folder,
            lifted_weights_all=lifted_weights_all)

    with open(new_nodes_filepath, 'w') as f:
        pickle.dump(new_node_labels, f)
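
To make the path-to-object selection above concrete, a small worked example with made-up probabilities:

import numpy as np

paths_to_objs = np.array([1, 1, 2, 3, 3])
false_merge_probs = np.array([0.2, 0.5, 0.4, 0.35, 0.9])
min_prob_thresh, max_prob_thresh = 0.3, 0.8

# objects with at least one path probability inside the threshold window
in_range = np.logical_and(false_merge_probs >= min_prob_thresh,
                          false_merge_probs <= max_prob_thresh)
selected = np.unique(paths_to_objs[in_range])  # -> [1 2 3]
# optionally drop objects that also have a path above the upper threshold
too_high = np.unique(paths_to_objs[false_merge_probs > max_prob_thresh])  # -> [3]
print(np.setdiff1d(selected, too_high))  # -> [1 2]
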
Code example #8
def main():
    args = process_command_line()

    out_folder = args.output_folder
    assert os.path.exists(
        out_folder), "Please choose an existing folder for the output"
    cache_folder = os.path.join(out_folder, "cache")

    # init the cache when running experiments the first time
    # if the meta set wasn't saved yet, we need to recreate the cache
    if not os.path.exists(os.path.join(cache_folder, "meta_dict.pkl")):
        init(args.data_folder, cache_folder, args.snemi_mode)

    meta = MetaSet(cache_folder)
    meta.load()

    ds_train = meta.get_dataset("ds_train")
    ds_test = meta.get_dataset("ds_test")

    # experiment settings
    exp_params = ExperimentSettings()

    exp_params.set_rfcache(os.path.join(cache_folder, "rf_cache"))

    # use extra 2d features
    exp_params.set_use2d(True)

    # parameters for learning
    exp_params.set_fuzzy_learning(True)
    exp_params.set_ntrees(500)

    # parameters for lifted multicut
    exp_params.set_lifted_neighborhood(3)

    # features used
    local_feats_list = ("raw", "prob", "reg", "topo")
    # we don't use the multicut feature here, because it can take too long
    lifted_feats_list = ("cluster", "reg")

    if args.snemi_mode:
        exp_params.set_anisotropy(5.)
        exp_params.set_weighting_scheme("all")
        exp_params.set_solver("multicut_exact")
        gamma = 10000.
    else:
        exp_params.set_anisotropy(25.)
        exp_params.set_weighting_scheme("z")
        exp_params.set_solver("multicut_fusionmoves")
        gamma = 2.

    seg_id = 0

    if args.use_lifted:
        print "Starting Lifted Multicut Workflow"

        # have to make filters first due to cutouts...
        ds_train.make_filters(0, exp_params.anisotropy_factor)
        ds_train.make_filters(1, exp_params.anisotropy_factor)
        ds_test.make_filters(0, exp_params.anisotropy_factor)
        ds_test.make_filters(1, exp_params.anisotropy_factor)

        mc_node, mc_edges, mc_energy, t_inf = lifted_multicut_workflow(
            ds_train,
            ds_test,
            seg_id,
            seg_id,
            local_feats_list,
            lifted_feats_list,
            exp_params,
            gamma=gamma,
            weight_z_lifted=True)

        save_path = os.path.join(out_folder,
                                 "lifted_multicut_segmentation.tif")

    else:
        print "Starting Multicut Workflow"
        mc_node, mc_edges, mc_energy, t_inf = multicut_workflow(
            ds_train, ds_test, seg_id, seg_id, local_feats_list, exp_params)

        save_path = os.path.join(out_folder, "multicut_segmentation.tif")

    mc_seg = ds_test.project_mc_result(seg_id, mc_node)

    print "Saving Result to", save_path
    vigra.impex.writeVolume(mc_seg, save_path, '')
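
process_command_line is not part of this listing; a plausible argparse-based sketch, inferred from the attributes main() accesses (the flag names and help texts are assumptions):

import argparse

def process_command_line():
    parser = argparse.ArgumentParser(
        description='Run the (lifted) multicut pipeline on a train/test dataset pair.')
    parser.add_argument('data_folder', type=str,
                        help='folder with the input data')
    parser.add_argument('output_folder', type=str,
                        help='existing folder for cache and results')
    parser.add_argument('--snemi_mode', action='store_true',
                        help='use the SNEMI parameter preset instead of ISBI')
    parser.add_argument('--use_lifted', action='store_true',
                        help='run the lifted multicut workflow')
    return parser.parse_args()
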
Code example #9
            mc_params, gamma = gamma)

    # use this for running the mc with defected slices
    #mc_nodes, _, _, _ = lifted_multicut_workflow_with_defect_correction(
    #        ds_train, ds_test,
    #        seg_id, seg_id,
    #        feature_list, feature_list_lifted,
    #        mc_params, gamma = gamma)

    segmentation = ds_test.project_mc_result(seg_id, mc_nodes)
    vigra.writeHDF5(segmentation, save_path, 'data', compression = 'gzip')

if __name__ == '__main__':

    # this object stores the different experiment settings
    mc_params = ExperimentSettings()
    mc_params.set_rfcache(os.path.join(meta.meta_folder, "rf_cache"))

    # the anisotropy of the data, used in the filter calculation: set to 1.
    # for isotropic data, to the actual degree for mildly anisotropic data,
    # or to > 20. to compute filters in 2d
    anisotropy = 25.
    mc_params.set_anisotropy(anisotropy)

    # set to true for segmentations with flat superpixels
    mc_params.set_use2d(True)

    # number of threads used for multithreaded computations
    mc_params.set_nthreads(8)

    # number of trees used in the random forest
    mc_params.set_ntrees(200)
Code example #10
def regression_test_snemi(cache_folder, data_folder):

    # if the cache does not exist, create it
    if not os.path.exists(os.path.join(cache_folder, 'snemi_train')):
        meta = init(cache_folder, data_folder, 'snemi')
    else:
        meta = MetaSet(cache_folder)
        meta.load()

    # snemi params
    params = ExperimentSettings()
    params.rf_cache_folder = os.path.join(cache_folder, "rf_cache")
    params.use_2d = True
    params.learn_fuzzy = True
    params.anisotropy_factor = 5.
    params.ignore_mask = False
    params.n_trees = 500
    params.weighting_scheme = "all"
    params.solver = "multicut_exact"
    params.lifted_neighborhood = 3

    local_feats_list = ("raw", "prob", "reg", "topo")
    lifted_feats_list = ("cluster", "reg")

    ds_train = meta.get_dataset('snemi_train')
    ds_test = meta.get_dataset('snemi_test')
    mc_seg = run_mc(ds_train, ds_test, local_feats_list, params)
    gamma = 10000.
    lmc_seg = run_lmc(ds_train, ds_test, local_feats_list, lifted_feats_list,
                      params, gamma)

    print "Regression Test MC..."
    # Eval differences with the same parameters and the corresponding regression thresholds
    # vi-split: 0.0501385345177 -> 0.1
    vi_split_ref = 0.1
    # vi-merge: 0.049803253098 -> 0.1
    vi_merge_ref = 0.1
    # adapted-ri: 0.0170138077554 -> 0.05
    adapted_ri_ref = 0.05
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'mc_seg.h5'), 'data'), mc_seg,
        vi_split_ref, vi_merge_ref, adapted_ri_ref)
    print "... passed"

    print "Regression Test LMC..."
    # FIXME why are these differences so big?
    # measured: vi-split 0.291149212478, vi-merge 0.141228313621, adapted-ri 0.0536859650649
    regression_test(
        vigra.readHDF5(os.path.join(data_folder, 'lmc_seg.h5'), 'data'),
        lmc_seg)
    print "... passed"
Code example #11
def main():
    args = process_command_line()

    out_folder = args.output_folder
    assert os.path.exists(
        out_folder), "Please choose an existing folder for the output"
    cache_folder = os.path.join(out_folder, "cache")

    # if the meta set wasn't saved yet, we need to recreate the cache
    if not os.path.exists(os.path.join(cache_folder, "meta_dict.pkl")):
        init(args.data_folder, cache_folder)

    meta = MetaSet(cache_folder)
    meta.load()

    ds_train = meta.get_dataset("ds_train")
    ds_test = meta.get_dataset("ds_test")

    # experiment settings
    exp_params = ExperimentSettings()

    exp_params.set_rfcache(os.path.join(cache_folder, "rf_cache"))

    # use extra 2d features
    exp_params.set_use2d(True)

    # parameters for learning
    exp_params.set_anisotropy(25.)
    exp_params.set_learn2d(True)
    exp_params.set_ignore_mask(False)
    exp_params.set_ntrees(500)

    exp_params.set_weighting_scheme("z")
    exp_params.set_solver("multicut_fusionmoves")

    local_feats_list = ("raw", "prob", "reg", "topo")
    lifted_feats_list = ("mc", "cluster", "reg")

    seg_id = 0

    if args.use_lifted:
        print "Starting Lifted Multicut Workflow"

        # have to make filters first due to cutouts...
        ds_train.make_filters(0, exp_params.anisotropy_factor)
        ds_train.make_filters(1, exp_params.anisotropy_factor)
        ds_test.make_filters(0, exp_params.anisotropy_factor)
        ds_test.make_filters(1, exp_params.anisotropy_factor)

        mc_node, mc_edges, mc_energy, t_inf = lifted_multicut_workflow(
            ds_train,
            ds_test,
            seg_id,
            seg_id,
            local_feats_list,
            lifted_feats_list,
            exp_params,
            gamma=2.,
            warmstart=False,
            weight_z_lifted=True)

        save_path_seg = os.path.join(out_folder,
                                     "lifted_multicut_segmentation.tif")
        save_path_edge = os.path.join(out_folder,
                                      "lifted_multicut_labeling.tif")

    else:
        print "Starting Multicut Workflow"
        mc_node, mc_edges, mc_energy, t_inf = multicut_workflow(
            ds_train, ds_test, seg_id, seg_id, local_feats_list, exp_params)

        save_path_seg = os.path.join(out_folder, "multicut_segmentation.tif")
        save_path_edge = os.path.join(out_folder, "multicut_labeling.tif")

    mc_seg = ds_test.project_mc_result(seg_id, mc_node)

    print "Saving Segmentation Result to", save_path_seg
    vigra.impex.writeVolume(mc_seg, save_path_seg, '')

    # need to bring results back to the isbi challenge format...
    edge_vol = edges_to_volume(ds_test._rag(seg_id), mc_edges)
    print "Saving Edge Labeling Result to", save_path_edge
    vigra.impex.writeVolume(edge_vol, save_path_edge, '', dtype=np.uint8)