Example #1
0
# raw_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/official_test_samples/sample_{}+_padded_20160601.hdf".format(sample))
#
# import h5py
# GT_mask_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/alignment_experiments/sample_{}+_GT_mask.hdf".format(sample))
# mask_inner_path = "volumes/labels/mask"
# GT_box = np.zeros(padded_shape, dtype="uint8")
# GT_box[slice_original_pad] = 1
#
# with h5py.File(GT_mask_file, 'w') as f:
#     f[mask_inner_path] = GT_box
#
from cremi_tools.alignment import backalign_segmentation
from cremi_tools.alignment.backalign import bounding_boxes

# Back-align the (aligned-space) GT mask of one CREMI sample into the
# original, unaligned coordinate space.
# NOTE(review): `sample`, `os`, `vigra` and `get_trendytukan_drive_path()`
# must be defined earlier in the file — not visible in this fragment.
mask_GT_path = os.path.join(
    get_trendytukan_drive_path(),
    "datasets/CREMI/alignment_experiments/sample_{}_aligned.hdf".format(
        sample))
# mask_GT_path = os.path.join(get_hci_home_path(), "sampleA+_gt.h5")
# Load the mask and crop it to the sample-specific bounding box that
# cremi_tools' back-alignment expects.
mask_GT = vigra.readHDF5(mask_GT_path,
                         "volumes/labels/mask")[bounding_boxes[sample]]

# Output file receiving the back-aligned mask.
out_file = os.path.join(
    get_trendytukan_drive_path(),
    "datasets/CREMI/alignment_experiments/sample_{}_backaligned.hdf".format(
        sample))
backalign_segmentation(sample,
                       mask_GT,
                       out_file,
                       key="volumes/labels/mask",
                       postprocess=False)
Example #2
0
from segmfriends.utils.various import starmap_with_kwargs



if __name__ == '__main__':
    # CLI: which experiment to plot and where its results are stored.
    cli = argparse.ArgumentParser()
    cli.add_argument('--exp_name', type=str, default="SSBMExperiment")
    cli.add_argument('--project_directory', type=str,
                     default="projects/spectral_compare")
    options = cli.parse_args()

    experiment_name = options.exp_name
    results_dir = os.path.join(get_trendytukan_drive_path(),
                               options.project_directory)

    # Fixed keyword arguments handed to the experiment constructor.
    shared_kwargs = {
        "experiment_name": experiment_name,
        "project_directory": results_dir,
    }

    # Instantiate the selected experiment and render its plots.
    plotter = SSBM_experiments.get_experiment_by_name(experiment_name)(
        fixed_kwargs=shared_kwargs)
    plotter.make_plots(results_dir)



Example #3
0
# Add missing package-paths
import long_range_compare

from long_range_compare.data_paths import get_hci_home_path, get_trendytukan_drive_path
import numpy as np
import json
import os


if __name__ == '__main__':

    # Collect all cityscapes evaluation scores into a single dictionary.
    results_collected = {}

    # Directory holding the outputs of the cityscapes evaluation script.
    results_dir = os.path.join(get_trendytukan_drive_path(), 'datasets/cityscape/data/gtFine_trainvaltest/evaluationResults/eval_out')

    # Accumulators for building a results table; the single-space entry
    # presumably serves as an empty header cell — TODO confirm.
    result_matrix = []
    scores_collected = []
    all_agglo_type = [' ']
    def assign_color(value, good_thresh, bad_thresh, nb_flt, best="lowest"):
        if best == "lowest":
            if value < good_thresh:
                return '{{\color{{ForestGreen}} {num:.{prec}f} }}'.format(prec=nb_flt, num=value)
            if value > good_thresh and value < bad_thresh:
                return '{{\color{{Orange}} {num:.{prec}f} }}'.format(prec=nb_flt, num=value)
            if value > bad_thresh:
                return '{{\color{{Red}} {num:.{prec}f} }}'.format(prec=nb_flt, num=value)
        elif best == "highest":
            if value > good_thresh:
# Original pad for GASP paper:
# slice_GT_mask = (slice(36, -37), slice(890, -890), slice(890, -890))

# # With the embedding we need at least a padding of (2, 240, 240) in the original res (big_pad_version)
# slice_GT_mask = (slice(32, -33), slice(580, -580), slice(580, -580))

# This is almost just a box around the raw data (raw_mask)
slice_GT_mask = (slice(5, -5), slice(180, -180), slice(180, -180))

# Full shape of the padded CREMI volumes (z, y, x).
padded_shape = (200, 3072, 3072)

# Re-align raw data plus GT labels for each padded CREMI training sample.
for sample in ["A", "B", "C"]:

    # test_sample_path = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/constantin_affs/test_samples/sample{}.h5".format(sample))

    # NOTE(review): raw_file and GT_file point at the same HDF5 file; raw
    # data and labels are just read from different internal datasets.
    raw_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/padded_data/sample_{}_padded_20160501.hdf".format(sample))
    out_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/padded_data/full_aligned_samples/sample_{}_aligned_plus_raw_mask.hdf".format(sample))

    import h5py  # NOTE(review): imported but unused in this fragment
    GT_file = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/padded_data/sample_{}_padded_20160501.hdf".format(sample))
    GT_inner_path = "volumes/labels/neuron_ids"

    from cremi_tools.alignment import realign

    # Run cremi_tools' re-alignment, writing raw + aligned labels to out_file.
    realign(raw_file,
                sample,
                out_file,
                labels_file=GT_file,
                labels_key=GT_inner_path)

Example #5
0
from segmfriends.utils.various import starmap_with_kwargs

if __name__ == '__main__':

    # CLI: which cremi experiment to load and where the project lives.
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', type=str,
                        default="FullTestSamples")  #DebugExp
    parser.add_argument('--project_directory',
                        default="projects/agglo_cluster_compare",
                        type=str)
    # TODO: option to pass some other fixed kwargs and overwrite it...?

    args = parser.parse_args()

    exp_name = args.exp_name
    proj_dir = os.path.join(get_trendytukan_drive_path(),
                            args.project_directory)

    # Fixed keyword arguments handed to the experiment constructor below.
    fixed_kargs = {
        "experiment_name":
        exp_name,
        "project_directory":
        proj_dir,
        "configs_dir_path":
        os.path.join(
            get_hci_home_path(),
            "pyCharm_projects/longRangeAgglo/experiments/cremi/configs")
    }

    # Select experiment and load data:
    experiment = cremi_experiments.get_experiment_by_name(exp_name)(
def run_agglo(problem_name=None, update_rule='mean', CLC=False):
    """Run greedy agglomerative clustering on one pre-loaded graph problem.

    Parameters
    ----------
    problem_name : str
        Key into the module-level ``all_data`` dict (e.g.
        "large_problem_L4.txt"); also used to build the score sub-directory.
    update_rule : str
        Linkage update rule passed to ``runGreedyGraphEdgeContraction``.
    CLC : bool
        Whether to add cannot-link constraints.

    Writes a JSON file with the multicut energy and the runtime; the run is
    skipped entirely when the result file already exists.
    """
    saving_path = os.path.join(get_trendytukan_drive_path(), "projects/agglo_cluster_compare/bigFruitFlyGraphs/scores")
    check_dir_and_create(saving_path)
    # One sub-directory per problem name (without the ".txt" suffix):
    saving_path_2 = os.path.join(saving_path,
                                 problem_name.replace(".txt", ''))
    check_dir_and_create(saving_path_2)

    result_file_path = os.path.join(saving_path_2,
                                    "{}_{}.json".format(update_rule, CLC))
    if os.path.exists(result_file_path):
        print(result_file_path)
        print("Skip agglo ", update_rule, CLC, problem_name)
        return

    # Columns 0-1 are the edge endpoints (uv ids), column 2 the edge weight.
    uvIds = all_data[problem_name][:, :2].astype('uint64')
    edge_weights = all_data[problem_name][:, 2].astype('float64')

    print("Building graph")
    # BUGFIX: node ids are 0-based, so the graph needs max-id + 1 nodes
    # (previously `UndirectedGraph(uvIds.max())` was one node short).
    graph = UndirectedGraph(int(uvIds.max()) + 1)
    graph.insertEdges(uvIds)

    # Run agglo:
    tick = time.time()
    print("Start agglo ", update_rule, CLC, problem_name)
    try:
        nodeSeg, _ = runGreedyGraphEdgeContraction(graph, edge_weights, update_rule=update_rule, add_cannot_link_constraints=CLC)
    except RuntimeError:
        # BUGFIX: bail out on failure; previously execution fell through and
        # crashed with a NameError because `nodeSeg` was never assigned.
        print("Nifty Exception on {} {} {}!".format(problem_name, update_rule, CLC))
        return
    tock = time.time()

    edge_labels = graph.nodesLabelsToEdgeLabels(nodeSeg)
    # Multicut energy: sum of the weights of the cut ("on") edges.
    MC_energy = (edge_weights * edge_labels).sum()

    print(tock - tick)
    print(MC_energy)

    # BUGFIX: cast the numpy scalar to a plain float — json.dump cannot
    # serialize numpy types (the likely cause of the old "Exception again!").
    new_results = {update_rule: {CLC: {'MC_energy': float(MC_energy),
                                       'runtime': tock - tick}}}

    # Serialize under the shared lock so parallel workers don't interleave
    # writes to the same result file.
    with global_lock:
        with open(result_file_path, 'w') as f:
            try:
                json.dump(new_results, f, indent=4, sort_keys=True)
            except Exception:
                print("Exception again!")
        print("saved")
#     path_file = os.path.join(get_trendytukan_drive_path(), DATA_DIR, problem_name)
#     print("Loading data")
#     my_data = np.genfromtxt(path_file, delimiter=' ')
#     print("Writing...")
#     vigra.writeHDF5(my_data, path_file.replace(".txt", ".h5"), 'data')


# Pre-load every benchmark graph so the workers can grab them by name.
all_data = {}

# Each problem ships as a '<name>.txt' edge list that was previously
# converted to HDF5; read the '.h5' version of each of them.
for problem_name in [
        "large_problem_L4.txt",
        "large_problem_L3.txt",
        "large_problem_L2.txt",
        "large_problem_L1.txt",
        # "large_problem_L0.txt"
]:
    print("Loading data")
    path_file = os.path.join(get_trendytukan_drive_path(), DATA_DIR, problem_name)
    all_data[problem_name] = vigra.readHDF5(path_file.replace(".txt", ".h5"), 'data')



# NOTE(review): this second `run_agglo` appears truncated — it stops right
# after creating the output directories (compare with the complete
# definition earlier in the file) and would shadow it if both are imported.
def run_agglo(problem_name=None, update_rule='mean', CLC=False):
    # Root directory collecting the agglomeration scores.
    saving_path = os.path.join(get_trendytukan_drive_path(), "projects/agglo_cluster_compare/bigFruitFlyGraphs/scores")
    check_dir_and_create(saving_path)
    # One sub-directory per problem name (without the ".txt" suffix):
    saving_path_2 = os.path.join(saving_path,
                                 problem_name.replace(".txt", ''))
Example #8
0
# Add missing package-paths
import long_range_compare


import os
from long_range_compare.data_paths import get_hci_home_path, get_trendytukan_drive_path


from shutil import copyfile

# Root directory with one sub-folder of result images per agglomeration type.
result_root_dir = os.path.join(get_trendytukan_drive_path(), "datasets/cityscape/data/gtFine_trainvaltest/out")

# All selected images are copied here, side by side, for easy comparison.
collected_result_dir = os.path.join(get_trendytukan_drive_path(), "datasets/cityscape/data/gtFine_trainvaltest/out/COLLECTED")

# Original (un-segmented) cityscapes validation images.
original_images_dir = os.path.join(get_trendytukan_drive_path(), "datasets/cityscape/data/leftImg8bit_trainvaltest/leftImg8bit/val")

# NOTE(review): the first assignment is immediately overwritten — only the
# 'frankfurt' image below is actually used.
image_name = 'munster/munster_000167_000019_leftImg8bit_combine.inst.jpg'
image_name = 'frankfurt/frankfurt_000001_020693_leftImg8bit_combine.inst.jpg'
# image_name = "munster/munster_000167_000019_leftImg8bit_combine.inst.jpg"
# image_name = 'frankfurt/frankfurt_000001_015768_leftImg8bit_combine.inst.jpg'


# Sub-directories of result_root_dir to skip when collecting images.
ignored = ["COLLECTED", "MAX_bk_mask", "MEAN_bk_mask", "MEAN_constr_bk_mask"]

for subdir, dirs, files in os.walk(result_root_dir):
    # Copy original image:
    original_image_path = os.path.join(original_images_dir, image_name.replace("_combine.inst.jpg", ".png"))
    copyfile(original_image_path, os.path.join(collected_result_dir, image_name.replace("_combine.inst.jpg", ".png")))

    for agglo_type in dirs:
        # if agglo_type in ignored or ("clean" not in agglo_type and "ORIG" not in agglo_type):

# FOUND CROP SLICES:
# A+ (slice(36, 163, None), slice(1154, 2753, None), slice(934, 2335, None))
# B+ (slice(36, 163, None), slice(1061, 2802, None), slice(1254, 4009, None))
# C+ (slice(36, 163, None), slice(980, 2443, None), slice(1138, 2569, None))

# for sample in ["A+"]:
for sample in ["A+", "B+", "C+"]:

    # Load GT mask:
    print("Loading")
    mask_inner_path = "volumes/labels/mask"
    # source_path_big_pad = os.path.join(get_trendytukan_drive_path(),
    #                             "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned_plus_big_pad.hdf".format(sample))
    source_path_raw_mask = os.path.join(get_trendytukan_drive_path(),
                                       "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned_plus_raw_mask.hdf".format(
                                           sample))
    # source_path = os.path.join(get_trendytukan_drive_path(),
    #                             "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned.hdf".format(sample))
    from segmfriends.utils.various import readHDF5, writeHDF5
    print("Reading...")
    mask_big_pad = readHDF5(source_path_raw_mask, mask_inner_path)
    print("Max big pad: ", mask_big_pad.max())
    # mask_border = mask_big_pad > 10
    mask_big_pad = (mask_big_pad == 1).astype('uint16')


    # print(mask_GT.shape)
    print("Find crop")
    # crop_slice = get_gt_bounding_box(mask_big_pad)