def train():
    """Train a Random Forest edge classifier for DMV segmentation.

    For each of the two hard-coded training crops under the module-level
    ``path_train``, loads the raw data, membrane (boundary) predictions,
    DMV ground truth and supervoxels, relabels the supervoxels to
    consecutive ids, and fits the edge classifier via
    ``elf_workflow.edge_training``.

    :return: trained Random Forest model returned by ``edge_training``
    """
    print("start edge training")

    def _load(rel_path, dtype):
        # Read the full 'data' dataset and close the handle promptly
        # (the original leaked every file handle it opened).
        with open_file(path_train + rel_path, 'r') as f:
            return f['data'][:].astype(dtype)

    raw_train = []
    mem_train = []
    dmv_train = []
    sv_train = []
    new_sv_labels_train = []

    for name in ['s4a2_t010', 's4a2_t012']:
        # raw data
        raw_train.append(_load(name + '/raw_crop_center256_256_256.h5', np.float32))
        # membrane prediction -- boundaries
        mem_train.append(_load(name + '/mem_crop_center256_256_256.h5', np.float32))
        # ground truth for DMVs
        dmv_train.append(_load(name + '/results/raw_DMV.h5', np.float32))
        # supervoxels -- integer ids, kept as uint64 for relabeling
        sv_train.append(_load(name + '/sv_crop_center256_256_256.h5', 'uint64'))

        # relabel supervoxels to consecutive ids; maxlabel/mapping are unused
        newlabels_train, _, _ = vigra.analysis.relabelConsecutive(sv_train[-1])
        new_sv_labels_train.append(newlabels_train)

    # do edge training
    rf = elf_workflow.edge_training(raw=raw_train,
                                    boundaries=mem_train,
                                    labels=dmv_train,
                                    use_2dws=False,
                                    watershed=new_sv_labels_train)
    print("edge training is done")
    return rf
예제 #2
0
    viewer.add_image(supervoxels, name='supervoxels')

#shape of ground truth
nx, ny, nz = dmv.shape
#reshape raw and boundaries for training
#because of the error on edge-training try to use a block 100, 100, 100
# NOTE(review): the slices below take a 256^3 center crop (128:384), not the
# 100^3 block mentioned above -- confirm which size was actually intended.
raw_train = raw[128:384, 128:384, 128:384]  #.astype(np.float32)
membrane_prediction_train = membrane_prediction[128:384, 128:384,
                                                128:384]  #.astype(np.float32)
#dmv_train = dmv.astype(np.float32)
#sv_train = supervoxels.astype(np.float32)

#do edge training
# Fit the Random Forest edge classifier: DMV ground truth as labels,
# supervoxels as the (3D) watershed oversegmentation.
rf = elf_workflow.edge_training(raw=raw_train,
                                boundaries=membrane_prediction_train,
                                labels=dmv,
                                use_2dws=False,
                                watershed=supervoxels)

print("edge training is done")

#raw_segment = raw.astype(np.float32)
#membrane_predict_segment = (membrane_prediction).astype(np.float32)

#try blockwise segmentation on the same file (but on the entire one, not only the center cube)
segmentation = elf_workflow.multicut_segmentation(
    raw=raw,
    boundaries=membrane_prediction,
    rf=rf,
    use_2dws=False,
    multicut_solver='blockwise-multicut',
예제 #3
0
def get_rf_model(output_folder=None,
                 input_raw_files=None,
                 input_mem_files=None,
                 input_sv_files=None,
                 input_gt_files=None):
    """
    Load a cached Random Forest edge classifier, or train and cache a new one.

    :param output_folder: path to folder where to write a file with the model
    :param input_raw_files: list or just string describing path(s) to raw file(s) for training
    :param input_mem_files: list or just string describing path(s) to membrane prediction file(s) for training
    :param input_sv_files: list or just string describing path(s) to supervoxels file(s) for training
    :param input_gt_files: list or just string describing path(s) to ground truth file(s) for training
    :return: Random Forest model
    """
    if not output_folder:
        output_folder = '/g/schwab/Viktoriia/src/source/'
    rf_save_path = output_folder + 'rf.pkl'
    # reuse a previously trained model if one is cached on disk
    if os.path.exists(rf_save_path):
        with open(rf_save_path, 'rb') as f:
            return pickle.load(f)

    # Default training inputs; the same crops serve all four input kinds,
    # so define the path and crop names once instead of per-branch.
    path_train = '/g/emcf/common/5792_Sars-Cov-2/Exp_070420/FIB-SEM/segmentation/segmentation_results/'
    names = [
        's4a2_t016', 's4a2_t022', 's4a2_t028', 's4a2_t029', 's4a2_t033',
        's4a2_t034', 's4a2_t035', 's4a2_t032'
    ]
    if not input_raw_files:
        input_raw_files = [
            path_train + i + '/raw_crop_center256_256_256.h5' for i in names
        ]
    if not input_mem_files:
        input_mem_files = [
            path_train + i + '/mem_crop_center256_256_256.h5' for i in names
        ]
    if not input_sv_files:
        input_sv_files = [
            path_train + i + '/sv_crop_center256_256_256.h5' for i in names
        ]
    if not input_gt_files:
        input_gt_files = [
            path_train + i + '/results/raw_MITO.h5' for i in names
        ]

    def _read(filename, dtype):
        # Read the full 'data' dataset and close the handle promptly
        # (the original leaked every file handle it opened).
        with open_file(filename, 'r') as f:
            return f['data'][:].astype(dtype)

    if not isinstance(input_raw_files, str):
        # list-of-files mode: one training volume per entry
        n = len(input_raw_files)
        assert len(input_mem_files) == n
        assert len(input_sv_files) == n
        assert len(input_gt_files) == n

        raw_train = [_read(p, np.float32) for p in input_raw_files]
        mem_train = [_read(p, np.float32) for p in input_mem_files]
        gt_train = [_read(p, np.float32) for p in input_gt_files]
        # supervoxels kept as uint64 ids for relabeling
        sv_train = [_read(p, 'uint64') for p in input_sv_files]
        # relabel each supervoxel volume to consecutive ids;
        # maxlabel/mapping from relabelConsecutive are unused
        new_sv_labels_train = [
            vigra.analysis.relabelConsecutive(sv)[0] for sv in sv_train
        ]
    else:
        # single-file mode: one training volume given as plain path strings
        raw_train = _read(input_raw_files, np.float32)
        mem_train = _read(input_mem_files, np.float32)
        gt_train = _read(input_gt_files, np.float32)
        sv_train = _read(input_sv_files, 'uint64')
        new_sv_labels_train, _, _ = vigra.analysis.relabelConsecutive(sv_train)

    # do edge training
    rf = elf_workflow.edge_training(raw=raw_train,
                                    boundaries=mem_train,
                                    labels=gt_train,
                                    use_2dws=False,
                                    watershed=new_sv_labels_train)
    # cache the trained model for subsequent calls
    with open(rf_save_path, 'wb') as f:
        pickle.dump(rf, f)
    return rf
예제 #4
0
    #print(len(np.unique(sv_train[-1])))
    #print(np.max(sv_train[-1]))

    new_labels_train, maxlabel, mapping = vigra.analysis.relabelConsecutive(
        sv_train[-1])

    #print(np.unique(new_labels_train))
    #print(len(np.unique(new_labels_train)))
    #print(np.max(new_labels_train), maxlabel)

    new_sv_labels_train.append(new_labels_train)

#do edge training
# Fit the Random Forest edge classifier on the lists of training volumes
# accumulated by the preceding loop (raw, membranes, DMV ground truth,
# and consecutively-relabeled supervoxels as the watershed input).
rf = elf_workflow.edge_training(raw=raw_train,
                                boundaries=mem_train,
                                labels=dmv_train,
                                use_2dws=False,
                                watershed=new_sv_labels_train)

print("edge training is done")

# get validation data
#raw_test = []
#mem_test = []
#dmv_test = []
#sv_test = []
#ground_truth = []
#get data for training
# NOTE(review): comment says "training" but the commented-out lists above
# suggest this section loads validation data -- confirm intent.
data_path_1 = '/scratch/emcf/segmentation_inputs/'
names = ['s4a2_t002', 's4a2_t008', 's4a2_t018', 's4a2_t024', 's4a2_t028']
for name in names: