Example no. 1
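All examples on this page are stand-alone functions lifted from larger scripts and omit their module-level imports. A minimal sketch of the imports they assume (the exact import location of MulticutSegmentationWorkflow is an assumption based on the cluster_tools package, not taken from the original scripts):

# assumed module-level imports for the examples below; the import path of
# MulticutSegmentationWorkflow is an assumption, not copied from the originals
import os
import json

import luigi
import z5py  # only needed by the examples that read n5 data for viewing

from cluster_tools.workflows import MulticutSegmentationWorkflow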
    def setUp(self):
        os.makedirs(self.tmp_folder, exist_ok=True)
        os.makedirs(self.config_folder, exist_ok=True)
        config = MulticutSegmentationWorkflow.get_config()

        global_config = config['global']
        global_config['shebang'] = self.shebang
        global_config['block_shape'] = self.block_shape
        with open(os.path.join(self.config_folder, 'global.config'), 'w') as f:
            json.dump(global_config, f)
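The setUp fragment above refers to attributes defined elsewhere on the test class (tmp_folder, config_folder, shebang, block_shape). A hypothetical sketch of how such a class might declare them; the values are illustrative only:

import unittest

class TestMulticutWorkflow(unittest.TestCase):
    # illustrative values; the real test defines these elsewhere
    tmp_folder = './tmp'
    config_folder = './configs'
    block_shape = [10, 256, 256]
    shebang = '#! /path/to/conda/env/bin/python'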
Example no. 2
def workflow(target, max_jobs, max_threads):
    # write the global config
    global_config = MulticutSegmentationWorkflow.get_config()['global']
    os.makedirs('configs', exist_ok=True)
    shebang = "#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python"
    global_config.update({'block_shape': 3 * [256],
                          'shebang': shebang})
    with open('./configs/global.config', 'w') as f:
        json.dump(global_config, f)

    run_mc(target, max_jobs, max_threads)
Example no. 3
def run_wf(sample, max_jobs, target='local'):

    tmp_folder = './tmp_%s' % sample
    input_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_new/sample%s.n5' % sample
    exp_path = 'sample%s_exp.n5' % sample
    input_key = 'predictions/full_affs'
    mask_key = 'masks/original_mask'
    ws_key = 'segmentation/watershed'

    rf_path = ''

    configs = MulticutSegmentationWorkflow.get_config(False)

    if not os.path.exists('config'):
        os.mkdir('config')

    roi_begin, roi_end = None, None

    global_config = configs['global']
    global_config.update({
        'shebang':
        "#! /groups/saalfeld/home/papec/Work/software/conda/miniconda3/envs/cluster_env/bin/python",
        'roi_begin': roi_begin,
        'roi_end': roi_end
    })
    with open('./config/global.config', 'w') as f:
        json.dump(global_config, f)

    subprob_config = configs['solve_subproblems']
    subprob_config.update({'weight_edges': True, 'threads_per_job': max_jobs})
    with open('./config/solve_subproblems.config', 'w') as f:
        json.dump(subprob_config, f)

    feat_config = configs['block_edge_features']
    feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
    with open('./config/block_edge_features.config', 'w') as f:
        json.dump(feat_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    tasks = [
        'merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
        'solve_subproblems', 'reduce_problem', 'solve_global'
    ]

    for tt in tasks:
        config = configs[tt]
        n_threads = max_jobs if tt != 'reduce_problem' else 4
        config.update({'threads_per_job': n_threads})
        with open('./config/%s.config' % tt, 'w') as f:
            json.dump(config, f)

    ret = luigi.build([
        MulticutSegmentationWorkflow(input_path=input_path,
                                     input_key=input_key,
                                     mask_path=input_path,
                                     mask_key=mask_key,
                                     ws_path=input_path,
                                     ws_key=ws_key,
                                     graph_path=exp_path,
                                     features_path=exp_path,
                                     costs_path=exp_path,
                                     problem_path=exp_path,
                                     node_labels_path=exp_path,
                                     node_labels_key='node_labels',
                                     output_path=input_path,
                                     output_key='segmentation/multicut',
                                     use_decomposition_multicut=False,
                                     skip_ws=False,
                                     rf_path=rf_path,
                                     n_scales=1,
                                     config_dir='./config',
                                     tmp_folder=tmp_folder,
                                     target=target,
                                     max_jobs=max_jobs)
    ],
                      local_scheduler=True)
    assert ret, "Sample %s failed" % sample
Example no. 4
def initial_mc(max_jobs, max_threads, tmp_folder, target='slurm'):

    n_scales = 1
    input_path = '/g/kreshuk/data/FIB25/cutout.n5'
    exp_path = './exp_data/exp_data.n5'
    input_key = 'volumes/affinities'

    ws_key = 'volumes/segmentation/watershed'
    out_key = 'volumes/segmentation/multicut'
    node_labels_key = 'node_labels/multicut'

    configs = MulticutSegmentationWorkflow.get_config()

    config_folder = './config'
    if not os.path.exists(config_folder):
        os.mkdir(config_folder)

    shebang = "#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python"
    global_config = configs['global']
    global_config.update({'shebang': shebang})
    with open(os.path.join(config_folder, 'global.config'), 'w') as f:
        json.dump(global_config, f)

    ws_config = configs['watershed']
    ws_config.update({
        'threshold': .3,
        'apply_presmooth_2d': False,
        'sigma_weights': 2.,
        'apply_dt_2d': False,
        'sigma_seeds': 2.,
        'apply_ws_2d': False,
        'two_pass': False,
        'alpha': .85,
        'halo': [25, 25, 25],
        'time_limit': 90,
        'mem_limit': 8,
        'size_filter': 100,
        'channel_begin': 0,
        'channel_end': 3
    })
    with open(os.path.join(config_folder, 'watershed.config'), 'w') as f:
        json.dump(ws_config, f)

    subprob_config = configs['solve_subproblems']
    subprob_config.update({
        'weight_edges': True,
        'threads_per_job': max_threads,
        'time_limit': 180,
        'mem_limit': 16
    })
    with open(os.path.join(config_folder, 'solve_subproblems.config'),
              'w') as f:
        json.dump(subprob_config, f)

    feat_config = configs['block_edge_features']
    feat_config.update({
        'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-4, 0, 0], [0, -4, 0],
                    [0, 0, -4]]
    })
    with open(os.path.join(config_folder, 'block_edge_features.config'),
              'w') as f:
        json.dump(feat_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    beta = .5
    tasks = [
        'merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
        'reduce_problem'
    ]
    for tt in tasks:
        config = configs[tt]
        config.update({
            'threads_per_job': max_threads,
            'mem_limit': 64,
            'time_limit': 260,
            'weight_edges': True,
            'beta': beta
        })
        with open(os.path.join(config_folder, '%s.config' % tt), 'w') as f:
            json.dump(config, f)

    time_limit_solve = 24 * 60 * 60
    config = configs['solve_global']
    config.update({
        'threads_per_job': max_threads,
        'mem_limit': 64,
        'time_limit': time_limit_solve / 60 + 240,
        'time_limit_solver': time_limit_solve
    })
    with open(os.path.join(config_folder, 'solve_global.config'), 'w') as f:
        json.dump(config, f)

    task = MulticutSegmentationWorkflow(input_path=input_path,
                                        input_key=input_key,
                                        ws_path=input_path,
                                        ws_key=ws_key,
                                        problem_path=exp_path,
                                        node_labels_key=node_labels_key,
                                        output_path=input_path,
                                        output_key=out_key,
                                        n_scales=n_scales,
                                        config_dir=config_folder,
                                        tmp_folder=tmp_folder,
                                        target=target,
                                        max_jobs=max_jobs,
                                        max_jobs_multicut=1,
                                        skip_ws=False)
    ret = luigi.build([task], local_scheduler=True)
    assert ret, "Multicut segmentation failed"
Example no. 5
def run_mc(target, max_jobs, max_threads):
    input_path = '/g/kreshuk/data/arendt/sponge/data.n5'
    input_key = 'volumes/predictions/nn/affs'

    exp_path = '/g/kreshuk/data/arendt/sponge/exp_data/mc.n5'
    ws_key = 'volumes/segmentation/nn/watershed'
    seg_key = 'volumes/segmentation/nn/multicut'
    assignment_key = 'node_labels/nn/multicut'

    config_folder = './configs'
    configs = MulticutSegmentationWorkflow.get_config()

    graph_config = configs['initial_sub_graphs']
    graph_config.update({'qos': 'normal', 'mem_limit': 4})
    # write the updated graph config so that it actually takes effect
    with open(os.path.join(config_folder, 'initial_sub_graphs.config'), 'w') as f:
        json.dump(graph_config, f)

    ws_config = configs['watershed']
    ws_config.update({'threshold': .25, 'apply_dt_2d': False, 'apply_ws_2d': False,
                      'size_filter': 100, 'alpha': .9, 'non_maximum_suppression': True,
                      'mem_limit': 8})
    with open(os.path.join(config_folder, 'watershed.config'), 'w') as f:
        json.dump(ws_config, f)

    subprob_config = configs['solve_subproblems']
    subprob_config.update({'threads_per_job': max_threads,
                           'time_limit': 720,
                           'mem_limit': 64,
                           'qos': 'normal',
                           'time_limit_solver': 60*60*6})
    with open(os.path.join(config_folder, 'solve_subproblems.config'), 'w') as f:
        json.dump(subprob_config, f)

    feat_config = configs['block_edge_features']
    feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
    with open(os.path.join(config_folder, 'block_edge_features.config'), 'w') as f:
        json.dump(feat_config, f)

    weight_edges = True
    exponent = 1.
    costs_config = configs['probs_to_costs']
    costs_config.update({'weight_edges': weight_edges,
                         'weighting_exponent': exponent,
                         'mem_limit': 16, 'qos': 'normal',
                         'beta': 0.5})
    with open(os.path.join(config_folder, 'probs_to_costs.config'), 'w') as f:
        json.dump(costs_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    tasks = ['merge_sub_graphs', 'merge_edge_features', 'map_edge_ids',
             'reduce_problem', 'solve_global']
    for tt in tasks:
        config = configs[tt]
        config.update({'threads_per_job': max_threads if tt != 'reduce_problem' else 8,
                       'mem_limit': 128,
                       'time_limit': 1440,
                       'qos': 'normal',
                       'agglomerator': 'decomposition-gaec',
                       'time_limit_solver': 60*60*15})
        with open(os.path.join(config_folder, '%s.config' % tt), 'w') as f:
            json.dump(config, f)

    n_scales = 1
    max_jobs_mc = 6
    tmp_folder = './tmp_mc'
    task = MulticutSegmentationWorkflow(input_path=input_path, input_key=input_key,
                                        ws_path=input_path, ws_key=ws_key,
                                        problem_path=exp_path,
                                        node_labels_key=assignment_key,
                                        output_path=input_path,
                                        output_key=seg_key,
                                        n_scales=n_scales,
                                        config_dir=config_folder,
                                        tmp_folder=tmp_folder,
                                        target=target,
                                        max_jobs=max_jobs,
                                        max_jobs_multicut=max_jobs_mc,
                                        sanity_checks=False,
                                        skip_ws=False)
    ret = luigi.build([task], local_scheduler=True)
    assert ret, "Multicut failed"
Example no. 6
def run_mc(input_path,
           tmp_folder,
           max_jobs,
           n_scales=1,
           have_watershed=True,
           target='local',
           from_affinities=False,
           invert_inputs=False):
    """ Run multicut on cremi sample or similar data.

    You can obtain the data used for this example from
    https://drive.google.com/file/d/1E_Wpw9u8E4foYKk7wvx5RPSWvg_NCN7U/view?usp=sharing

    Args:
        input_path: n5 or hdf5 container with input data
            (boundary maps or affinity maps)
        tmp_folder: temporary folder to store job files
        max_jobs: maximal number of jobs
        n_scales: number of scales for hierarchical solver (0 will perform vanilla multicut)
        have_watershed: flag to indicate if the watershed is computed already
        target: target platform, either 'local' (computation on the local host),
            'slurm' (cluster running slurm) or 'lsf' (cluster running lsf)
        from_affinities: whether to use affinity maps or boundary maps
        invert_inputs: whether to invert the inputs; this needs to be set to true
            if HIGH boundary evidence corresponds to LOWER values in the boundary /
            affinity maps
    """

    # path with the watershed data, can be the same as input_path
    ws_path = input_path

    # key for input, and watershed
    input_key = 'volumes/affinities'
    ws_key = 'volumes/segmentation/watershed'

    # path to n5 or hdf5 container to which the output segmentation should be written
    # can be the same as input_path
    out_path = input_path
    out_key = 'volumes/segmentation/multicut'

    # path and key for mask
    # mask can be used to exclude parts of the volume from segmentation
    # leave blank if you don't have a mask
    mask_path = ''
    mask_key = ''

    # n5 container for intermediate results like graph-structure or features
    exp_path = './sampleA_exp.n5'

    # config folder holds configurations for workflow steps stored as json
    configs = MulticutSegmentationWorkflow.get_config()
    config_folder = 'configs'
    os.makedirs(config_folder, exist_ok=True)

    # global workflow config
    # python interpreter of conda environment with dependencies, see
    # https://github.com/constantinpape/cluster_tools/blob/master/environment.yml
    shebang = "#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python"

    # block shape used for parallelization
    block_shape = [30, 256, 256]
    global_config = configs['global']
    global_config.update({'shebang': shebang, 'block_shape': block_shape})
    with open('./configs/global.config', 'w') as f:
        json.dump(global_config, f)

    # config for the watershed calculation
    ws_config = configs['watershed']
    ws_config.update({
        'threshold': 0.25,
        'apply_ws_2d': True,
        'apply_dt_2d': True
    })
    if from_affinities:
        ws_config.update({
            'channel_begin': 0,
            'channel_end': 3,
            'agglomerate_channels': 'max'
        })
    with open('./configs/watershed.config', 'w') as f:
        json.dump(ws_config, f)

    # config for edge feature calculation
    feat_config = configs['block_edge_features']
    # specify offsets if you have affinity features.
    if from_affinities:
        feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
    with open('./configs/block_edge_features.config', 'w') as f:
        json.dump(feat_config, f)

    # config for converting edge probabilities to edge costs
    costs_config = configs['probs_to_costs']
    costs_config.update({
        'threads_per_job': max_jobs,
        'weight_edges': True,
        'invert_inputs': invert_inputs
    })
    with open('./configs/probs_to_costs.config', 'w') as f:
        json.dump(costs_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    tasks = [
        'merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
        'solve_subproblems', 'reduce_problem', 'solve_global'
    ]
    for tt in tasks:
        config = configs[tt]
        config.update({'threads_per_job': max_jobs, 'mem_limit': 8})
        with open('./configs/%s.config' % tt, 'w') as f:
            json.dump(config, f)

    luigi.build([
        MulticutSegmentationWorkflow(input_path=input_path,
                                     input_key=input_key,
                                     ws_path=ws_path,
                                     ws_key=ws_key,
                                     mask_path=mask_path,
                                     mask_key=mask_key,
                                     problem_path=exp_path,
                                     node_labels_key='node_labels',
                                     output_path=out_path,
                                     output_key=out_key,
                                     n_scales=n_scales,
                                     config_dir=config_folder,
                                     tmp_folder=tmp_folder,
                                     target=target,
                                     skip_ws=have_watershed,
                                     max_jobs=max_jobs)
    ],
                local_scheduler=True)
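A hypothetical invocation of the function above; the path and job count are placeholders, not values from the original scripts:

if __name__ == '__main__':
    run_mc(input_path='./sampleA.n5',   # placeholder n5 container with affinity maps
           tmp_folder='./tmp_mc',
           max_jobs=8,
           n_scales=1,
           have_watershed=False,
           target='local',
           from_affinities=True,
           invert_inputs=False)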
Example no. 7
def run_wf(block_id, tmp_folder, max_jobs, target='local', with_rf=False):

    input_path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/validation/predictions/val_block_0%i_unet_lr_v3_bmap.n5' % block_id
    # input_path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/validation/predictions/val_block_0%i_unet_lr_v3_ds122.n5' % block_id
    input_key = 'data'

    mask_path = './test_val_mask.h5'
    mask_key = 'data'

    # mask_path = ''
    # mask_key = ''

    if with_rf:
        exp_path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/validation/segmentation/val_block_0%i_rf.n5' % block_id
        rf_path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/predictions/rf_v1.pkl'

    else:
        exp_path = '/g/kreshuk/data/arendt/platyneris_v1/membrane_training_data/validation/segmentation/val_block_0%i.n5' % block_id
        rf_path = ''

    use_decomposer = False
    configs = MulticutSegmentationWorkflow.get_config(use_decomposer)

    if not os.path.exists('config'):
        os.mkdir('config')

    roi_begin, roi_end = None, None
    # roi_begin = [50, 0, 0]
    # roi_end = [100, 2048, 2048]

    global_config = configs['global']
    global_config.update({
        'shebang':
        "#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env/bin/python",
        'roi_begin': roi_begin,
        'roi_end': roi_end
    })
    with open('./config/global.config', 'w') as f:
        json.dump(global_config, f)

    ws_config = configs['watershed']
    # TODO completely debug the two-pass watershed
    two_pass_ws = False
    ws_config.update({
        'threshold': .25,
        'apply_presmooth_2d': False,
        'sigma_weights': (1., 2., 2.),
        'apply_dt_2d': False,
        'pixel_pitch': (2, 1, 1),
        'two_pass': two_pass_ws,
        'halo': [0, 50, 50]
    })
    with open('./config/watershed.config', 'w') as f:
        json.dump(ws_config, f)

    subprob_config = configs['solve_subproblems']
    subprob_config.update({'weight_edges': False, 'threads_per_job': max_jobs})
    with open('./config/solve_subproblems.config', 'w') as f:
        json.dump(subprob_config, f)

    feat_config = configs['block_edge_features']
    if with_rf:
        feat_config.update({
            'filters': ['gaussianSmoothing'],
            'sigmas': [(0.5, 1., 1.), (1., 2., 2.), (2., 4., 4.),
                       (4., 8., 8.)],
            'halo': (8, 16, 16),
            'channel_agglomeration':
            'mean'
        })

    else:
        # feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
        feat_config.update({'offsets': None})
        # feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
        #                                 [-2, 0, 0], [0, -4, 0], [0, 0, -4],
        #                                 [-4, 0, 0], [0, -8, 0], [0, 0, -8],
        #                                 [-12, 0, 0], [0, -24, 0], [0, 0, -24]]})
        # feat_config.update({'filters': ['gaussianSmoothing'],
        #                     'sigmas': [(2., 4., 4.)],
        #                     'halo': (8, 16, 16),
        #                     'channel_agglomeration': 'max'})
    with open('./config/block_edge_features.config', 'w') as f:
        json.dump(feat_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    if use_decomposer:
        tasks = [
            'merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
            'solve_subproblems', 'decompose', 'insert'
        ]
    else:
        tasks = [
            'merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
            'solve_subproblems', 'reduce_problem', 'solve_global'
        ]

    for tt in tasks:
        config = configs[tt]
        config.update({'threads_per_job': max_jobs})
        with open('./config/%s.config' % tt, 'w') as f:
            json.dump(config, f)

    ret = luigi.build(
        [
            MulticutSegmentationWorkflow(
                input_path=input_path,
                input_key=input_key,
                mask_path=mask_path,
                mask_key=mask_key,
                # ws_path=exp_path, ws_key='volumes/watershed',
                ws_path=input_path,
                ws_key='watershed',
                graph_path=exp_path,
                features_path=exp_path,
                costs_path=exp_path,
                problem_path=exp_path,
                node_labels_path=exp_path,
                node_labels_key='node_labels',
                output_path=exp_path,
                output_key='volumes/segmentation',
                use_decomposition_multicut=use_decomposer,
                rf_path=rf_path,
                n_scales=2,
                config_dir='./config',
                tmp_folder=tmp_folder,
                target=target,
                skip_ws=True,
                max_jobs=max_jobs)
        ],
        local_scheduler=True)
    # ret = True
    # view the results if we are local and the
    # tasks were successful
    if ret and target == 'local':
        print("Starting viewer")
        from cremi_tools.viewer.volumina import view

        with z5py.File(input_path) as f:
            ds = f[input_key]
            ds.n_threads = max_jobs
            affs = ds[:]
            if affs.ndim == 4:
                affs = affs.transpose((1, 2, 3, 0))

            ds = f['watershed']
            ds.n_threads = max_jobs
            ws = ds[:]

        data = [affs, ws]

        with z5py.File(exp_path) as f:
            # ds = f['volumes/watershed']
            # ds.n_threads = max_jobs
            # ws = ds[:]
            # data.append(ws)
            # shape = ds.shape

            if 'volumes/segmentation' in f:
                ds = f['volumes/segmentation']
                ds.n_threads = max_jobs
                seg = ds[:]
                data.append(seg)

        # with h5py.File('./test_val_mask.h5') as f:
        #     ds_mask = f['data'][:]
        # interp_mask = InterpolatedVolume(ds_mask, shape)
        # full_mask = interp_mask[:]
        # assert full_mask.shape == shape
        # data.append(full_mask)

        view(data)
Example no. 8
def run_mc(timepoint, skip_ws):

    target = 'local'
    max_jobs = 48
    max_threads = 16

    config_folder = './configs'
    os.makedirs(config_folder, exist_ok=True)
    tmp_folder = './tmp_mc_t%03i' % timepoint

    path = '/g/kreshuk/pape/Work/data/lifted_priors/plant_data/t%03i.n5' % timepoint
    exp_path = os.path.join(
        '/g/kreshuk/pape/Work/data/lifted_priors/plant_data/exp_data',
        'mc_t%03i.n5' % timepoint)

    input_key = 'volumes/predictions/boundaries'
    ws_key = 'volumes/segmentation/watershed'
    assignment_key = 'node_labels/multicut'
    out_key = 'volumes/segmentation/multicut'

    configs = MulticutSegmentationWorkflow.get_config()

    global_conf = configs['global']
    shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python'
    block_shape = [50, 512, 512]
    global_conf.update({'shebang': shebang, 'block_shape': block_shape})
    with open(os.path.join(config_folder, 'global.config'), 'w') as f:
        json.dump(global_conf, f)

    conf = configs['watershed']
    conf.update({
        'threshold': .25,
        'apply_dt_2d': False,
        'apply_ws_2d': False,
        'sigma_seeds': 2.,
        'sigma_weights': 2.,
        'alpha': .9,
        'min_seg_size': 100,
        'non_maximum_suppression': True
    })
    with open(os.path.join(config_folder, 'watershed.config'), 'w') as f:
        json.dump(conf, f)

    exponent = 1.
    weight_edges = True
    costs_config = configs['probs_to_costs']
    costs_config.update({
        'weight_edges': weight_edges,
        'weighting_exponent': exponent
    })
    with open(os.path.join(config_folder, 'probs_to_costs.config'), 'w') as f:
        json.dump(costs_config, f)

    # set the number of threads for the multi-threaded (merge / solver) jobs
    tasks = [
        'merge_sub_graphs', 'merge_edge_features', 'map_edge_ids',
        'reduce_problem', 'solve_global'
    ]
    for tt in tasks:
        config = configs[tt]
        config.update({
            'threads_per_job': max_threads,
            'agglomerator': 'kernighan-lin'
        })
        with open(os.path.join(config_folder, '%s.config' % tt), 'w') as f:
            json.dump(config, f)

    n_scales = 0
    task = MulticutSegmentationWorkflow(tmp_folder=tmp_folder,
                                        config_dir=config_folder,
                                        target=target,
                                        max_jobs=max_jobs,
                                        max_jobs_multicut=1,
                                        input_path=path,
                                        input_key=input_key,
                                        ws_path=path,
                                        ws_key=ws_key,
                                        problem_path=exp_path,
                                        node_labels_key=assignment_key,
                                        output_path=path,
                                        output_key=out_key,
                                        n_scales=n_scales,
                                        sanity_checks=False,
                                        skip_ws=skip_ws)
    ret = luigi.build([task], local_scheduler=True)
    assert ret, "Multicut failed"
Example no. 9
def run(shebang, with_rf=False):
    input_path = '/home/cpape/Work/data/isbi2012/cluster_example/isbi_train.n5'
    example_path = './isbi_exp.n5'
    input_key = 'volumes/affinities'

    max_jobs = 8
    configs = MulticutSegmentationWorkflow.get_config()

    # make sure the config directory exists before writing config files
    os.makedirs('./configs', exist_ok=True)

    global_conf = configs['global']
    global_conf.update({'shebang': shebang, 'block_shape': (25, 256, 256)})
    with open('./configs/global.config', 'w') as f:
        json.dump(global_conf, f)

    ws_conf = configs['watershed']
    ws_conf.update({'sigma_weights': 0, 'channel_begin': 1, 'channel_end': 3})
    with open('./configs/watershed.config', 'w') as f:
        json.dump(ws_conf, f)

    if with_rf:
        feat_config = configs['block_edge_features']
        feat_config.update({
            'filters': ['gaussianSmoothing', 'laplacianOfGaussian'],
            'sigmas': [1., 2., 4.],
            'apply_in_2d':
            True
        })
        rf_path = './rf.pkl'
    else:
        feat_config = configs['block_edge_features']
        feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]})
        rf_path = ''

    with open('./configs/block_edge_features.config', 'w') as f:
        json.dump(feat_config, f)

    ret = luigi.build([
        MulticutSegmentationWorkflow(input_path=input_path,
                                     input_key=input_key,
                                     ws_path=example_path,
                                     ws_key='volumes/watersheds',
                                     problem_path=example_path,
                                     node_labels_path=example_path,
                                     node_labels_key='node_labels',
                                     output_path=example_path,
                                     output_key='volumes/segmentation',
                                     rf_path=rf_path,
                                     n_scales=1,
                                     config_dir='./configs',
                                     tmp_folder='./tmp',
                                     target='local',
                                     max_jobs=max_jobs)
    ],
                      local_scheduler=True)
    if ret:
        from cremi_tools.viewer.volumina import view
        with z5py.File(input_path) as f:
            affs = f['volumes/affinities'][:3].transpose((1, 2, 3, 0))
        with z5py.File(example_path) as f:
            ws = f['volumes/watersheds'][:]
            data = [affs, ws]
            if 'volumes/segmentation' in f:
                seg = f['volumes/segmentation'][:]
                data.append(seg)
        view(data)