Code example #1
0
    daisy.call(cmd, log_out=log_out, log_err=log_err)

    logger.info('Predict worker finished')


def check_block(blocks_predicted, block):
    """Return True if at least one prediction record exists for *block*."""
    query = {'block_id': block.block_id}
    return blocks_predicted.count(query) >= 1


if __name__ == "__main__":
    predict_config = sys.argv[1]
    worker_config = sys.argv[2]
    data_config = sys.argv[3]

    predict_config_dict = read_predict_config(predict_config)
    worker_config_dict = read_worker_config(worker_config)
    data_config_dict = read_data_config(data_config)

    full_config = predict_config_dict
    full_config.update(worker_config_dict)
    full_config.update(data_config_dict)

    start = time.time()

    predict_blockwise(**full_config)

    end = time.time()
    seconds = end - start

    logger.info('Total time to predict: %f seconds' % seconds)
Code example #2
0
File: prepare_solve.py — Project: nilsec/micron
def set_up_environment(base_dir, experiment, train_number, predict_number,
                       graph_number, solve_number, mount_dirs, singularity,
                       queue, clean_up, reset):
    """Create (or reset) a solve setup directory for the given experiment.

    Reads the predict/data configs of the matching graph setup, optionally
    cleans up a previous solve setup directory and/or resets the solve state
    in the database, then creates a fresh solve setup directory, copies the
    relevant config files and the solve script into it, and writes the
    worker and solve configs.

    Interactive confirmation prompts are only shown when this module is run
    as a script (``__name__ == "__main__"``); when imported, clean-up/reset
    run unconditionally and an already-existing solve setup is a hard error.

    Args:
        base_dir, experiment: root of the experiment directory tree.
        train_number, predict_number, graph_number, solve_number: setup
            numbers identifying the graph setup to read from and the solve
            setup to create.
        mount_dirs, singularity, queue: worker configuration values.
        clean_up: if True, remove an existing solve setup directory first.
        reset: if True, reset the selected/solved state of this solve number.

    Returns:
        Path of the solve setup directory, or None when the user aborts an
        interactive overwrite prompt.

    Raises:
        ValueError: if the graph setup directory does not exist, or the
            solve setup already exists in non-interactive mode.
    """
    graph_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "03_graph/setup_t{}_p{}_g{}".format(train_number, predict_number,
                                            graph_number))
    solve_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "04_solve/setup_t{}_p{}_g{}_s{}".format(train_number, predict_number,
                                                graph_number, solve_number))

    predict_cfg_dict = read_predict_config(
        os.path.join(graph_setup_dir, "predict_config.ini"))
    data_cfg_dict = read_data_config(
        os.path.join(graph_setup_dir, "data_config.ini"))
    # Construct the ROI only to validate the configured geometry; the value
    # itself is not used below.
    Roi(data_cfg_dict["in_offset"], data_cfg_dict["in_size"])
    db_name = predict_cfg_dict["db_name"]
    db_host = predict_cfg_dict["db_host"]

    # Database attribute/collection names are parameterized by the solve and
    # graph numbers so multiple solves can coexist on one graph.
    selected_attr = "selected_{}".format(solve_number)
    solved_attr = "solved_{}".format(solve_number)
    edge_collection = "edges_g{}".format(graph_number)
    node_collection = "nodes"

    # Initialize the selected/solved edge attributes if this solve number
    # was never run on this graph before.
    solved_before = attr_exists(db_name, db_host, edge_collection, solved_attr)
    if not solved_before:
        print("Graph not solved before, build attributes...")
        reset_solve(db_name, db_host, edge_collection, node_collection,
                    selected_attr, solved_attr)

    if clean_up:
        if __name__ == "__main__":
            if click.confirm(
                    'Are you sure you want to remove {} and all its contents?'.
                    format(solve_setup_dir),
                    default=False):
                rmtree(solve_setup_dir)
            else:
                print("Abort clean up")
        else:
            rmtree(solve_setup_dir)

    if reset:
        if __name__ == "__main__":
            if click.confirm(
                    'Are you sure you want to reset solve and selected status in {}_solve_s{}?'
                    .format(db_name, solve_number),
                    default=False):
                reset_solve(db_name, db_host, edge_collection, node_collection,
                            selected_attr, solved_attr)
                reset_step("solve_s{}_g{}".format(solve_number, graph_number),
                           db_name, db_host)

            else:
                print("Abort reset")
        else:
            reset_solve(db_name, db_host, edge_collection, node_collection,
                        selected_attr, solved_attr)
            reset_step("solve_s{}_g{}".format(solve_number, graph_number),
                       db_name, db_host)

    if not os.path.exists(graph_setup_dir):
        raise ValueError("No graph at {}".format(graph_setup_dir))

    if not os.path.exists(solve_setup_dir):
        os.makedirs(solve_setup_dir)

    else:
        if __name__ == "__main__":
            if click.confirm(
                    'Solve setup {} exists already, overwrite?'.format(
                        solve_setup_dir),
                    default=False):
                rmtree(solve_setup_dir)
                os.makedirs(solve_setup_dir)
            else:
                print("Abort.")
                return
        else:
            raise ValueError(
                "Solve setup exists already, choose different predict number or clean up."
            )

    # Carry the graph setup's configs over so the solve step is reproducible.
    copyfile(os.path.join(graph_setup_dir, "predict_config.ini"),
             os.path.join(solve_setup_dir, "predict_config.ini"))
    copyfile(os.path.join(graph_setup_dir, "data_config.ini"),
             os.path.join(solve_setup_dir, "data_config.ini"))
    copyfile(os.path.join(graph_setup_dir, "graph_config.ini"),
             os.path.join(solve_setup_dir, "graph_config.ini"))
    copyfile(
        os.path.join(os.path.abspath(os.path.dirname(__file__)),
                     "solve/solve.py"),
        os.path.join(solve_setup_dir, "solve.py"))

    worker_config = create_worker_config(mount_dirs, singularity, queue)
    solve_config = create_solve_config(solve_number, selected_attr,
                                       solved_attr)

    with open(os.path.join(solve_setup_dir, "worker_config.ini"), "w+") as f:
        worker_config.write(f)
    with open(os.path.join(solve_setup_dir, "solve_config.ini"), "w+") as f:
        solve_config.write(f)

    return solve_setup_dir
Code example #3
0
File: predict.py — Project: nilsec/cosem_experiments
import os
from subprocess import check_call
from funlib.run import run, run_singularity
import logging
from micron import read_predict_config, read_worker_config, read_data_config
import time
import json

# Load the three INI configs that drive this blockwise prediction run.
predict_config = read_predict_config("predict_config.ini")
worker_config = read_worker_config("worker_config.ini")
data_config = read_data_config("data_config.ini")

start = time.time()

# Build the network (mknet.py) first, inside the configured singularity
# container when one is given.  NOTE(review): the config appears to store
# "no container" as the literal string "None", hence the string comparison
# — confirm against the config writer.
if worker_config["singularity_container"] != "None":
    run_singularity("python mknet.py",
                    singularity_image=worker_config["singularity_container"],
                    mount_dirs=worker_config["mount_dirs"],
                    execute=True)

else:
    check_call("python mknet.py", shell=True)

# Run the blockwise prediction script named in the predict config, passing
# absolute paths of the three config files as positional arguments.
check_call("python {} {} {} {}".format(predict_config["blockwise"],
                                       os.path.abspath("predict_config.ini"),
                                       os.path.abspath("worker_config.ini"),
                                       os.path.abspath("data_config.ini")),
           shell=True)

end = time.time()
with open("./time_prediction.json", "w") as f:
Code example #4
0
File: prepare_evaluation.py — Project: nilsec/micron
def set_up_environment(base_dir,
                       experiment,
                       train_number,
                       predict_number,
                       graph_number,
                       solve_number,
                       eval_number,
                       tracing_file=None,
                       tracing_offset=None,
                       tracing_size=None,
                       subsample_factor=None,
                       max_edges=None,
                       distance_threshold=None,
                       optimality_gap=0.0,
                       time_limit=None,
                       voxel_size=None,
                       mount_dirs=None,
                       singularity=None,
                       queue=None,
                       num_cpus=5,
                       num_block_workers=1,
                       num_cache_workers=5):
    """Create an evaluation setup directory for a solved graph.

    Verifies that the matching graph and solve setups exist and that the
    graph has actually been solved (the solved attribute is present on its
    edge collection in the database), then creates the eval setup directory,
    copies all relevant config files plus the evaluation script into it, and
    writes the worker and eval configs.

    Args:
        base_dir, experiment: root of the experiment directory tree.
        train_number, predict_number, graph_number, solve_number,
            eval_number: setup numbers identifying the upstream setups and
            the eval setup to create.
        tracing_file, tracing_offset, tracing_size, subsample_factor,
            max_edges, distance_threshold, optimality_gap, time_limit,
            voxel_size: evaluation parameters forwarded to the eval config.
        mount_dirs, singularity, queue, num_cpus, num_block_workers,
            num_cache_workers: worker configuration values.

    Returns:
        Path of the newly created eval setup directory.

    Raises:
        ValueError: if the graph was never solved, the graph or solve setup
            directory is missing, or the eval setup already exists.
    """
    graph_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "03_graph/setup_t{}_p{}_g{}".format(train_number, predict_number,
                                            graph_number))
    solve_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "04_solve/setup_t{}_p{}_g{}_s{}".format(train_number, predict_number,
                                                graph_number, solve_number))
    eval_setup_dir = os.path.join(
        os.path.join(base_dir, experiment),
        "05_eval/setup_t{}_p{}_g{}_s{}_e{}".format(train_number,
                                                   predict_number,
                                                   graph_number, solve_number,
                                                   eval_number))

    predict_cfg_dict = read_predict_config(
        os.path.join(graph_setup_dir, "predict_config.ini"))
    data_cfg_dict = read_data_config(
        os.path.join(graph_setup_dir, "data_config.ini"))
    # Construct the ROI only to validate the configured geometry; the value
    # itself is not used below.
    Roi(data_cfg_dict["in_offset"], data_cfg_dict["in_size"])
    db_name = predict_cfg_dict["db_name"]
    db_host = predict_cfg_dict["db_host"]

    # Database attribute/collection names are parameterized by the solve and
    # graph numbers, mirroring the solve setup.
    selected_attr = "selected_{}".format(solve_number)
    solved_attr = "solved_{}".format(solve_number)
    edge_collection = "edges_g{}".format(graph_number)
    node_collection = "nodes"

    # Evaluation only makes sense on a solved graph.
    solved_before = attr_exists(db_name, db_host, edge_collection, solved_attr)
    if not solved_before:
        raise ValueError("Graph not solved, run solve before evaluation.")

    if not os.path.exists(graph_setup_dir):
        raise ValueError("No graph at {}".format(graph_setup_dir))

    if not os.path.exists(solve_setup_dir):
        raise ValueError("No solve setup at {}".format(solve_setup_dir))

    if os.path.exists(eval_setup_dir):
        raise ValueError(
            "Eval setup already exists at {}".format(eval_setup_dir))

    os.makedirs(eval_setup_dir)

    # Carry the upstream configs over so the evaluation is reproducible.
    copyfile(os.path.join(graph_setup_dir, "predict_config.ini"),
             os.path.join(eval_setup_dir, "predict_config.ini"))
    copyfile(os.path.join(graph_setup_dir, "data_config.ini"),
             os.path.join(eval_setup_dir, "data_config.ini"))
    copyfile(os.path.join(graph_setup_dir, "graph_config.ini"),
             os.path.join(eval_setup_dir, "graph_config.ini"))
    copyfile(os.path.join(solve_setup_dir, "solve_config.ini"),
             os.path.join(eval_setup_dir, "solve_config.ini"))

    copyfile(
        os.path.join(os.path.abspath(os.path.dirname(__file__)),
                     "post/evaluate.py"),
        os.path.join(eval_setup_dir, "evaluate.py"))

    worker_config = create_worker_config(mount_dirs, singularity, queue,
                                         num_cpus, num_block_workers,
                                         num_cache_workers)

    eval_config = create_eval_config(eval_number, tracing_file, tracing_offset,
                                     tracing_size, subsample_factor, max_edges,
                                     distance_threshold, optimality_gap,
                                     time_limit, voxel_size)

    with open(os.path.join(eval_setup_dir, "worker_config.ini"), "w+") as f:
        worker_config.write(f)
    with open(os.path.join(eval_setup_dir, "eval_config.ini"), "w+") as f:
        eval_config.write(f)

    return eval_setup_dir