Example #1
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: tv-train --hypes hypes.json")
        exit(1)

    if FLAGS.gpus is None:
        if 'TV_USE_GPUS' in os.environ:
            if os.environ['TV_USE_GPUS'] == 'force':
                logging.error('Please specify a GPU.')
                logging.error('Usage: tv-train --gpus <ids>')
                exit(1)
            else:
                gpus = os.environ['TV_USE_GPUS']
                logging.info("GPUs are set to: %s", gpus)
                os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    else:
        logging.info("GPUs are set to: %s", FLAGS.gpus)
        os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    logging.info("Initialize training folder")
    initialize_training_folder(hypes)
    maybe_download_and_extract(hypes)
    logging.info("Start training")
    do_training(hypes)
Example #2
def main(_):
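    """Fine-tune a KittiSeg model using the hyperparameter file passed via --hypes."""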
    utils.set_gpus_to_use()

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start finetuning")
    do_finetuning(hypes)
Example #3
def main(_):
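    """Evaluate the pretrained run RUNS/<FLAGS.RUN> on validation data and create test-set output."""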
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    kitti_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("tv-analysis has a much cleaner interface.")
Example #4
def main(_):
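    """Train a KittiBox model using the hyperparameter file passed via --hypes."""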
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    #utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiBox')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    #train.maybe_download_and_extract(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #5
def test_set_dirs():
    """Test if setting plugins works."""
    hype_file = "examples/cifar10_minimal.json"
    with open(hype_file, 'r') as f:
        import json
        hypes = json.load(f)

    from tensorvision.utils import set_dirs

    set_dirs(hypes, hype_file)
Example #6
def main():
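    """Train an FCN8 segmentation model using the fixed config file ../config/fcn8_seg.json."""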
    with open('../config/fcn8_seg.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    utils.set_dirs(hypes, '../config/fcn8_seg.json')
    utils._add_paths_to_sys(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    print('start')
    train.do_training(hypes)
    print('end')
Example #7
def test_set_dirs():
    """Test if setting plugins works."""
    hype_file = "examples/cifar10_minimal.json"
    with open(hype_file, 'r') as f:
        import json
        hypes = json.load(f)

    from tensorvision.utils import set_dirs
    import tensorflow as tf
    flags = tf.app.flags
    flags.DEFINE_string('name', 'debug', 'Append a name Tag to run.')

    tf.app.flags.DEFINE_boolean('save', True, (''))

    set_dirs(hypes, hype_file)
Example #8
def test_set_dirs():
    """Test if setting plugins works."""
    hype_file = "examples/cifar10_minimal.json"
    with open(hype_file, 'r') as f:
        import json
        hypes = json.load(f)

    from tensorvision.utils import set_dirs
    import tensorflow as tf
    flags = tf.app.flags
    flags.DEFINE_string('name', 'debug',
                        'Append a name Tag to run.')

    tf.app.flags.DEFINE_boolean(
        'save', True, (''))

    set_dirs(hypes, hype_file)
Example #9
def main(_):
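    """Run united (MultiNet) training; build a new model from --hypes or resume from --logdir if given."""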
    utils.set_gpus_to_use()

    load_weights = tf.app.flags.FLAGS.logdir is not None

    if not load_weights:
        with open(tf.app.flags.FLAGS.hypes, 'r') as f:
            logging.info("f: %s", f)
            hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'MultiNet')

    # with tf.Session() as sess:
    # king
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        if not load_weights:
            utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
            utils._add_paths_to_sys(hypes)

            # Build united Model
            subhypes, submodules, subgraph, tv_sess = build_united_model(hypes)
            start_step = 0
        else:
            logdir = tf.app.flags.FLAGS.logdir
            logging_file = os.path.join(logdir, "output.log")
            utils.create_filewrite_handler(logging_file, mode='a')
            hypes, subhypes, submodules, subgraph, tv_sess, start_step = \
                load_united_model(logdir)
            if start_step is None:
                start_step = 0

        # Run united training
        run_united_training(hypes,
                            subhypes,
                            submodules,
                            subgraph,
                            tv_sess,
                            start_step=start_step)

        # stopping input Threads
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
Example #10
def main(_):
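    """Train KittiSeg, overriding dist, layers, learning rate and optimizer from command-line flags."""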
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
        hypes['dist'] = FLAGS.dist
        if FLAGS.layers:
            hypes['arch']['layers'] = FLAGS.layers
        if FLAGS.lr:
            hypes['solver']['learning_rate'] = FLAGS.lr
        if FLAGS.optimizer:
            hypes['solver']['opt'] = FLAGS.optimizer
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    train.do_training(hypes)
Example #11
def main(_):
    utils.set_gpus_to_use()

    sys.path.append("submodules/tensorflow-fcn")
    sys.path.append("submodules/TensorVision")

    import tensorvision.train
    import tensorflow_fcn.utils

    # try:
    #     import tensorvision.train
    #     import tensorflow_fcn.utils
    # except ImportError:
    #     logging.error("Could not import the submodules.")
    #     logging.error("Please execute:"
    #                   "'git submodule update --init --recursive'")
    #     exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #12
def main(_):
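    """Set up training, then run a genetic algorithm over encoder architectures using a stub encoder file."""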
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    encoder_path = hypes['model']['architecture_file']
    hypes['model']['architecture_file'] = '../encoder/stub.py'
    hypes['ga_data'] = 'ga_data.json'

    run_genetic_algorithm(hypes, encoder_path)
Example #13
def main(_):
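    """Download a pretrained KittiSeg run, evaluate it on validation data and run inference on the test data."""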
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                'KittiSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir)
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    kitti_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("tv-analysis has a much cleaner interface.")
Example #14
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    logging.info("Initialize training folder")
    initialize_training_folder(hypes)
    maybe_download_and_extract(hypes)
    logging.info("Start training")
    do_training(hypes)
Example #15
def main(_):
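    """Train a MediSeg model using the hyperparameter file passed via --hypes."""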
    utils.set_gpus_to_use()

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'MediSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    train.maybe_download_and_extract(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #16
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    logging.info("Initialize training folder")
    initialize_training_folder(hypes)
    maybe_download_and_extract(hypes)
    logging.info("Start training")
    do_training(hypes)
Example #17
def main(_):
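    """Train with TV_DIR_DATA and TV_DIR_RUNS pointed at the ../../SemSeg_DATA directories."""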
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    os.environ["TV_DIR_DATA"] = "../../SemSeg_DATA/DATA"
    os.environ["TV_DIR_RUNS"] = "../../SemSeg_DATA/RUNS"

    # print(os.environ["TV_DIR_DATA"])

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #18
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: python train.py --hypes hypes.json")
        logging.error("   tf: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    logging.info(
        "Initializing GPUs, plugins and creating the essential folders")
    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
    initialize_training_folder(hypes)

    logging.info("Training settings")
    do_training(hypes)
Example #19
def main(_):
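    """Download a pretrained KittiBox run and evaluate it on validation data."""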
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiBox')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir)
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir, base_path='hypes')

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("")
    logging.info(
        "Output images can be found in {}/analyse/images.".format(logdir))
Example #20
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #21
def main(_):
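    """Resume training from the existing run directory <runs_dir>/<FLAGS.RUN>."""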
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
    else:
        runs_dir = 'RUNS'

    logdir = os.path.join(runs_dir, FLAGS.RUN)

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)
    train.continue_training(logdir)
Example #22
def main(_):
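    """Train the model described by --hypes, applying --mod overrides and resetting the default graph first."""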
    logging.info(
        "Initializing GPUs, plugins and creating the essential folders")
    utils.set_gpus_to_use()

    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: python train.py --hypes hypes.json")
        logging.error("   tf: tv-train --hypes hypes.json")
        exit(1)

    with open(FLAGS.hypes) as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    if FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(FLAGS.mod)
        dict_merge(hypes, mod_dict)

    logging.info("Loading plugins")
    utils.load_plugins()

    logging.info("Set dirs")
    utils.set_dirs(hypes, FLAGS.hypes)

    logging.info("Add paths to sys")
    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)

    tf.reset_default_graph()

    logging.info("Start training")
    train.do_training(hypes)
Example #23
def main(_):
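    """Download the pretrained run VOCSeg_2017_04_14_00.49 and create output on the VOC test data."""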
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'VOCSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir, "VOCSeg_2017_04_14_00.49")
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, "VOCSeg_2017_04_14_00.49")
    # logging.info("Output images will be saved to {}".format)
    # ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    voc_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
Example #24
def main(_):
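    """Analyze a trained FacadeSeg run: load hypes from --logdir, then run validation analysis and inference."""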
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    hypes_path = FLAGS.logdir
    hypes_path = os.path.join(hypes_path, "model_files/hypes.json")

    with open(hypes_path, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'FacadeSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, FLAGS.hypes)
    utils._add_paths_to_sys(hypes)

    logging.info("Evaluating on Validation data.")
    ana.do_analyze(FLAGS.logdir)

    logging.info("Segmenting test data. Creating output.")
    ana.do_inference(FLAGS.logdir)

    logging.info("Analysis for pretrained model complete.")
Example #25
def main(_):
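    """Prune kernels of the layers listed in hypes['layer_pruning'], save the trimmed run and continue training it."""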
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
    utils._add_paths_to_sys(hypes)
    train.maybe_download_and_extract(hypes)
    maybe_download_and_extract(runs_dir)

    logging.info("Trimming weights.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    modules = utils.load_modules_from_hypes(hypes)

    with tf.Graph().as_default():

        # build the graph based on the loaded modules
        with tf.name_scope("Queues"):
            queue = modules['input'].create_queues(hypes, 'train')

        tv_graph = core.build_training_graph(hypes, queue, modules)

        # prepare the tv session
        with tf.Session().as_default():
            tv_sess = core.start_tv_session(hypes)
        sess = tv_sess['sess']
        saver = tv_sess['saver']

        cur_step = core.load_weights(logdir, sess, saver)
        if cur_step is None:
            logging.warning("Loaded global_step is None.")
            logging.warning("This could mean,"
                            " that no weights have been loaded.")
            logging.warning("Starting Training with step 0.")
            cur_step = 0

        with tf.name_scope('Validation'):
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            image.set_shape([1, None, None, 3])
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out

        # prepare the tv session
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])
        inf_out = core.build_inference_graph(hypes, modules, image=image)

        # Create a session for running Ops on the Graph.
        trim_dir = 'RUNS/trimmed'
        shutil.copytree(logdir, trim_dir)
        shutil.copy(tf.app.flags.FLAGS.hypes,
                    os.path.join(trim_dir, 'model_files', 'hypes.json'))
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(trim_dir, sess, saver)

        for weight in tf.contrib.model_pruning.get_masks():
            if any([
                    layer in weight.name
                    for layer in hypes['layer_pruning']['layers']
            ]):
                weight_value = tv_sess['sess'].run(weight)
                kernel_count = int(weight_value.shape[3] *
                                   hypes['layer_pruning']['layer_sparsity'])

                l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2))
                toss_kernels = l1_values.argsort()[:kernel_count]
                weight_value[:, :, :, toss_kernels] = 0
                assign_op = tf.assign(weight, tf.constant(weight_value))
                tv_sess['sess'].run(assign_op)

        checkpoint_path = os.path.join(trim_dir, 'model.ckpt')
        tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step)

    train.continue_training(trim_dir)
Example #26
def build_united_model(meta_hypes):
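    """Build the united training graph, session and modules for every sub-model listed in meta_hypes."""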

    logging.info("Initialize training folder")

    subhypes = {}
    subgraph = {}
    submodules = {}
    subqueues = {}

    subgraph['debug_ops'] = {}

    base_path = meta_hypes['dirs']['base_path']
    first_iter = True

    for model in meta_hypes['model_list']:
        subhypes_file = os.path.join(base_path, meta_hypes['models'][model])
        with open(subhypes_file, 'r') as f:
            logging.info("f: %s", f)
            subhypes[model] = json.load(f)

        hypes = subhypes[model]
        utils.set_dirs(hypes, subhypes_file)
        # output directories
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        # data directories
        hypes['dirs']['data_dir'] = meta_hypes['dirs']['data_dir']
        # initialize the training folders
        train.initialize_training_folder(hypes, files_dir=model,
                                         logging=first_iter)
        # get the image directory from hypes
        meta_hypes['dirs']['image_dir'] = hypes['dirs']['image_dir']
        # load the modules from hypes
        submodules[model] = utils.load_modules_from_hypes(
            hypes, postfix="_%s" % model)
        modules = submodules[model]

        logging.info("Build %s computation Graph.", model)
        with tf.name_scope("Queues_%s" % model):
            subqueues[model] = modules['input'].create_queues(hypes, 'train')

        logging.info('Building Model: %s' % model)

        subgraph[model] = build_training_graph(hypes,
                                               subqueues[model],
                                               modules,
                                               first_iter)

        first_iter = False
    # if the model list contains detection and segmentation
    if len(meta_hypes['models']) == 2:
        # recombine the two losses
        _recombine_2_losses(meta_hypes, subgraph, subhypes, submodules)
    # if the model list contains detection, segmentation and classification
    else:
        # recombine the three losses
        _recombine_3_losses(meta_hypes, subgraph, subhypes, submodules)

    hypes = subhypes[meta_hypes['model_list'][0]]

    tv_sess = core.start_tv_session(hypes)
    sess = tv_sess['sess']
    # for each model in the model list, build the validation inference graph and start its input queues
    for model in meta_hypes['model_list']:
        hypes = subhypes[model]
        modules = submodules[model]
        optimizer = modules['solver']

        with tf.name_scope('Validation_%s' % model):
            tf.get_variable_scope().reuse_variables()
            # placeholder tensor used as a handle for feeding an image
            image_pl = tf.placeholder(tf.float32)
            # expand the tensor shape by inserting a new axis at position 0 (the batch dimension)
            image = tf.expand_dims(image_pl, 0)
            inf_out = core.build_inference_graph(hypes, modules,
                                                 image=image)
            subgraph[model]['image_pl'] = image_pl
            subgraph[model]['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, subqueues[model],
                                                 'train', sess)

    # write the merged meta hypes to the output directory
    target_file = os.path.join(meta_hypes['dirs']['output_dir'], 'hypes.json')
    with open(target_file, 'w') as outfile:
        json.dump(meta_hypes, outfile, indent=2, sort_keys=True)

    return subhypes, submodules, subgraph, tv_sess
Example #27
def build_united_model(meta_hypes):
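    """Build the united training graph, session and modules for every sub-model listed in meta_hypes."""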

    logging.info("Initialize training folder")

    subhypes = {}
    subgraph = {}
    submodules = {}
    subqueues = {}

    subgraph['debug_ops'] = {}

    base_path = meta_hypes['dirs']['base_path']
    first_iter = True

    for model in meta_hypes['model_list']:
        subhypes_file = os.path.join(base_path, meta_hypes['models'][model])
        with open(subhypes_file, 'r') as f:
            logging.info("f: %s", f)
            subhypes[model] = json.load(f)

        hypes = subhypes[model]
        utils.set_dirs(hypes, subhypes_file)
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        hypes['dirs']['data_dir'] = meta_hypes['dirs']['data_dir']
        train.initialize_training_folder(hypes,
                                         files_dir=model,
                                         logging=first_iter)
        meta_hypes['dirs']['image_dir'] = hypes['dirs']['image_dir']
        submodules[model] = utils.load_modules_from_hypes(hypes,
                                                          postfix="_%s" %
                                                          model)
        modules = submodules[model]

        logging.info("Build %s computation Graph.", model)
        with tf.name_scope("Queues_%s" % model):
            subqueues[model] = modules['input'].create_queues(hypes, 'train')

        logging.info('Building Model: %s' % model)

        subgraph[model] = build_training_graph(hypes, subqueues[model],
                                               modules, first_iter)

        first_iter = False

    if len(meta_hypes['models']) == 2:
        _recombine_2_losses(meta_hypes, subgraph, subhypes, submodules)
    else:
        _recombine_3_losses(meta_hypes, subgraph, subhypes, submodules)

    hypes = subhypes[meta_hypes['model_list'][0]]

    tv_sess = core.start_tv_session(hypes)
    sess = tv_sess['sess']
    for model in meta_hypes['model_list']:
        hypes = subhypes[model]
        modules = submodules[model]
        optimizer = modules['solver']

        with tf.name_scope('Validation_%s' % model):
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            subgraph[model]['image_pl'] = image_pl
            subgraph[model]['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, subqueues[model],
                                                 'train', sess)

    target_file = os.path.join(meta_hypes['dirs']['output_dir'], 'hypes.json')
    with open(target_file, 'w') as outfile:
        json.dump(meta_hypes, outfile, indent=2, sort_keys=True)

    return subhypes, submodules, subgraph, tv_sess