def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    kitti_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("tv-analysis has a much cleaner interface.")
Example #2
def main(_):
    utils.set_gpus_to_use()

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start finetuning")
    do_finetuning(hypes)
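
# dict_merge() is used by the --mod handling above but is not part of this
# snippet. A minimal sketch of such a recursive merge, assuming override
# values simply replace or extend the corresponding hypes entries in place
# (an assumption, not necessarily the project's actual helper):
def dict_merge(target, source):
    """Recursively merge source into target in place."""
    for key, value in source.items():
        if (key in target and isinstance(target[key], dict)
                and isinstance(value, dict)):
            dict_merge(target[key], value)
        else:
            target[key] = value

# Hypothetical invocation of the --mod flag (parsed with ast.literal_eval):
#   python train.py --hypes hypes/KittiClass.json \
#       --mod "{'solver': {'learning_rate': 1e-5}}"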
Example #3
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    #utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiBox')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    #train.maybe_download_and_extract(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #4
def main(_):
    utils.set_gpus_to_use()

    if tf.app.flags.FLAGS.logdir is None:
        logging.error("No logdir is given.")
        logging.info("Usage: python train.py --logdir rundir")
        exit(1)

    logging.info("Continuing training...")
    train.continue_training(tf.app.flags.FLAGS.logdir)
Example #5
File: analyze.py  Project: ARC2020/arc-cv
def main(_):
    """Run main function."""
    if FLAGS.logdir is None:
        logging.error("No logdir is given.")
        logging.error("Usage: tv-analyze --logdir dir")
        exit(1)

    utils.set_gpus_to_use()
    utils.load_plugins()

    logging.info("Starting to analyze model in '%s'", FLAGS.logdir)
    do_analyze(FLAGS.logdir)
Example #6
def main(_):
    tv_utils.set_gpus_to_use()

    runs_dir = 'RUNS'
    logdir = os.path.join(runs_dir, default_run)

    # Loading hyperparameters from logdir
    hypes_i = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    print("Info: Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    print("Info: Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl_i = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl_i, 0)

        # build Tensorflow graph using the model from logdir
        prediction_i = core.build_inference_graph(hypes_i,
                                                  modules,
                                                  image=image)

        print("Info: Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess_i = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess_i, saver)

        print("Info: Weights loaded successfully.")

    # input_image = "DATA/demo/demo.png"
    # print("Info: Starting inference using {} as input".format(input_image))
    global hypes
    hypes = hypes_i
    global image_pl
    image_pl = image_pl_i
    global prediction
    prediction = prediction_i
    global sess
    sess = sess_i

    from moviepy.editor import VideoFileClip
    myclip = VideoFileClip('project_video.mp4')  #.subclip(40,43)
    output_vid = 'output.mp4'
    clip = myclip.fl_image(inference)
    clip.write_videofile(output_vid, audio=False)
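
# The inference() callback passed to fl_image above is not defined in this
# snippet. A plausible sketch, assuming the same single-image pattern used by
# the demo example later in this listing (feed the placeholder, take the
# street-class column of the softmax, overlay the thresholded mask):
def inference(image):
    # Relies on the globals (hypes, image_pl, prediction, sess) set in main().
    feed = {image_pl: image}
    softmax = prediction['softmax']
    output = sess.run([softmax], feed_dict=feed)

    # Reshape the flat street-class confidences back to the frame shape.
    shape = image.shape
    output_image = output[0][:, 1].reshape(shape[0], shape[1])

    # Hard prediction for the street class, drawn as a green overlay.
    street_prediction = output_image > 0.5
    return tv_utils.fast_overlay(image, street_prediction)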
Example #7
def main(_):
    tv_utils.set_gpus_to_use()

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    #logging.info("Starting inference using {} as input".format(input_image))
    print("Tensorflow model initialized.")

    #ros node class
    path_detect = PathDetect(hypes, sess, image_pl, prediction)
    rospy.spin()
Example #8
def main(_):
    utils.set_gpus_to_use()

    load_weights = tf.app.flags.FLAGS.logdir is not None

    if not load_weights:
        with open(tf.app.flags.FLAGS.hypes, 'r') as f:
            logging.info("f: %s", f)
            hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'MultiNet')

    # Allow soft device placement (fall back to CPU where needed) and log
    # where each op is placed.
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        if not load_weights:
            utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
            utils._add_paths_to_sys(hypes)

            # Build united Model
            subhypes, submodules, subgraph, tv_sess = build_united_model(hypes)
            start_step = 0
        else:
            logdir = tf.app.flags.FLAGS.logdir
            logging_file = os.path.join(logdir, "output.log")
            utils.create_filewrite_handler(logging_file, mode='a')
            hypes, subhypes, submodules, subgraph, tv_sess, start_step = \
                load_united_model(logdir)
            if start_step is None:
                start_step = 0

        # Run united training
        run_united_training(hypes,
                            subhypes,
                            submodules,
                            subgraph,
                            tv_sess,
                            start_step=start_step)

        # stopping input Threads
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
Example #9
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
        hypes['dist'] = FLAGS.dist
        if FLAGS.layers:
            hypes['arch']['layers'] = FLAGS.layers
        if FLAGS.lr:
            hypes['solver']['learning_rate'] = FLAGS.lr
        if FLAGS.optimizer:
            hypes['solver']['opt'] = FLAGS.optimizer
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    train.do_training(hypes)
Example #10
def main(_):
    utils.set_gpus_to_use()

    sys.path.append("submodules/tensorflow-fcn")
    sys.path.append("submodules/TensorVision")

    import tensorvision.train
    import tensorflow_fcn.utils

    # try:
    #     import tensorvision.train
    #     import tensorflow_fcn.utils
    # except ImportError:
    #     logging.error("Could not import the submodules.")
    #     logging.error("Please execute:"
    #                   "'git submodule update --init --recursive'")
    #     exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #11
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
  
    encoder_path = hypes['model']['architecture_file']
    hypes['model']['architecture_file'] = '../encoder/stub.py'
    hypes['ga_data'] = 'ga_data.json'

    run_genetic_algorithm(hypes, encoder_path)
Example #12
def main(_):
    tv_utils.set_gpus_to_use()

    # Download and use weights from the MultiNet Paper
    runs_dir = '.'
    logdir = os.path.join(runs_dir, default_run)

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32, name='input_image')
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)
        tf.identity(prediction['logits'], name='output_logits')
        tf.identity(prediction['softmax'], name='output_softmax')

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

        # Save model again with renamed tensors
        saver.save(sess, 'renamed/KittiSeg_pretrained')

        logging.info("Model saved successfully.")
Example #13
def main(_):
    utils.set_gpus_to_use()

    # try:
    #     import tensorvision.train
    #     import tensorflow_fcn.utils
    # except ImportError:
    #     logging.error("Could not import the submodules.")
    #     logging.error("Please execute:"
    #                   "'git submodule update --init --recursive'")
    #     exit(1)

    if tf.app.flags.FLAGS.logdir is None:
        logging.error("No logdir is given.")
        logging.info("Usage: python train.py --logdir dir")
        exit(1)

    logging.info("Continuing training...")
    train.continue_training(tf.app.flags.FLAGS.logdir)
Example #14
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                'KittiSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir)
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    kitti_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("tv-analysis has a much cleaner interface.")
Example #15
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    logging.info("Initialize training folder")
    initialize_training_folder(hypes)
    maybe_download_and_extract(hypes)
    logging.info("Start training")
    do_training(hypes)
Example #16
File: train.py  Project: afcarl/MediSeg
def main(_):
    utils.set_gpus_to_use()

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'MediSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    train.maybe_download_and_extract(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #17
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    logging.info("Initialize training folder")
    initialize_training_folder(hypes)
    maybe_download_and_extract(hypes)
    logging.info("Start training")
    do_training(hypes)
Example #18
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    os.environ["TV_DIR_DATA"] = "../../SemSeg_DATA/DATA"
    os.environ["TV_DIR_RUNS"] = "../../SemSeg_DATA/RUNS"

    # print(os.environ["TV_DIR_DATA"])

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #19
def main(_):
    """Run main function."""
    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: python train.py --hypes hypes.json")
        logging.error("   tf: tv-train --hypes hypes.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    logging.info(
        "Initializing GPUs, plugins and creating the essential folders")
    utils.set_gpus_to_use()
    utils.load_plugins()
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
    initialize_training_folder(hypes)

    logging.info("Training settings")
    do_training(hypes)
Example #20
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiBox')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir)
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    # logging.info("Output images will be saved to {}".format)
    ana.do_analyze(logdir, base_path='hypes')

    logging.info("Analysis for pretrained model complete.")
    logging.info("For evaluating your own models I recommend using:"
                 "`tv-analyze --logdir /path/to/run`.")
    logging.info("")
    logging.info(
        "Output images can be found in {}/analyse/images.".format(logdir))
Example #21
def main(_):
    utils.set_gpus_to_use()

    load_weights = tf.app.flags.FLAGS.logdir is not None

    if not load_weights:
        print("You must specify --logdir path/to/trained/model")
        exit(1)

    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'MultiNet')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
    config = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=config) as sess:
        logdir = tf.app.flags.FLAGS.logdir
        logging_file = os.path.join(logdir, "output.log")
        utils.create_filewrite_handler(logging_file, mode='a')

        hypes, subhypes, submodules, subgraph, tv_sess, start_step = load_united_model(
            logdir)

        if start_step is None:
            start_step = 0

        # Run united training
        run_united_evaluation(hypes,
                              subhypes,
                              submodules,
                              subgraph,
                              tv_sess,
                              step=start_step)

        # stopping input Threads
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
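
# The session above grabs essentially the whole GPU
# (per_process_gpu_memory_fraction=1). A hedged alternative when sharing the
# GPU, using the same TF 1.x config options that appear elsewhere in this
# listing:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU allocations on demand
# or cap the fraction instead:
# config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.Session(config=config)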
Example #22
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiSeg')
    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)
    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)
    logging.info("Start training")
    train.do_training(hypes)
Example #23
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    if tf.app.flags.FLAGS.hypes is None:
        logging.error("No hype file is given.")
        logging.info("Usage: python train.py --hypes hypes/KittiClass.json")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)
    utils.load_plugins()

    if tf.app.flags.FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(tf.app.flags.FLAGS.mod)
        dict_merge(hypes, mod_dict)

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
    else:
        runs_dir = 'RUNS'

    logdir = os.path.join(runs_dir, FLAGS.RUN)

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)
    train.continue_training(logdir)
Example #24
def main(_):
    utils.set_gpus_to_use()

    logdir = FLAGS.logdir
    data_file = FLAGS.data
    # If no logdir is given, print the usage message and exit
    if logdir is None:
        logging.error('Usage: python predict_joint --logdir /path/to/logdir '
                      '--data /path/to/data/txt')
        exit(1)

    output_folder = os.path.join(logdir, res_folder)

    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    utils.load_plugins()
    # If the data directory is configured via TV_DIR_DATA, resolve the data
    # file relative to it; otherwise fall back to the default 'DATA' directory.
    if 'TV_DIR_DATA' in os.environ:
        data_file = os.path.join(os.environ['TV_DIR_DATA'], data_file)
    else:
        data_file = os.path.join('DATA', data_file)

    if not os.path.exists(data_file):
        logging.error('Please provide a valid data_file.')
        logging.error('Use --data_file')
        exit(1)

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'UnitedVision2')
    logging_file = os.path.join(output_folder, "analysis.log")  #log file
    utils.create_filewrite_handler(logging_file, mode='a')
    load_out = load_united_model(logdir)

    run_eval(load_out, output_folder, data_file)
Example #25
def load(logdir):
    import tensorflow as tf
    import tensorvision.utils as tv_utils
    import tensorvision.core as core

    tv_utils.set_gpus_to_use()

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32,
                                  shape=(hypes["image_height"],
                                         hypes["image_width"], 3))
        image = tf.expand_dims(image_pl, 0)
        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    return image_pl, prediction, sess, hypes
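
# A minimal usage sketch for load() above. 'RUNS/my_run' and 'data/demo.png'
# are hypothetical paths; scipy.misc is used for image I/O as in the other
# examples in this listing.
import scipy.misc

image_pl, prediction, sess, hypes = load('RUNS/my_run')

# The placeholder was built with a fixed shape, so resize the input to match.
image = scipy.misc.imread('data/demo.png')
image = scipy.misc.imresize(image,
                            size=(hypes["image_height"], hypes["image_width"]),
                            interp='cubic')

output = sess.run([prediction['softmax']], feed_dict={image_pl: image})
street_conf = output[0][:, 1].reshape(image.shape[0], image.shape[1])
street_mask = street_conf > 0.5  # hard prediction for the street class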
Example #26
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train2
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        os.environ['TV_DIR_RUNS'] = os.path.join(os.environ['TV_DIR_RUNS'],
                                                 'KittiBox')

    if FLAGS.logdir is None:
        print(" Download and use weights from the MultiNet Paper")
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiBox')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir
    print("----------------")
    print(logdir)

    hypes = utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Start training")
    print(logdir)

    train.continue_training(logdir)
Example #27
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    hypes_path = FLAGS.logdir
    hypes_path = os.path.join(hypes_path, "model_files/hypes.json")

    with open(hypes_path, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'FacadeSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, FLAGS.hypes)
    utils._add_paths_to_sys(hypes)

    logging.info("Evaluating on Validation data.")
    ana.do_analyze(FLAGS.logdir)

    logging.info("Segmenting and test data. Creating output.")
    ana.do_inference(FLAGS.logdir)

    logging.info("Analysis for pretrained model complete.")
Example #28
def main(_):
    logging.info(
        "Initializing GPUs, plugins and creating the essential folders")
    utils.set_gpus_to_use()

    if FLAGS.hypes is None:
        logging.error("No hypes are given.")
        logging.error("Usage: python train.py --hypes hypes.json")
        logging.error("   tf: tv-train --hypes hypes.json")
        exit(1)

    with open(FLAGS.hypes) as f:
        logging.info("f: %s", f)
        hypes = commentjson.load(f)

    if FLAGS.mod is not None:
        import ast
        mod_dict = ast.literal_eval(FLAGS.mod)
        dict_merge(hypes, mod_dict)

    logging.info("Loading plugins")
    utils.load_plugins()

    logging.info("Set dirs")
    utils.set_dirs(hypes, FLAGS.hypes)

    logging.info("Add paths to sys")
    utils._add_paths_to_sys(hypes)

    logging.info("Initialize training folder")
    train.initialize_training_folder(hypes)

    tf.reset_default_graph()

    logging.info("Start training")
    train.do_training(hypes)
Example #29
def main(_):
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'VOCSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)

    utils._add_paths_to_sys(hypes)

    train.maybe_download_and_extract(hypes)

    maybe_download_and_extract(runs_dir, "VOCSeg_2017_04_14_00.49")
    logging.info("Evaluating on Validation data.")
    logdir = os.path.join(runs_dir, "VOCSeg_2017_04_14_00.49")
    # logging.info("Output images will be saved to {}".format)
    # ana.do_analyze(logdir)

    logging.info("Creating output on test data.")
    voc_test.do_inference(logdir)

    logging.info("Analysis for pretrained model complete.")
Example #30
def main(_):
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Load and resize input image
    image = scp.misc.imread(input_image)
    if hypes['jitter']['reseize_image']:
        # Resize input only, if specified in hypes
        image_height = hypes['jitter']['image_height']
        image_width = hypes['jitter']['image_width']
        image = scp.misc.imresize(image, size=(image_height, image_width),
                                  interp='cubic')

    # Run KittiSeg model on image
    feed = {image_pl: image}
    softmax = prediction['softmax']
    output = sess.run([softmax], feed_dict=feed)

    # Reshape output from flat vector to 2D Image
    shape = image.shape
    output_image = output[0][:, 1].reshape(shape[0], shape[1])

    # Plot confidences as red-blue overlay
    rb_image = seg.make_overlay(image, output_image)

    # Accept all pixel with conf >= 0.5 as positive prediction
    # This creates a `hard` prediction result for class street
    threshold = 0.5
    street_prediction = output_image > threshold

    # Plot the hard prediction as green overlay
    green_image = tv_utils.fast_overlay(image, street_prediction)

    # Save output images to disk.
    if FLAGS.output_image is None:
        output_base_name = input_image
    else:
        output_base_name = FLAGS.output_image

    raw_image_name = output_base_name.split('.')[0] + '_raw.png'
    rb_image_name = output_base_name.split('.')[0] + '_rb.png'
    green_image_name = output_base_name.split('.')[0] + '_green.png'

    scp.misc.imsave(raw_image_name, output_image)
    scp.misc.imsave(rb_image_name, rb_image)
    scp.misc.imsave(green_image_name, green_image)

    logging.info("")
    logging.info("Raw output image has been saved to: {}".format(
        os.path.realpath(raw_image_name)))
    logging.info("Red-Blue overlay of confs have been saved to: {}".format(
        os.path.realpath(rb_image_name)))
    logging.info("Green plot of predictions have been saved to: {}".format(
        os.path.realpath(green_image_name)))

    logging.info("")
    logging.warning("Do NOT use this Code to evaluate multiple images.")

    logging.warning("Demo.py is **very slow** and designed "
                    "to be a tutorial to show how the KittiSeg works.")
    logging.warning("")
    logging.warning("Please see this comment, if you like to apply demo.py to"
                    "multiple images see:")
    logging.warning("https://github.com/MarvinTeichmann/KittiBox/"
                    "issues/15#issuecomment-301800058")
Example #31
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.7

    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")
    num_classes = 5

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=config)
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Deal with video segmentation frame by frame
    # TODO: build a command-line interface
    loaded_video = skvideo.io.vread('t1.avi')[:100]
    writer = skvideo.io.FFmpegWriter("outputvideo.avi")
    for image in loaded_video:
        # Load and resize input image
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image, size=(image_height, image_width),
                                      interp='cubic')

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run(softmax, feed_dict=feed)

        print(len(output), type(output), output.shape)
        # Reshape output from flat vector to 2D Image
        output = np.transpose(output)
        shape = image.shape
        output = output.reshape(num_classes, shape[0], shape[1])
        output_image = output[0].reshape(shape[0], shape[1])
       
        # Plot confidences as red-blue overlay
        rb_image = seg.make_overlay(image, output_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold
        street_predictions = output > threshold
        # Plot the hard prediction as green overlay
        green_image = tv_utils.fast_overlay(image, street_prediction)

        green_images = []
        rb_images = []
        output_images = []
        for c in range(0,5):          
            green_images.append(tv_utils.fast_overlay(image, street_predictions[c]))
            rb_images.append(seg.make_overlay(image, output[c]))
            output_images.append(output[c].reshape(shape[0], shape[1]))

        # Save output images to disk.
        if FLAGS.output_image is None:
            output_base_name = input_image
        else:
            output_base_name = FLAGS.output_image

        # Name and save the red-blue segmentation for the first 5 frames as PNGs to debug.
        green_image_names = []
        rb_image_names = []
        raw_image_names = []
        for c in range(1,6):
            green_image_names.append(output_base_name.split('.')[0] + str(c) + '_green.png')
            rb_image_names.append(output_base_name.split('.')[0] + str(c) + '_rb.png')
            raw_image_names.append(output_base_name.split('.')[0] + str(c) + '_raw.png')
  
        for c in range(0,5):
            print(green_image_names[c], green_images[c].shape)
            scp.misc.imsave(raw_image_names[c], output_images[c])
            scp.misc.imsave(rb_image_names[c], rb_images[c])
            scp.misc.imsave(green_image_names[c], green_images[c])

        # Output the green-masked video to a file and show it on screen
        writer.writeFrame(green_images[4])
        cv2.imshow('frame', green_images[4])

        # The user can press 'q' to quit during processing.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    writer.close()
Example #32
def main(_):
    tv_utils.set_gpus_to_use()

    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     logging.info(
    #         "Usage: python demo.py --input_image data/test.png "
    #         "[--output_image output_image] [--logdir /path/to/weights] "
    #         "[--gpus GPUs_to_use] ")
    #     exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    dataset = kitti_object(
        os.path.join(ROOT_DIR, 'free-space/dataset/KITTI/object'))

    point_pub = rospy.Publisher('cloud', PointCloud2, queue_size=50)
    # picture_pub = rospy.Publisher("kitti_image",newImage,queue_size=50)
    rospy.init_node('point-cloud', anonymous=True)
    # h = std_msgs.msg.Header()
    # h.frame_id="base_link"
    # h.stamp=rospy.Time.now()
    #rate = rospy.Rate(10)
    #point_msg=PointCloud2()

    video_dir = '/home/user/Data/lrx_work/free-space/kitti.avi'
    fps = 10
    num = 4541
    img_size = (1241, 376)
    fourcc = 'mp4v'
    videoWriter = cv2.VideoWriter(video_dir, cv2.VideoWriter_fourcc(*fourcc),
                                  fps, img_size)

    calib = dataset.get_calibration(0)
    for data_idx in range(len(dataset)):
        #objects = dataset.get_label_objects(data_idx)

        # Load and resize input image
        image = dataset.get_image(data_idx)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #scp.misc.imsave('new.png', image)
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')
        img_height, img_width, img_channel = image.shape
        print("picture-shape")
        print(len(image))
        print(len(image[0]))
        pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
        print(len(pc_velo))
        velo_len = len(pc_velo)
        #calib = dataset.get_calibration(data_idx)

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        rb_image = seg.make_overlay(image, output_image)
        # scp.misc.imsave('new0.png', rb_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        index = np.where(street_prediction == True)
        chang = len(index[0])
        print(chang)
        # test = np.zeros((velo_len,2),dtype=np.int)
        # for tmp0 in range(chang):
        #     test[tmp0][0]=index[0][tmp0]
        #     test[tmp0][1]=index[1][tmp0]
        print("suoyindayin")
        # if (chang>0):
        #     print(test[0][0])
        #     print(test[0][1])

        pts_2d = calib.project_velo_to_image(pc_velo)
        print(pts_2d.shape)
        # print(pts_2d[1][0])
        # print(pts_2d[1][1])

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        fov_inds = (pts_2d[:,0]<1242) & (pts_2d[:,0]>=0) & \
            (pts_2d[:,1]<370) & (pts_2d[:,1]>=0)
        print(fov_inds.shape)
        # print(fov_inds[1000])
        # print(pc_velo.shape)
        print("okok")
        fov_inds = fov_inds & (pc_velo[:, 0] > 0)
        print(fov_inds.shape)
        imgfov_pts_2d = pts_2d[fov_inds, :]
        imgfov_pc_velo = pc_velo[fov_inds, :]
        pts_2d0 = calib.project_velo_to_image(imgfov_pc_velo)
        fov_inds0 = (pts_2d0[:,0]<len(image[0])) & (pts_2d0[:,0]>=0) & \
            (pts_2d0[:,1]<len(image)) & (pts_2d0[:,1]>=0)
        fov_inds0 = fov_inds0 & (imgfov_pc_velo[:, 0] > 2.0)
        print(fov_inds0.shape)
        print(street_prediction.shape)
        print(pts_2d0.shape)
        # if(chang>0):
        #     print(int(imgfov_pts_2d[5,0]))
        #     print(int(imgfov_pts_2d[5,1]))
        #     print(street_prediction[int(imgfov_pts_2d[5,1]),int(imgfov_pts_2d[5,0])])

        if (chang > 0):
            for i in range(len(fov_inds0)):
                if ((pts_2d0[i, 1] < len(street_prediction)) &
                    (pts_2d0[i, 0] < len(street_prediction[0]))):
                    fov_inds0[i] = fov_inds0[i] & (
                        street_prediction[int(pts_2d0[i, 1]),
                                          int(pts_2d0[i, 0])] == True)
        imgfov_pc_velo0 = imgfov_pc_velo[fov_inds0, :]
        print("number")
        green_image = tv_utils.fast_overlay(image, street_prediction)
        # pub point-cloud topic
        print(imgfov_pc_velo0.shape)
        number = len(imgfov_pc_velo0)

        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = "velodyne"
        points = pc2.create_cloud_xyz32(header, imgfov_pc_velo0)

        # point=Point()

        # for t in range(0,number):
        #     point_x=imgfov_pc_velo0[t][0]
        #     point_y=imgfov_pc_velo0[t][1]
        #     point_z=imgfov_pc_velo0[t][2]
        #     point_msg.points[t].point.x=point_x
        #     point_msg.points[t].point.y=point_y
        #     point_msg.points[t].point.z=point_z

        # point_pub.publish(points)
        # videoWriter.write(green_image)

        # bridge=CvBridge()
        # picture_pub.publish(bridge.cv2_to_imgmsg(green_image,"rgb8"))

        # minx=imgfov_pc_velo0[0][0]
        # miny=imgfov_pc_velo0[0][1]
        # minz=imgfov_pc_velo0[0][2]
        # maxx=imgfov_pc_velo0[0][0]
        # maxy=imgfov_pc_velo0[0][1]
        # maxz=imgfov_pc_velo0[0][2]

        # for t in range(len(imgfov_pc_velo0)):
        #     minx=min(minx,imgfov_pc_velo0[t][0])
        #     miny=min(miny,imgfov_pc_velo0[t][1])
        #     minz=min(minz,imgfov_pc_velo0[t][2])
        #     maxx=max(maxx,imgfov_pc_velo0[t][0])
        #     maxy=max(maxy,imgfov_pc_velo0[t][1])
        #     maxz=max(maxz,imgfov_pc_velo0[t][2])
        # print(minx,miny,minz,maxx,maxy,maxz)
        # width=1024
        # height=1024
        # img_res=np.zeros([width,height,3],dtype=np.uint8)
        # for p in range(len(imgfov_pc_velo0)):
        #     velo_x=5*(int(imgfov_pc_velo0[p][0])+50)
        #     velo_y=5*(int(imgfov_pc_velo0[p][1])+50)
        #     img_res[velo_x][velo_y]=255
        #     scale=25
        #     if((velo_x>scale)&(velo_x+scale<1024)&(velo_y>scale)&(velo_y+scale<1024)):
        #         for q in range(scale):
        #             for m in range(scale):
        #                 img_res[velo_x-q][velo_y-m]=255
        #                 img_res[velo_x-q][velo_y+m]=255
        #                 img_res[velo_x+q][velo_y-m]=255
        #                 img_res[velo_x+q][velo_y+m]=255
        # scp.misc.imsave('res.png',img_res)

        draw_lidar(imgfov_pc_velo0, fig=fig)

        # for obj in objects:
        #     if obj.type == 'DontCare': continue
        #     # Draw 3d bounding box
        #     box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        #     box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        #     # Draw heading arrow
        #     ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        #     ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        #     x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        #     x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        #     draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        #     mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
        #                 tube_radius=None, line_width=1, figure=fig)

        #mlab.show(1)

        # Plot the hard prediction as green overlay

        # Save output images to disk.
        if FLAGS.output_image is None:
            output_base_name = image
        else:
            output_base_name = FLAGS.output_image

        # raw_image_name = output_base_name.split('.')[0] + '_raw.png'
        # rb_image_name = output_base_name.split('.')[0] + '_rb.png'
        # green_image_name = output_base_name.split('.')[0] + '_green.png'

        # scp.misc.imsave('1.png', output_image)
        # scp.misc.imsave('2.png', rb_image)
        # scp.misc.imsave('3.png', green_image)
        raw_input()
Example #33
File: run.py  Project: zzhang87/KittiSeg
def main(_):
    tv_utils.set_gpus_to_use()

    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     logging.info(
    #         "Usage: python demo.py --input_image data/test.png "
    #         "[--output_image output_image] [--logdir /path/to/weights] "
    #         "[--gpus GPUs_to_use] ")
    #     exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Load and resize input image
    count = 1
    duration = 0
    video = cv2.VideoCapture(
        "/home/zzhang52/Data/south_farm/front/VID_20170426_144800.mp4")
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter("/home/zzhang52/Videos/visual_nav.avi", fourcc,
                             12.0, (960, 540))

    while video.grab():
        start = time()
        _, image = video.retrieve()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='bilinear')

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)
        duration += time() - start
        count += 1

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        # rb_image = seg.make_overlay(image, output_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        thresh = (255 * street_prediction).astype(np.uint8)

        _, thresh = cv2.threshold(thresh, 100, 255, 0)

        _, contours, _ = cv2.findContours(thresh,
                                          mode=cv2.RETR_EXTERNAL,
                                          method=cv2.CHAIN_APPROX_NONE)

        contours.sort(key=cv2.contourArea, reverse=True)
        mask = np.zeros(shape, np.uint8)
        cv2.drawContours(mask, contours, 0, (0, 255, 0), -1)

        cnt = np.asarray(contours[0])
        cnt = np.squeeze(cnt)
        cnt = cnt[np.argsort(cnt[:, 1])]
        cnt = np.expand_dims(cnt, axis=1)

        points = []
        idx = 0
        x_sum = 0
        x_count = 0

        for i in range(shape[0]):
            while idx < cnt.shape[0] and cnt[idx][0][1] == i:
                x_sum += float(cnt[idx][0][0])
                x_count += 1
                idx += 1

            if x_count != 0:
                avg = int(x_sum / x_count)
                points.append(np.array([[avg, i]]))
                x_sum = 0
                x_count = 0

        points = np.asarray(points)

        # p1, p2, [vx,vy,x,y] = fitLine(points, shape[0], shape[1])

        # [vx,vy,x,y] = cv2.fitLine(points, cv2.DIST_L2,0,0.01,0.01)
        # lefty = int((-x*vy/vx) + y)
        # righty = int(((shape[1]-x)*vy/vx)+y)

        p1 = Point(points[0][0][0], points[0][0][1])
        p2 = Point(points[-1][0][0], points[-1][0][1])

        dx = float(p1.x - p2.x)
        dy = float(p1.y - p2.y)
        r = np.arctan(dx / dy) * 180 / np.pi
        t = float(shape[1] / 2 - p2.x) / shape[1]
        # r = np.sign(r) * (90 - abs(r))

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        alpha = 0.3
        overlay = cv2.addWeighted(image, 1 - alpha, mask, alpha, 0)

        # Plot the hard prediction as green overlay
        # green_image = tv_utils.fast_overlay(image, street_prediction)
        # green_image = cv2.cvtColor(green_image, cv2.COLOR_RGB2BGR)
        cv2.line(overlay, (p1.x, p1.y), (p2.x, p2.y), (0, 0, 200), 2)
        cv2.putText(overlay, "Heading: {:.2f}".format(r), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        cv2.putText(overlay, "Offset: {:.3e}".format(t), (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        cv2.imshow("show", overlay)
        writer.write(overlay)
        if cv2.waitKey(15) == 27:
            break

    cv2.destroyAllWindows()
    video.release()
    writer.release()

    # Save output images to disk.
    # if FLAGS.output_image is None:
    #     output_base_name = input_image
    # else:
    #     output_base_name = FLAGS.output_image

    # raw_image_name = output_base_name.split('.')[0] + '_raw.png'
    # rb_image_name = output_base_name.split('.')[0] + '_rb.png'
    # green_image_name = output_base_name.split('.')[0] + '_green.png'

    # scp.misc.imsave(raw_image_name, street_prediction)
    # scp.misc.imsave(rb_image_name, rb_image)
    # scp.misc.imsave(green_image_name, green_image)

    # logging.info("")
    # logging.info("Raw output image has been saved to: {}".format(
    #     os.path.realpath(raw_image_name)))
    # logging.info("Red-Blue overlay of confs have been saved to: {}".format(
    #     os.path.realpath(rb_image_name)))
    # logging.info("Green plot of predictions have been saved to: {}".format(
    #     os.path.realpath(green_image_name)))

    print("FPS: {}".format(count / duration))