コード例 #1
0
def run_detection(img):
    """Run KittiSeg street segmentation on a single image.

    Builds the inference graph, restores weights from the module-level
    ``logdir`` and returns the input image with the hard street
    prediction overlaid in green.

    Relies on module-level ``hypes``, ``modules``, ``logdir``, ``core``
    and ``tv_utils``.

    Parameters
    ----------
    img : numpy.ndarray
        Input RGB image.

    Returns
    -------
    numpy.ndarray
        Input image with pixels classified as street overlaid in green.
    """
    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Placeholder for a single image; the batch axis is added below.
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)
        logging.info("Graph build successfully.")

        # Context-managed session so it is always closed (the original
        # leaked one open session per call) and stray debug prints
        # ("la1", "la21", ...) are removed.
        with tf.Session() as sess:
            saver = tf.train.Saver()

            # Load weights from logdir
            core.load_weights(logdir, sess, saver)
            logging.info("Weights loaded successfully. Starting inference.")

            # Load and resize input image
            image = img
            if hypes['jitter']['reseize_image']:
                # Resize input only, if specified in hypes
                image_height = hypes['jitter']['image_height']
                image_width = hypes['jitter']['image_width']
                image = scp.misc.imresize(image,
                                          size=(image_height, image_width),
                                          interp='cubic')

            # Run KittiSeg model on image
            feed = {image_pl: image}
            softmax = prediction['softmax']
            output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # (Dead `rb_image = seg.make_overlay(...)` removed: the red-blue
        # confidence overlay was computed but never used or returned.)

        # Accept all pixels with conf >= 0.5 as positive prediction.
        # This creates a `hard` prediction result for class street.
        threshold = 0.5
        street_prediction = output_image > threshold

        # Plot the hard prediction as green overlay
        green_image = tv_utils.fast_overlay(image, street_prediction)

        logging.info("--> Done with detection")

        return green_image
コード例 #2
0
ファイル: kitti_test.py プロジェクト: new-2017/KittiSeg
def do_inference(logdir):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    # Build everything into TensorFlow's default graph.
    with tf.Graph().as_default():

        # Wire up the validation input and the inference graph.
        with tf.name_scope('Validation'):
            image_pl, label_pl = _create_input_placeholder()
            batched = tf.expand_dims(image_pl, 0)
            softmax = core.build_inference_graph(hypes, modules,
                                                 image=batched)

        # Restore the trained weights into a fresh session.
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)

        # Produce the test outputs.
        create_test_output(hypes, sess, image_pl, softmax)
    return
コード例 #3
0
def infer(logdir):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    # The whole model lives in TensorFlow's default graph.
    with tf.Graph().as_default():
        with tf.name_scope('Validation'):
            # Placeholders for one image (label_pl is not used further
            # in this function).
            image_pl, label_pl = _create_input_placeholder()
            softmax = core.build_inference_graph(
                hypes, modules, image=tf.expand_dims(image_pl, 0))

        # Fresh session plus restored weights.
        session = tf.Session()
        core.load_weights(logdir, session, tf.train.Saver())

        create_test_output(hypes, session, image_pl, softmax)
    return
コード例 #4
0
ファイル: evaluate_recog.py プロジェクト: IAMLabUMD/tpami2020
def load_model(model_dir, hypes, modules, image_width=450, image_height=450):
    """Build the inference graph and restore weights from *model_dir*.

    Returns a dict with keys ``"in"`` (input placeholder), ``"out"``
    (output operation) and ``"sess"`` (the TensorFlow session).
    """
    # Grow GPU memory on demand instead of grabbing it all up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Graph().as_default():
        # Placeholder for one H x W x C image; add the batch axis and
        # pin the full static shape expected by the network.
        channels = hypes['arch']['num_channels']
        input_pl = tf.placeholder(tf.float32, [None, None, channels])
        batched = tf.expand_dims(input_pl, 0)
        batched.set_shape([1, image_height, image_width, channels])

        # Build the inference graph from the modules found in logdir.
        output_operation = core.build_inference_graph(hypes,
                                                      modules,
                                                      image=batched)
        logging.info("Graph build successfully.")

        # Session with the memory-growth config; restore the weights.
        sess = tf.Session(config=config)
        saver = tf.train.Saver()
        core.load_weights(model_dir, sess, saver)
        logging.info("Weights loaded successfully.")

        return {"in": input_pl, "out": output_operation, "sess": sess}
コード例 #5
0
def main(_):
    """Entry point: build KittiSeg, load weights and hand off to ROS.

    Loads hyperparameters and modules from ``FLAGS.logdir`` (or downloads
    the published MultiNet weights when no logdir is given), builds the
    inference graph, restores the weights and then spins a ROS node that
    performs per-frame detection.
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)
        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)
        logging.info("Weights loaded successfully.")

    # (Removed unused local `input_image = FLAGS.input_image`; the value
    # was never used, and logging replaces the stray print for
    # consistency with the rest of this function.)
    logging.info("Tensorflow model initialized.")

    # ROS node: performs detection on incoming frames until shutdown.
    path_detect = PathDetect(hypes, sess, image_pl, prediction)
    rospy.spin()
コード例 #6
0
def main(_):
    """Build KittiSeg, load weights and segment a video clip.

    Publishes the loaded model through the module-level globals
    ``hypes``, ``image_pl``, ``prediction`` and ``sess`` so that
    ``inference`` can be used as a moviepy frame filter, then writes
    the processed video to disk.
    """
    tv_utils.set_gpus_to_use()

    runs_dir = 'RUNS'
    logdir = os.path.join(runs_dir, default_run)

    # Hyperparameters and tv modules live next to the weights.
    hypes_i = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    print("Info: Hypes loaded successfully.")

    modules = tv_utils.load_modules_from_logdir(logdir)
    print("Info: Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Single-image placeholder; the batch axis is added explicitly.
        image_pl_i = tf.placeholder(tf.float32)
        batched = tf.expand_dims(image_pl_i, 0)

        prediction_i = core.build_inference_graph(hypes_i,
                                                  modules,
                                                  image=batched)
        print("Info: Graph build successfully.")

        # Session plus restored weights.
        sess_i = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess_i, saver)
        print("Info: Weights loaded successfully.")

    # Expose the model to the frame-level `inference` callback.
    global hypes, image_pl, prediction, sess
    hypes = hypes_i
    image_pl = image_pl_i
    prediction = prediction_i
    sess = sess_i

    # Segment every frame of the clip and write the result.
    from moviepy.editor import VideoFileClip
    clip_in = VideoFileClip('project_video.mp4')  # .subclip(40,43)
    processed = clip_in.fl_image(inference)
    processed.write_videofile('output.mp4', audio=False)
コード例 #7
0
ファイル: analyze.py プロジェクト: ARC2020/arc-cv
def do_analyze(logdir, base_path=None):
    """
    Analyze a trained model.

    Loads the model files and weights found in *logdir*, runs the eval
    module and writes result images plus a log file below *logdir*.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    base_path : string, optional
        Overrides ``hypes['dirs']['base_path']`` when given.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    if base_path is not None:
        hypes['dirs']['base_path'] = base_path

    # Everything below is built into TensorFlow's default graph.
    with tf.Graph().as_default():

        # Input placeholder: one RGB image of arbitrary size.
        image_pl = tf.placeholder(tf.float32)
        batched = tf.expand_dims(image_pl, 0)
        batched.set_shape([1, None, None, 3])
        inf_out = core.build_inference_graph(hypes, modules,
                                             image=batched)

        # Restore the trained weights.
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)

        logging.info("Graph loaded succesfully. Starting evaluation.")

        output_dir = os.path.join(logdir, 'analyse')
        logging.info("Output Images will be written to: {}".format(
            os.path.join(output_dir, "images/")))

        # Mirror all further log output into the analyse folder.
        utils.create_filewrite_handler(os.path.join(logdir,
                                                    "analyse/output.log"))

        eval_dict, images = modules['eval'].evaluate(
            hypes, sess, image_pl, inf_out)

        logging.info("Evaluation Succesfull. Results:")
        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, output_dir)
コード例 #8
0
ファイル: train.py プロジェクト: watsonkm/PruneSeg
def continue_training(logdir):
    """
    Continues training of a model.

    This will load model files and weights found in logdir and continues
    an aborted training.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    # NOTE(review): this opens a throwaway tf.Session purely as a context
    # manager; `sess` is immediately rebound to the tv session below.
    # Other variants of this function use `tf.Graph().as_default()` here
    # instead — confirm the intent before changing it.
    with tf.Session() as sess:

        # build the graph based on the loaded modules
        with tf.name_scope("Queues"):
            queue = modules['input'].create_queues(hypes, 'train')

        tv_graph = core.build_training_graph(hypes, queue, modules)

        # prepare the tv session; it supplies the real session and saver
        tv_sess = core.start_tv_session(hypes)
        sess = tv_sess['sess']
        saver = tv_sess['saver']

        # append further output to the run's existing log file
        logging_file = os.path.join(logdir, 'output.log')
        utils.create_filewrite_handler(logging_file, mode='a')

        logging.info("Continue training.")

        # Restore weights; load_weights returns the saved global step,
        # or None when no checkpoint could be loaded.
        cur_step = core.load_weights(logdir, sess, saver)
        if cur_step is None:
            logging.warning("Loaded global_step is None.")
            logging.warning("This could mean,"
                            " that no weights have been loaded.")
            logging.warning("Starting Training with step 0.")
            cur_step = 0

        # Build a validation/inference path reusing the training variables.
        with tf.name_scope('Validation'):
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            image.set_shape([1, None, None, 3])
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, queue, 'train', sess)

        # And then after everything is built, start the training loop.
        run_training(hypes, modules, tv_graph, tv_sess, cur_step)

        # stopping input Threads
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
コード例 #9
0
def load_united_model(logdir):
    """Load a united (multi-task) model from *logdir*.

    Loads the meta hypes plus the per-model hypes/modules, builds the
    shared segmentation encoder once and one decoder per model, then
    restores the trained weights.

    Parameters
    ----------
    logdir : string
        Directory holding the united model's hypes, modules and weights.

    Returns
    -------
    tuple
        (meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl)
    """
    subhypes = {}
    submodules = {}

    # Load the meta hypes from the log directory.
    meta_hypes = utils.load_hypes_from_logdir(logdir,
                                              subdir="",
                                              base_path='hypes')

    # For every sub-model collect its hypes and modules; the output and
    # image directories are shared and come from the meta hypes.
    for model in meta_hypes['models']:
        subhypes[model] = utils.load_hypes_from_logdir(logdir, subdir=model)
        hypes = subhypes[model]
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        hypes['dirs']['image_dir'] = meta_hypes['dirs']['image_dir']
        submodules[model] = utils.load_modules_from_logdir(logdir,
                                                           dirname=model,
                                                           postfix=model)

    # Fixed-size input placeholder with an explicit batch axis.
    image_pl = tf.placeholder(tf.float32)
    image = tf.expand_dims(image_pl, 0)
    image.set_shape([1, 384, 1248, 3])
    decoded_logits = {}

    # The encoder is built once from the segmentation architecture and
    # shared by every decoder below.
    hypes = subhypes['segmentation']
    modules = submodules['segmentation']
    logits = modules['arch'].inference(hypes, image, train=False)

    # One decoder per model, each in its own Validation_* name scope.
    # (Dead locals `optimizer`, `reuse`, `scope`, `subgraph`, `subqueues`
    # and the unused `first_iter` flag from the original were removed.)
    for model in meta_hypes['models']:
        hypes = subhypes[model]
        modules = submodules[model]
        with tf.name_scope('Validation_%s' % model):
            decoded_logits[model] = modules['objective'].decoder(hypes,
                                                                 logits,
                                                                 train=False)

    # Launch a session and restore the trained weights (the returned
    # global step is not needed here).
    sess = tf.Session()
    saver = tf.train.Saver()
    core.load_weights(logdir, sess, saver)

    return meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl
コード例 #10
0
ファイル: seg_net.py プロジェクト: ClovisChen/LearningCNN
 def build_net(self, file_params):
     """Build the segmentation network and restore its weights.

     Creates the input placeholder, inference graph and softmax output,
     starts a session with queue runners and loads the trained weights.
     Sets ``self.image_pl``, ``self.sess`` and ``self.softmax``.

     Parameters
     ----------
     file_params : object
         Carries ``root_path`` (directory containing dhypes.json) and
         ``log_directory`` (directory holding the trained weights).
     """
     root_path = file_params.root_path
     logdir = file_params.log_directory
     # Hyperparameters are read from dhypes.json under root_path.
     hypes = tv_utils.load_hypes_from_logdir(root_path,
                                             json_file='dhypes.json')
     # Single-image placeholder; the batch axis is added via expand_dims.
     self.image_pl = tf.placeholder(tf.float32)
     image = tf.expand_dims(self.image_pl, 0)
     # Encoder + decoder built in inference (non-training) mode.
     logits = arch.inference(hypes, image, train=False)
     prediction = objective.decoder(hypes, logits, train=False)
     self.sess = tf.Session()
     saver = tf.train.Saver()
     self.sess.run(tf.global_variables_initializer())
     self.sess.run(tf.local_variables_initializer())
     # Start queue runners for any input pipelines attached to the graph
     # before restoring the weights (order kept as in the original).
     coordinator = tf.train.Coordinator()
     threads = tf.train.start_queue_runners(sess=self.sess,
                                            coord=coordinator)
     tv_core.load_weights(logdir, self.sess, saver)
     # Cache the softmax op; callers feed self.image_pl and run this.
     self.softmax = prediction['softmax']
コード例 #11
0
def main(_):
    """Rebuild KittiSeg with named IO tensors and re-save the checkpoint.

    Loads hypes/modules from the default run, builds the inference graph
    with `input_image`, `output_logits` and `output_softmax` tensor
    names, restores the published weights and saves the model again
    under 'renamed/KittiSeg_pretrained'.
    """
    tv_utils.set_gpus_to_use()

    # Download and use weights from the MultiNet Paper
    runs_dir = '.'
    logdir = os.path.join(runs_dir, default_run)

    # Hyperparameters and tv modules live next to the weights.
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    logging.info("Hypes loaded successfully.")

    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Named input placeholder so the graph can be driven by name.
        image_pl = tf.placeholder(tf.float32, name='input_image')
        batched = tf.expand_dims(image_pl, 0)

        prediction = core.build_inference_graph(hypes, modules,
                                                image=batched)
        # Give the outputs stable, well-known tensor names.
        tf.identity(prediction['logits'], name='output_logits')
        tf.identity(prediction['softmax'], name='output_softmax')
        logging.info("Graph build successfully.")

        # Session plus restored weights.
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)
        logging.info("Weights loaded successfully.")

        # Save model again with renamed tensors
        saver.save(sess, 'renamed/KittiSeg_pretrained')
        logging.info("Model saved successfully.")
コード例 #12
0
ファイル: train.py プロジェクト: Candice-X/TensorVision
def continue_training(logdir):
    """
    Continues training of a model.

    This will load model files and weights found in logdir and continues
    an aborted training.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    # append output to output.log
    logging_file = os.path.join(logdir, 'output.log')
    utils.create_filewrite_handler(logging_file, mode='a')

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default() as graph:

        # build the graph based on the loaded modules
        graph_ops = build_training_graph(hypes, modules)
        q = graph_ops[0]

        # prepare the tv session
        sess_coll = core.start_tv_session(hypes)
        sess, saver, summary_op, summary_writer, coord, threads = sess_coll

        # BUG FIX: image_pl/softmax were only defined when the objective
        # provides `evaluate`, yet are passed to run_training_step
        # unconditionally below — a NameError otherwise.
        image_pl = None
        softmax = None
        if hasattr(objective, 'evaluate'):
            with tf.name_scope('Validation'):
                image_pl, label_pl = _create_input_placeholder()
                image = tf.expand_dims(image_pl, 0)
                softmax = core.build_inference_graph(hypes, modules,
                                                     image=image,
                                                     label=label_pl)

        # Load weights from logdir; resume from the restored global step.
        cur_step = core.load_weights(logdir, sess, saver)

        # Start the data load
        _start_enqueuing_threads(hypes, q, sess, data_input)

        # Training loop (`range` instead of the Python-2-only `xrange`).
        start_time = time.time()
        for step in range(cur_step + 1, hypes['solver']['max_steps']):
            start_time = run_training_step(hypes, step, start_time,
                                           graph_ops, sess_coll, objective,
                                           image_pl, softmax)

        # stopping input Threads
        coord.request_stop()
        coord.join(threads)
コード例 #13
0
ファイル: train.py プロジェクト: nicolasbolanos/TensorVision
def continue_training(logdir):
    """
    Continues training of a model.

    This will load model files and weights found in logdir and continues
    an aborted training.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    # append output to output.log
    logging_file = os.path.join(logdir, 'output.log')
    utils.create_filewrite_handler(logging_file, mode='a')

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default() as graph:

        # build the graph based on the loaded modules
        graph_ops = build_training_graph(hypes, modules)
        q = graph_ops[0]

        # prepare the tv session
        sess_coll = core.start_tv_session(hypes)
        sess, saver, summary_op, summary_writer, coord, threads = sess_coll

        # BUG FIX: image_pl/softmax were only defined when the objective
        # provides `evaluate`, yet are passed to run_training_step
        # unconditionally below — a NameError otherwise.
        image_pl = None
        softmax = None
        if hasattr(objective, 'evaluate'):
            with tf.name_scope('Validation'):
                image_pl, label_pl = _create_input_placeholder()
                image = tf.expand_dims(image_pl, 0)
                softmax = core.build_inference_graph(hypes, modules,
                                                     image=image,
                                                     label=label_pl)

        # Load weights from logdir; resume from the restored global step.
        cur_step = core.load_weights(logdir, sess, saver)

        # Start the data load
        _start_enqueuing_threads(hypes, q, sess, data_input)

        # Training loop (`range` instead of the Python-2-only `xrange`).
        start_time = time.time()
        for step in range(cur_step + 1, hypes['solver']['max_steps']):
            start_time = run_training_step(hypes, step, start_time,
                                           graph_ops, sess_coll, modules,
                                           image_pl, softmax)

        # stopping input Threads
        coord.request_stop()
        coord.join(threads)
コード例 #14
0
def do_analyze(logdir):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    # Build everything into the default graph.
    with tf.Graph().as_default():

        # Validation input plus the inference graph.
        with tf.name_scope('Validation'):
            image_pl, label_pl = _create_input_placeholder()
            batched = tf.expand_dims(image_pl, 0)
            softmax = core.build_inference_graph(hypes, modules,
                                                 image=batched,
                                                 label=label_pl)

        # Start the tv session and restore the trained weights.
        sess_coll = core.start_tv_session(hypes)
        sess, saver, summary_op, summary_writer, coord, threads = sess_coll
        core.load_weights(logdir, sess, saver)

        # Run the objective's evaluation.
        eval_dict, images = objective.tensor_eval(hypes, sess, image_pl,
                                                  softmax)

        # Mirror logging into eval/analysis.log and report the results.
        logging_file = os.path.join(logdir, "eval/analysis.log")
        utils.create_filewrite_handler(logging_file)

        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, logdir)
    return
コード例 #15
0
ファイル: analyze.py プロジェクト: Candice-X/TensorVision
def do_analyze(logdir):
    """
    Analyze a trained model.

    Loads model files and weights found in *logdir*, evaluates the
    objective and writes result images and an analysis log.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    with tf.Graph().as_default():

        # Inference graph under a Validation name scope.
        with tf.name_scope('Validation'):
            image_pl, label_pl = _create_input_placeholder()
            softmax = core.build_inference_graph(
                hypes, modules,
                image=tf.expand_dims(image_pl, 0),
                label=label_pl)

        # tv session collection:
        # (sess, saver, summary_op, summary_writer, coord, threads)
        sess, saver, summary_op, summary_writer, coord, threads = \
            core.start_tv_session(hypes)

        core.load_weights(logdir, sess, saver)

        eval_dict, images = objective.tensor_eval(hypes, sess, image_pl,
                                                  softmax)

        # Log results to eval/analysis.log and dump the images.
        utils.create_filewrite_handler(
            os.path.join(logdir, "eval/analysis.log"))
        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, logdir)
    return
def load_united_model(logdir):
    """Load a united (multi-task) model from *logdir*.

    Loads the meta hypes and the per-model hypes/modules, builds the
    shared segmentation encoder once plus one decoder per model, and
    restores the trained weights.

    Parameters
    ----------
    logdir : string
        Directory holding the united model's hypes, modules and weights.

    Returns
    -------
    tuple
        (meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl)
    """
    subhypes = {}
    submodules = {}

    meta_hypes = utils.load_hypes_from_logdir(logdir,
                                              subdir="",
                                              base_path='hypes')

    # Per-model hypes/modules; output and image dirs come from the
    # meta hypes and are shared by all models.
    for model in meta_hypes['models']:
        subhypes[model] = utils.load_hypes_from_logdir(logdir, subdir=model)
        hypes = subhypes[model]
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        hypes['dirs']['image_dir'] = meta_hypes['dirs']['image_dir']
        submodules[model] = utils.load_modules_from_logdir(logdir,
                                                           dirname=model,
                                                           postfix=model)

    # Fixed-size input placeholder with an explicit batch axis.
    image_pl = tf.placeholder(tf.float32)
    image = tf.expand_dims(image_pl, 0)
    image.set_shape([1, 384, 1248, 3])
    decoded_logits = {}

    # Shared encoder built once from the segmentation architecture.
    hypes = subhypes['segmentation']
    modules = submodules['segmentation']
    logits = modules['arch'].inference(hypes, image, train=False)

    # One decoder per model in its own Validation_* name scope.
    # (Dead locals `optimizer`, `reuse`, `scope`, `subgraph`, `subqueues`
    # and the unused `first_iter` flag from the original were removed.)
    for model in meta_hypes['models']:
        hypes = subhypes[model]
        modules = submodules[model]
        with tf.name_scope('Validation_%s' % model):
            decoded_logits[model] = modules['objective'].decoder(hypes,
                                                                 logits,
                                                                 train=False)

    # Launch a session and restore the trained weights (the returned
    # global step is not needed here).
    sess = tf.Session()
    saver = tf.train.Saver()
    core.load_weights(logdir, sess, saver)

    return meta_hypes, subhypes, submodules, decoded_logits, sess, image_pl
コード例 #17
0
def do_analyze(logdir):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    # Mirror all log output into eval/analysis.log.
    utils.create_filewrite_handler(os.path.join(logdir,
                                                "eval/analysis.log"))

    # Build the evaluation graph into the default graph.
    with tf.Graph().as_default():

        # build the graph based on the loaded modules
        graph_ops = core.build_graph(hypes, modules, train=False)
        q, train_op, loss, eval_lists = graph_ops

        # prepare the tv session
        sess_coll = core.start_tv_session(hypes)
        sess, saver, summary_op, summary_writer, coord, threads = sess_coll

        core.load_weights(logdir, sess, saver)
        # Start feeding validation data.
        data_input.start_enqueuing_threads(hypes, q['val'], 'val', sess,
                                           hypes['dirs']['data_dir'])

    return core.do_eval(hypes, eval_lists, 'val', sess)
コード例 #18
0
def load(logdir):
    """Load a trained KittiSeg model from *logdir*.

    Returns ``(image_pl, prediction, sess, hypes)`` ready for inference.
    """
    import tensorflow as tf
    import tensorvision.utils as tv_utils
    import tensorvision.core as core

    tv_utils.set_gpus_to_use()

    # Hyperparameters and tv modules live next to the weights.
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    logging.info("Hypes loaded successfully.")

    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Fixed-size RGB input placeholder; batch axis added below.
        image_pl = tf.placeholder(tf.float32,
                                  shape=(hypes["image_height"],
                                         hypes["image_width"], 3))
        batched = tf.expand_dims(image_pl, 0)
        prediction = core.build_inference_graph(hypes, modules,
                                                image=batched)
        logging.info("Graph build successfully.")

        # Restore the trained weights.
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)
        logging.info("Weights loaded successfully.")

    return image_pl, prediction, sess, hypes
コード例 #19
0
def model_path():
    """Load KittiSeg and run an endless detection loop over queued frames.

    Builds the inference graph, restores weights from FLAGS.logdir (or
    downloads the published weights), then loops forever: takes a frame
    from the module-level ``queue``, runs street segmentation on it and
    shows the hard-prediction green overlay in an OpenCV window.

    NOTE(review): ``queue`` is presumably a module-level frame queue fed
    by a capture thread — confirm against the caller.
    """
    tv_utils.set_gpus_to_use()

    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     logging.info(
    #         "Usage: python demo.py --input_image data/test.png "
    #         "[--output_image output_image] [--logdir /path/to/weights] "
    #         "[--gpus GPUs_to_use] ")
    #     exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")
    cv2.namedWindow("visual_image", flags=cv2.WINDOW_FREERATIO)
    # Endless visualization loop; terminates only with the process.
    while True:
        frame = queue.get()
        cv2.imshow("image", frame)
        #cv2.waitKey(1)
        image = frame
        # logging.info("Starting inference using {} as input".format(input_image))

        # Load and resize input image
        #image = scp.misc.imread(input_image)
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')

    # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        # NOTE(review): rb_image is computed but never displayed or used.
        rb_image = seg.make_overlay(image, output_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        # Plot the hard prediction as green overlay
        green_image = tv_utils.fast_overlay(image, street_prediction)
        cv2.imshow("visual_image", green_image)
        cv2.waitKey(1)
コード例 #20
0
def load_united_model(logdir):
    """Build the combined MultiNet training/validation graph from *logdir*.

    Loads the meta hypes plus one sub-hypes/sub-modules pair per entry in
    ``meta_hypes['model_list']``, builds a training graph per model (the
    first iteration creates variables, later ones reuse them), recombines
    the per-model losses into a joint objective, starts a TensorVision
    session, restores weights, attaches a validation/inference graph per
    model and starts the input-queue threads.

    Parameters
    ----------
    logdir : str
        Directory holding the meta hypes and one subdirectory per model.

    Returns
    -------
    tuple
        (meta_hypes, subhypes, submodules, subgraph, tv_sess, cur_step)
    """
    subhypes = {}
    subgraph = {}
    submodules = {}
    subqueues = {}

    subgraph['debug_ops'] = {}

    # Only the first model may create fresh variables; later models reuse
    # the shared weights (flag is passed into build_training_graph).
    first_iter = True

    meta_hypes = utils.load_hypes_from_logdir(logdir,
                                              subdir="",
                                              base_path='hypes')
    for model in meta_hypes['model_list']:
        subhypes[model] = utils.load_hypes_from_logdir(logdir, subdir=model)
        hypes = subhypes[model]
        # All sub-models share the meta run's output/image/data dirs.
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        hypes['dirs']['image_dir'] = meta_hypes['dirs']['image_dir']
        hypes['dirs']['data_dir'] = meta_hypes['dirs']['data_dir']
        submodules[model] = utils.load_modules_from_logdir(logdir,
                                                           dirname=model,
                                                           postfix=model)

        modules = submodules[model]

        logging.info("Build %s computation Graph.", model)
        with tf.name_scope("Queues_%s" % model):
            subqueues[model] = modules['input'].create_queues(hypes, 'train')

        logging.info('Building Model: %s' % model)

        subgraph[model] = build_training_graph(hypes, subqueues[model],
                                               modules, first_iter)

        first_iter = False

    # Merge the individual losses into one joint loss (2- or 3-model case).
    if len(meta_hypes['model_list']) == 2:
        _recombine_2_losses(meta_hypes, subgraph, subhypes, submodules)
    else:
        _recombine_3_losses(meta_hypes, subgraph, subhypes, submodules)

    # Any sub-hypes suffices for session creation; use the first model's.
    hypes = subhypes[meta_hypes['model_list'][0]]

    tv_sess = core.start_tv_session(hypes)
    sess = tv_sess['sess']
    saver = tv_sess['saver']

    cur_step = core.load_weights(logdir, sess, saver)
    for model in meta_hypes['model_list']:
        hypes = subhypes[model]
        modules = submodules[model]
        optimizer = modules['solver']

        with tf.name_scope('Validation_%s' % model):
            # Reuse the training variables for the inference graph.
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            subgraph[model]['image_pl'] = image_pl
            subgraph[model]['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, subqueues[model],
                                                 'train', sess)

    # Persist the merged meta hypes next to the run's other outputs.
    target_file = os.path.join(meta_hypes['dirs']['output_dir'], 'hypes.json')
    with open(target_file, 'w') as outfile:
        json.dump(meta_hypes, outfile, indent=2, sort_keys=True)

    return meta_hypes, subhypes, submodules, subgraph, tv_sess, cur_step
コード例 #21
0
def main(_):
    """Run KittiSeg road segmentation on a live camera (or video) stream.

    Loads hypes/modules/weights from ``FLAGS.logdir`` (or downloads the
    default run), builds the inference graph once, then repeatedly grabs
    frames from ``cv2.VideoCapture`` and displays the street prediction as
    a green overlay. Press 'q' in the display window to quit.
    """
    tv_utils.set_gpus_to_use()
    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # No explicit weights given: download the default run.
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build the net once; the session is reused for
    # every camera frame in the loop below.
    with tf.Graph().as_default():
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        sess = tf.Session()
        saver = tf.train.Saver()

        core.load_weights(logdir, sess, saver)

    cap = cv2.VideoCapture(
        0)  # live camera; pass a saved video path instead to read a file
    while True:
        ret, image = cap.read()

        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes.
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            # BUG FIX: keyword was misspelled 'inerp', which raises a
            # TypeError in scipy.misc.imresize whenever this branch runs.
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')

        # Run KittiSeg model on the frame.
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to a 2D confidence image.
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Red-blue confidence overlay (computed but not displayed).
        rb_image = seg.make_overlay(image, output_image)

        # Hard street prediction: accept pixels with confidence > 0.5.
        threshold = 0.5
        street_prediction = output_image > threshold

        green_image = tv_utils.fast_overlay(image, street_prediction)
        cv2.imshow('ori', green_image)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
コード例 #22
0
ファイル: demo_images.py プロジェクト: nghiattran/ngnet
def main():
    """Render a KittiBox detection video from a directory of images.

    Builds the inference graph from ``args.logdir``, runs the detector on
    up to ``args.limit`` images from ``args.image_dir`` and writes the
    annotated frames to an mp4 (``args.save`` or ``video.mp4``) via
    imageio.
    """
    args = parser.parse_args()
    tv_utils.set_gpus_to_use()
    logdir = args.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Placeholder with a fixed spatial shape; every input image is
        # resized to (image_height, image_width) before being fed.
        image_pl = tf.placeholder(tf.float32, shape=(hypes["image_height"], hypes["image_width"], 3))
        image = tf.expand_dims(image_pl, 0)
        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    save_file = args.save if args.save else 'video.mp4'

    # Remove any stale output file. (This check was previously duplicated
    # a few lines apart; one copy suffices.)
    if os.path.isfile(save_file):
        os.remove(save_file)

    # Get all images to make video
    image_names = sorted(os.listdir(args.image_dir))[:args.limit]

    if not image_names:
        logging.error("No image found in given image_dir.")
        exit(1)

    start = time.time()

    logging.info("Making video")
    with imageio.get_writer(save_file, mode='I', fps=args.framerate) as writer:
        for i, image_name in enumerate(image_names):
            input_image = os.path.join(args.image_dir, image_name)

            # Load the input image and remember its original size so the
            # annotated frame can be scaled back afterwards.
            oimage = scp.misc.imread(input_image)

            oshape = oimage.shape[:2]
            # Height/width ratios for rescaling predicted boxes back to the
            # original resolution; the box rescale itself is currently
            # disabled, so these are kept only for reference.
            rh = oshape[0] / float(hypes["image_height"])
            rw = oshape[1] / float(hypes["image_width"])

            image = scp.misc.imresize(oimage, (hypes["image_height"],
                                              hypes["image_width"]),
                                      interp='cubic')
            feed = {image_pl: image}

            # Run KittiBox model on image
            pred_boxes = prediction['pred_boxes_new']
            pred_confidences = prediction['pred_confidences']
            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes,
                                                             pred_confidences],
                                                            feed_dict=feed)

            # Apply non-maximal suppression
            # and draw predictions on the image
            threshold = 0.5
            output_image, rectangles = kittibox_utils.add_rectangles(
                hypes, [image], np_pred_confidences,
                np_pred_boxes, show_removed=False,
                use_stitching=True, rnn_len=1,
                min_conf=threshold, tau=hypes['tau'], color_acc=(0, 255, 0))

            # Scale the annotated frame back to the source resolution.
            output_image = scp.misc.imresize(output_image, oshape, interp='cubic')
            writer.append_data(output_image)

    time_taken = time.time() - start
    logging.info('Video saved as %s' % save_file)
    logging.info('Number of images: %d' % len(image_names))
    logging.info('Time takes: %.2f s' % (time_taken))
    logging.info('Frequency: %.2f fps' % (len(image_names) / time_taken))
コード例 #23
0
ファイル: demo.py プロジェクト: chen116/KittiBox
def main(_):
    """Run the KittiBox car detector on a single image and save the result.

    Loads weights from ``FLAGS.logdir`` (or downloads the default run),
    times the forward pass, draws the detected boxes and writes the
    annotated image next to the input (or to ``FLAGS.output_image``).
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiBox')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Load input image; resizing to the hypes size is deliberately
    # disabled here (the placeholder accepts arbitrary shapes).
    image = scp.misc.imread(input_image)
    feed = {image_pl: image}

    # Run KittiBox model on image
    pred_boxes = prediction['pred_boxes_new']
    pred_confidences = prediction['pred_confidences']

    # Time 11 forward passes, restarting the timer after the first
    # (warm-up) iteration, and report the mean latency of the last 10.
    start = timer()
    for i in range(11):
        if i == 1:
            start = timer()
        (np_pred_boxes, np_pred_confidences) = sess.run(
            [pred_boxes, pred_confidences], feed_dict=feed)
    end = timer()
    print((end - start) / 10)

    # Apply non-maximal suppression
    # and draw predictions on the image
    output_image, rectangles = kittibox_utils.add_rectangles(
        hypes, [image], np_pred_confidences,
        np_pred_boxes, show_removed=False,
        use_stitching=True, rnn_len=1,
        min_conf=0.50, tau=hypes['tau'], color_acc=(0, 255, 0))

    # Keep only predictions whose score clears the threshold.
    threshold = 0.5
    accepted_predictions = [rect for rect in rectangles
                            if rect.score >= threshold]

    print('')
    logging.info("{} Cars detected".format(len(accepted_predictions)))

    # Printing coordinates of predicted rects.
    for i, rect in enumerate(accepted_predictions):
        logging.info("")
        logging.info("Coordinates of Box {}".format(i))
        logging.info("    x1: {}".format(rect.x1))
        logging.info("    x2: {}".format(rect.x2))
        logging.info("    y1: {}".format(rect.y1))
        logging.info("    y2: {}".format(rect.y2))
        logging.info("    Confidence: {}".format(rect.score))

    # save Image
    if FLAGS.output_image is None:
        output_name = input_image.split('.')[0] + '_rects.png'
    else:
        output_name = FLAGS.output_image

    scp.misc.imsave(output_name, output_image)
    logging.info("")
    logging.info("Output image saved to {}".format(output_name))

    logging.info("")
    logging.warning("Do NOT use this Code to evaluate multiple images.")

    logging.warning("Demo.py is **very slow** and designed "
                    "to be a tutorial to show how the KittiBox works.")
    logging.warning("")
    logging.warning("Please see this comment, if you like to apply demo.py to"
                    "multiple images see:")
    logging.warning("https://github.com/MarvinTeichmann/KittiBox/"
                    "issues/15#issuecomment-301800058")
    # Restore the original np.load (monkey-patched elsewhere in the
    # module -- np_load_old is presumably saved at import time; verify
    # against the file header).
    np.load = np_load_old
コード例 #24
0
def main(_):
    """Run KittiSeg on a single image and save the green street overlay.

    Loads weights from ``FLAGS.logdir`` (or downloads the default run),
    segments the input image, prints diagnostics about the predicted
    street pixels and writes ``<name>_green.png``.
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Load and resize input image
    image = scp.misc.imread(input_image)
    if hypes['jitter']['reseize_image']:
        # Resize input only, if specified in hypes
        image_height = hypes['jitter']['image_height']
        image_width = hypes['jitter']['image_width']
        image = scp.misc.imresize(image,
                                  size=(image_height, image_width),
                                  interp='cubic')

    # Run KittiSeg model on image
    feed = {image_pl: image}
    softmax = prediction['softmax']
    output = sess.run([softmax], feed_dict=feed)

    # Reshape output from flat vector to 2D Image
    shape = image.shape
    output_image = output[0][:, 1].reshape(shape[0], shape[1])
    print("pixe_value")
    print(output_image[0][1])

    # Plot confidences as red-blue overlay
    rb_image = seg.make_overlay(image, output_image)

    # Accept all pixel with conf >= 0.5 as positive prediction
    # This creates a `hard` prediction result for class street
    threshold = 0.5
    street_prediction = output_image > threshold

    print("predic_val")
    # (row, col) indices of every predicted street pixel.
    suoyin = np.where(street_prediction)
    chang = len(suoyin[0])
    # BUG FIX: dtype=np.int was removed in NumPy >= 1.20 (AttributeError);
    # plain int is equivalent. The per-element copy loop is replaced by
    # vectorized column assignment with identical results.
    test = np.zeros((chang, 2), dtype=int)
    test[:, 0] = suoyin[0]
    test[:, 1] = suoyin[1]
    print(test[0].shape)
    print(suoyin[0].shape)
    print(len(suoyin[0]))

    # Plot the hard prediction as green overlay
    green_image = tv_utils.fast_overlay(image, street_prediction)

    # Save output images to disk.
    if FLAGS.output_image is None:
        output_base_name = input_image
    else:
        output_base_name = FLAGS.output_image

    raw_image_name = output_base_name.split('.')[0] + '_raw.png'
    rb_image_name = output_base_name.split('.')[0] + '_rb.png'
    green_image_name = output_base_name.split('.')[0] + '_green.png'

    # Only the green overlay is actually written; raw/rb saving is
    # disabled, though the log lines below still mention those paths.
    scp.misc.imsave(green_image_name, green_image)

    logging.info("")
    logging.info("Raw output image has been saved to: {}".format(
        os.path.realpath(raw_image_name)))
    logging.info("Red-Blue overlay of confs have been saved to: {}".format(
        os.path.realpath(rb_image_name)))
    logging.info("Green plot of predictions have been saved to: {}".format(
        os.path.realpath(green_image_name)))
コード例 #25
0
ファイル: trim.py プロジェクト: watsonkm/PruneSeg
def main(_):
    """Trim (prune) KittiSeg weights and continue training.

    Builds the training graph, restores weights from the run named by
    ``FLAGS.RUN``, copies that run into ``RUNS/trimmed``, zeroes the
    lowest-L1-norm kernels of every layer listed in
    ``hypes['layer_pruning']['layers']`` (fraction given by
    ``layer_sparsity``), saves a checkpoint into the trimmed directory and
    resumes training from it.
    """
    utils.set_gpus_to_use()

    try:
        import tensorvision.train
        import tensorflow_fcn.utils
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    with open(tf.app.flags.FLAGS.hypes, 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)
    utils.load_plugins()

    if 'TV_DIR_RUNS' in os.environ:
        runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
    else:
        runs_dir = 'RUNS'

    utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
    utils._add_paths_to_sys(hypes)
    train.maybe_download_and_extract(hypes)
    maybe_download_and_extract(runs_dir)

    logging.info("Trimming weights.")
    logdir = os.path.join(runs_dir, FLAGS.RUN)
    modules = utils.load_modules_from_hypes(hypes)

    with tf.Graph().as_default():

        # build the graph based on the loaded modules
        with tf.name_scope("Queues"):
            queue = modules['input'].create_queues(hypes, 'train')

        tv_graph = core.build_training_graph(hypes, queue, modules)

        # prepare the tv session
        with tf.Session().as_default():
            tv_sess = core.start_tv_session(hypes)
        sess = tv_sess['sess']
        saver = tv_sess['saver']

        cur_step = core.load_weights(logdir, sess, saver)
        if cur_step is None:
            logging.warning("Loaded global_step is None.")
            logging.warning("This could mean,"
                            " that no weights have been loaded.")
            logging.warning("Starting Training with step 0.")
            cur_step = 0

        with tf.name_scope('Validation'):
            # Reuse the training variables for the inference graph.
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            image.set_shape([1, None, None, 3])
            inf_out = core.build_inference_graph(hypes, modules, image=image)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out

        # prepare the tv session
        # NOTE(review): this second placeholder/inference graph shadows the
        # one built just above and appears unused -- confirm before removing.
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])
        inf_out = core.build_inference_graph(hypes, modules, image=image)

        # Create a session for running Ops on the Graph.
        # Copy the run first so the original weights stay untouched.
        trim_dir = 'RUNS/trimmed'
        shutil.copytree(logdir, trim_dir)
        shutil.copy(tf.app.flags.FLAGS.hypes,
                    os.path.join(trim_dir, 'model_files', 'hypes.json'))
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(trim_dir, sess, saver)

        # Zero the kernels with the smallest L1 norm in every layer
        # selected for pruning.
        for weight in tf.contrib.model_pruning.get_masks():
            if any([
                    layer in weight.name
                    for layer in hypes['layer_pruning']['layers']
            ]):
                weight_value = tv_sess['sess'].run(weight)
                # Number of output kernels to drop in this layer.
                kernel_count = int(weight_value.shape[3] *
                                   hypes['layer_pruning']['layer_sparsity'])

                # L1 norm per output kernel (sum over h, w, in-channels).
                l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2))
                toss_kernels = l1_values.argsort()[:kernel_count]
                weight_value[:, :, :, toss_kernels] = 0
                assign_op = tf.assign(weight, tf.constant(weight_value))
                tv_sess['sess'].run(assign_op)

        checkpoint_path = os.path.join(trim_dir, 'model.ckpt')
        tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step)

    train.continue_training(trim_dir)
コード例 #26
0
ファイル: train_recog.py プロジェクト: IAMLabUMD/tpami2020
def do_finetuning(hypes):
    """
    Finetune model for a number of steps.

    This finetunes the model for at most hypes['solver']['max_steps'].
    It shows an update every utils.cfg.step_show steps and writes
    the model to hypes['dirs']['output_dir'] every utils.cfg.step_eval
    steps.

    Weights are initialized from the pre-trained hand-segmentation model
    named by hypes['transfer'] before training starts.

    Parameters
    ----------
    hypes : dict
        Hyperparameters
    """
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.

    try:
        import tensorvision.core as core
    except ImportError:
        logging.error("Could not import the submodules.")
        logging.error("Please execute:"
                      "'git submodule update --init --recursive'")
        exit(1)

    modules = utils.load_modules_from_hypes(hypes)

    # set to allocate memory on GPU as needed
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Session(config=config) as sess:
        # with tf.Session() as sess:

        # build the graph based on the loaded modules
        with tf.name_scope("Queues"):
            queue = modules['input'].create_queues(hypes, 'train')

        tv_graph = build_training_graph(hypes, queue, modules)

        # Collect the variables to restore from the pre-trained model.
        vars_to_restore = restoring_vars(hypes)

        restorer = tf.train.Saver(vars_to_restore)

        # load pre-trained model of hand segmentation
        logging.info("Loading pretrained model's weights")
        model_dir = hypes['transfer']['model_folder']
        model_file = hypes['transfer']['model_name']
        # DEBUG: check the model file
        # check_model(os.path.join(model_dir, model_file))
        """
        # Get a list of vars to restore
        vars_to_restore = restoring_vars(sess)
        print("vars to restore:", vars_to_restore)
        # Create another Saver for restoring pre-trained vars
        saver = tf.train.Saver(vars_to_restore)
        """
        core.load_weights(model_dir, sess, restorer)
        # load_trained_model(sess, hypes)

        # Separate saver for writing finetuned checkpoints.
        saver = tf.train.Saver(max_to_keep=int(utils.cfg.max_to_keep))

        # prepare the tv session
        tv_sess = prepare_tv_session(hypes, sess, saver)

        # DEBUG: print weights
        # check_weights(tv_sess['sess'])
        # check_graph(tv_sess['sess'])

        with tf.name_scope('Validation'):
            # Reuse the training variables for the validation graph.
            tf.get_variable_scope().reuse_variables()
            num_channels = hypes['arch']['num_channels']
            image_pl = tf.placeholder(tf.float32, [None, None, num_channels])
            image = tf.expand_dims(image_pl, 0)
            if hypes['jitter']['resize_image']:
                height = hypes['jitter']['image_height']
                width = hypes['jitter']['image_width']
                # set the pre-defined image size here
                image.set_shape([1, height, width, num_channels])

            inf_out = core.build_inference_graph(hypes, modules, image=image)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, queue, 'train', sess)

        # And then after everything is built, start the training loop.
        run_training(hypes, modules, tv_graph, tv_sess)

        # stopping input Threads
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
コード例 #27
0
def main(_):
    """Segment KITTI camera frames and publish the street's lidar points.

    For every frame in the KITTI object dataset: runs KittiSeg road
    segmentation on the image, projects the velodyne point cloud into the
    image, keeps only points that land on predicted street pixels, draws
    the surviving points with mayavi and builds a ROS PointCloud2 message
    (the actual publish call is currently commented out).
    """
    tv_utils.set_gpus_to_use()

    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     logging.info(
    #         "Usage: python demo.py --input_image data/test.png "
    #         "[--output_image output_image] [--logdir /path/to/weights] "
    #         "[--gpus GPUs_to_use] ")
    #     exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    dataset = kitti_object(
        os.path.join(ROOT_DIR, 'free-space/dataset/KITTI/object'))

    # ROS setup: publisher for the filtered street point cloud.
    point_pub = rospy.Publisher('cloud', PointCloud2, queue_size=50)
    # picture_pub = rospy.Publisher("kitti_image",newImage,queue_size=50)
    rospy.init_node('point-cloud', anonymous=True)
    # h = std_msgs.msg.Header()
    # h.frame_id="base_link"
    # h.stamp=rospy.Time.now()
    #rate = rospy.Rate(10)
    #point_msg=PointCloud2()

    # Video writer for the green-overlay frames (writing is currently
    # commented out in the loop below).
    video_dir = '/home/user/Data/lrx_work/free-space/kitti.avi'
    fps = 10
    num = 4541
    img_size = (1241, 376)
    fourcc = 'mp4v'
    videoWriter = cv2.VideoWriter(video_dir, cv2.VideoWriter_fourcc(*fourcc),
                                  fps, img_size)

    # NOTE(review): calibration of frame 0 is reused for every frame; the
    # per-frame lookup is commented out below -- confirm this is intended.
    calib = dataset.get_calibration(0)
    for data_idx in range(len(dataset)):
        #objects = dataset.get_label_objects(data_idx)

        # Load and resize input image
        image = dataset.get_image(data_idx)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #scp.misc.imsave('new.png', image)
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')
        img_height, img_width, img_channel = image.shape
        print("picture-shape")
        print(len(image))
        print(len(image[0]))
        # Lidar points, xyz only (intensity dropped).
        pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
        print(len(pc_velo))
        velo_len = len(pc_velo)
        #calib = dataset.get_calibration(data_idx)

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        rb_image = seg.make_overlay(image, output_image)
        # scp.misc.imsave('new0.png', rb_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        index = np.where(street_prediction == True)
        chang = len(index[0])
        print(chang)
        # test = np.zeros((velo_len,2),dtype=np.int)
        # for tmp0 in range(chang):
        #     test[tmp0][0]=index[0][tmp0]
        #     test[tmp0][1]=index[1][tmp0]
        print("suoyindayin")
        # if (chang>0):
        #     print(test[0][0])
        #     print(test[0][1])

        # Project all lidar points into the image plane.
        pts_2d = calib.project_velo_to_image(pc_velo)
        print(pts_2d.shape)
        # print(pts_2d[1][0])
        # print(pts_2d[1][1])

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        # First pass: keep points projecting into a fixed 1242x370 frame
        # and in front of the sensor (x > 0).
        fov_inds = (pts_2d[:,0]<1242) & (pts_2d[:,0]>=0) & \
            (pts_2d[:,1]<370) & (pts_2d[:,1]>=0)
        print(fov_inds.shape)
        # print(fov_inds[1000])
        # print(pc_velo.shape)
        print("okok")
        fov_inds = fov_inds & (pc_velo[:, 0] > 0)
        print(fov_inds.shape)
        imgfov_pts_2d = pts_2d[fov_inds, :]
        imgfov_pc_velo = pc_velo[fov_inds, :]
        # Second pass on the surviving points: bound by the actual image
        # size and require x > 2.0 m.
        pts_2d0 = calib.project_velo_to_image(imgfov_pc_velo)
        fov_inds0 = (pts_2d0[:,0]<len(image[0])) & (pts_2d0[:,0]>=0) & \
            (pts_2d0[:,1]<len(image)) & (pts_2d0[:,1]>=0)
        fov_inds0 = fov_inds0 & (imgfov_pc_velo[:, 0] > 2.0)
        print(fov_inds0.shape)
        print(street_prediction.shape)
        print(pts_2d0.shape)
        # if(chang>0):
        #     print(int(imgfov_pts_2d[5,0]))
        #     print(int(imgfov_pts_2d[5,1]))
        #     print(street_prediction[int(imgfov_pts_2d[5,1]),int(imgfov_pts_2d[5,0])])

        # Keep only points whose projected pixel is predicted as street.
        if (chang > 0):
            for i in range(len(fov_inds0)):
                if ((pts_2d0[i, 1] < len(street_prediction)) &
                    (pts_2d0[i, 0] < len(street_prediction[0]))):
                    fov_inds0[i] = fov_inds0[i] & (
                        street_prediction[int(pts_2d0[i, 1]),
                                          int(pts_2d0[i, 0])] == True)
        imgfov_pc_velo0 = imgfov_pc_velo[fov_inds0, :]
        print("number")
        green_image = tv_utils.fast_overlay(image, street_prediction)
        # pub point-cloud topic
        print(imgfov_pc_velo0.shape)
        number = len(imgfov_pc_velo0)

        # Build the PointCloud2 message from the street points (the
        # publish call itself is commented out below).
        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = "velodyne"
        points = pc2.create_cloud_xyz32(header, imgfov_pc_velo0)

        # point=Point()

        # for t in range(0,number):
        #     point_x=imgfov_pc_velo0[t][0]
        #     point_y=imgfov_pc_velo0[t][1]
        #     point_z=imgfov_pc_velo0[t][2]
        #     point_msg.points[t].point.x=point_x
        #     point_msg.points[t].point.y=point_y
        #     point_msg.points[t].point.z=point_z

        # point_pub.publish(points)
        # videoWriter.write(green_image)

        # bridge=CvBridge()
        # picture_pub.publish(bridge.cv2_to_imgmsg(green_image,"rgb8"))

        # minx=imgfov_pc_velo0[0][0]
        # miny=imgfov_pc_velo0[0][1]
        # minz=imgfov_pc_velo0[0][2]
        # maxx=imgfov_pc_velo0[0][0]
        # maxy=imgfov_pc_velo0[0][1]
        # maxz=imgfov_pc_velo0[0][2]

        # for t in range(len(imgfov_pc_velo0)):
        #     minx=min(minx,imgfov_pc_velo0[t][0])
        #     miny=min(miny,imgfov_pc_velo0[t][1])
        #     minz=min(minz,imgfov_pc_velo0[t][2])
        #     maxx=max(maxx,imgfov_pc_velo0[t][0])
        #     maxy=max(maxy,imgfov_pc_velo0[t][1])
        #     maxz=max(maxz,imgfov_pc_velo0[t][2])
        # print(minx,miny,minz,maxx,maxy,maxz)
        # width=1024
        # height=1024
        # img_res=np.zeros([width,height,3],dtype=np.uint8)
        # for p in range(len(imgfov_pc_velo0)):
        #     velo_x=5*(int(imgfov_pc_velo0[p][0])+50)
        #     velo_y=5*(int(imgfov_pc_velo0[p][1])+50)
        #     img_res[velo_x][velo_y]=255
        #     scale=25
        #     if((velo_x>scale)&(velo_x+scale<1024)&(velo_y>scale)&(velo_y+scale<1024)):
        #         for q in range(scale):
        #             for m in range(scale):
        #                 img_res[velo_x-q][velo_y-m]=255
        #                 img_res[velo_x-q][velo_y+m]=255
        #                 img_res[velo_x+q][velo_y-m]=255
        #                 img_res[velo_x+q][velo_y+m]=255
        # scp.misc.imsave('res.png',img_res)

        # Visualize the filtered street point cloud.
        draw_lidar(imgfov_pc_velo0, fig=fig)

        # for obj in objects:
        #     if obj.type == 'DontCare': continue
        #     # Draw 3d bounding box
        #     box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        #     box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        #     # Draw heading arrow
        #     ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        #     ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        #     x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        #     x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        #     draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        #     mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
        #                 tube_radius=None, line_width=1, figure=fig)

        #mlab.show(1)

        # Plot the hard prediction as green overlay

        # Save output images to disk.
        # NOTE(review): this assigns the image ndarray, not a filename --
        # looks like a latent bug (siblings use input_image here); harmless
        # only because the name-splitting/saving lines are commented out.
        if FLAGS.output_image is None:
            output_base_name = image
        else:
            output_base_name = FLAGS.output_image

        # raw_image_name = output_base_name.split('.')[0] + '_raw.png'
        # rb_image_name = output_base_name.split('.')[0] + '_rb.png'
        # green_image_name = output_base_name.split('.')[0] + '_green.png'

        # scp.misc.imsave('1.png', output_image)
        # scp.misc.imsave('2.png', rb_image)
        # scp.misc.imsave('3.png', green_image)
        # Block until the user presses Enter before the next frame.
        raw_input()
コード例 #28
0
def main(_):
    """Run KittiSeg inference on every third image listed in FLAGS.input_image.

    FLAGS.input_image is a text file with one image path per line;
    FLAGS.output_image is the prefix used when writing the raw confidence
    map and the green street-overlay for each processed image.
    """
    tv_utils.set_gpus_to_use()

    if (FLAGS.input_image is None) and (FLAGS.output_image is None):
        logging.error("No input_image or output_image were given.")
        logging.info(
            "Usage: python demo_modified.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    # Read the list of input image paths.  Using a context manager closes
    # the file handle even on error (the original leaked it).
    input_image = FLAGS.input_image
    with open(input_image, 'r') as list_file:
        input_images = list_file.read().splitlines()

    # Run KittiSeg model on every third listed image.
    for n, list_images in enumerate(input_images):
        if (n % 3) != 0:
            continue
        image = scp.misc.imread(list_images)
        # Bare image name (no directories, no extension) for output files.
        name_split = list_images.split('/')
        image_name = name_split[len(name_split) - 1].split('.')[0]
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')

        feed = {image_pl: image}
        softmax = prediction['softmax']
        # Time the forward pass only, not the image loading.
        start_time = timeit.default_timer()
        output = sess.run([softmax], feed_dict=feed)
        elapsed = timeit.default_timer() - start_time
        print(elapsed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Accept all pixels with conf > threshold as positive prediction.
        # NOTE: a deliberately low threshold (0.01) is used here, so almost
        # any street confidence counts as a positive `hard` prediction.
        threshold = 0.01
        street_prediction = output_image > threshold

        # Plot the hard prediction as green overlay
        green_image = tv_utils.fast_overlay(image, street_prediction)

        # Save output images to disk.
        output_base_name = FLAGS.output_image

        raw_image_name = output_base_name + image_name + '_raw.png'
        green_image_name = output_base_name + image_name + '_green.png'
        scp.misc.imsave(raw_image_name, output_image)
        scp.misc.imsave(green_image_name, green_image)

        logging.info("")
        logging.info("Raw output image has been saved to: {}".format(
            os.path.realpath(raw_image_name)))
        logging.info(
            "Green plot of predictions have been saved to: {}".format(
                os.path.realpath(green_image_name)))
コード例 #29
0
def main(_):
    """Run multi-class KittiSeg segmentation frame-by-frame over a video.

    Reads the first 100 frames of the hard-coded 't1.avi', runs the
    num_classes-wide softmax on each frame, writes green-overlay frames
    to 'outputvideo.avi', and dumps per-class debug images on every frame.

    NOTE(review): unlike the other demo variants, this one logs an error
    for a missing --input_image but does not exit(1); FLAGS.input_image
    is only used as the output-name fallback below.
    """
    config = tf.ConfigProto()
    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.7

    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")
    # Number of segmentation classes in the network output (presumably
    # matches the trained model's softmax width -- TODO confirm).
    num_classes = 5

    # Create tf graph and build module.
    with tf.Graph().as_default():




        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=config)
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Dealing with video segmentation frame by frame.
    # TODO: build a commandline (input/output video paths are hard-coded).
    # Only the first 100 frames are processed.
    loaded_video = skvideo.io.vread('t1.avi')[:100]
    writer = skvideo.io.FFmpegWriter("outputvideo.avi")
    for image in loaded_video:
        # Load and resize input image
        if hypes['jitter']['reseize_image']:
                # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image, size=(image_height, image_width),
                                                                  interp='cubic')

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run(softmax, feed_dict=feed)

        print(len(output), type(output), output.shape)
        # Reshape output from flat vector to (num_classes, H, W)
        output = np.transpose(output)
        shape = image.shape
        output = output.reshape(num_classes, shape[0], shape[1])
        output_image = output[0].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        rb_image = seg.make_overlay(image, output_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold
        street_predictions = output > threshold  # per-class hard masks
        # Plot the hard prediction as green overlay
        green_image = tv_utils.fast_overlay(image, street_prediction)

        # Build per-class overlay / raw images for all 5 classes.
        green_images = []
        rb_images = []
        output_images = []
        for c in range(0,5):
            green_images.append(tv_utils.fast_overlay(image, street_predictions[c]))
            rb_images.append(seg.make_overlay(image, output[c]))
            output_images.append(output[c].reshape(shape[0], shape[1]))

        # Save output images to disk.
        if FLAGS.output_image is None:
            output_base_name = input_image
        else:
            output_base_name = FLAGS.output_image

        # Name and save the per-class red-blue/green/raw debug pngs.
        # NOTE(review): the same file names are rewritten on every frame.
        green_image_names = []
        rb_image_names = []
        raw_image_names = []
        for c in range(1,6):
            green_image_names.append(output_base_name.split('.')[0] + str(c) + '_green.png')
            rb_image_names.append(output_base_name.split('.')[0] + str(c) + '_rb.png')
            raw_image_names.append(output_base_name.split('.')[0] + str(c) + '_raw.png')

        for c in range(0,5):
            print(green_image_names[c], green_images[c].shape)
            scp.misc.imsave(raw_image_names[c], output_images[c])
            scp.misc.imsave(rb_image_names[c], rb_images[c])
            scp.misc.imsave(green_image_names[c], green_images[c])

        # Output the green masked video as a file and show it to screen
        writer.writeFrame(green_images[4])
        cv2.imshow('frame', green_images[4])

        # User can press 'q' to quit during processing.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    writer.close()
コード例 #30
0
ファイル: run.py プロジェクト: zzhang87/KittiSeg
def main(_):
    """Segment a hard-coded video with KittiSeg and overlay a heading line.

    Per frame: run the street softmax, threshold it at 0.5, extract the
    largest contour of the street mask, compute the per-row centroid of
    that contour, draw a line through the first and last centroid, and
    annotate the frame with a heading angle and a normalized horizontal
    offset.  Frames are shown on screen and written to an output video.

    NOTE(review): input/output video paths and resolution are hard-coded
    below; --input_image is only logged, never used for loading.
    """
    tv_utils.set_gpus_to_use()

    # Input-image validation is intentionally disabled in this variant:
    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     ... exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input; fix the batch/channel layout so
        # the graph knows it receives a single RGB image.
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Frame counter and cumulative inference time for the FPS report.
    # NOTE(review): count starts at 1 and is incremented once per frame,
    # so the FPS below is computed over frames+1.
    count = 1
    duration = 0
    video = cv2.VideoCapture(
        "/home/zzhang52/Data/south_farm/front/VID_20170426_144800.mp4")
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter("/home/zzhang52/Videos/visual_nav.avi", fourcc,
                             12.0, (960, 540))

    while video.grab():
        start = time()
        _, image = video.retrieve()
        # The model expects RGB; OpenCV delivers BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='bilinear')

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)
        duration += time() - start
        count += 1

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        # Binarize the street mask to 0/255 for OpenCV contour extraction.
        thresh = (255 * street_prediction).astype(np.uint8)

        _, thresh = cv2.threshold(thresh, 100, 255, 0)

        # NOTE(review): the 3-value unpacking matches the OpenCV 3.x
        # findContours signature; OpenCV 2.x/4.x return 2 values.
        _, contours, _ = cv2.findContours(thresh,
                                          mode=cv2.RETR_EXTERNAL,
                                          method=cv2.CHAIN_APPROX_NONE)

        # Largest contour first; contour 0 is taken as the street region.
        contours.sort(key=cv2.contourArea, reverse=True)
        mask = np.zeros(shape, np.uint8)
        cv2.drawContours(mask, contours, 0, (0, 255, 0), -1)

        # Sort the largest contour's points by row (y) coordinate so the
        # per-row scan below can consume them in order.
        cnt = np.asarray(contours[0])
        cnt = np.squeeze(cnt)
        cnt = cnt[np.argsort(cnt[:, 1])]
        cnt = np.expand_dims(cnt, axis=1)

        # For each image row, average the x of all contour points lying
        # on that row -> one centroid point per covered row.
        points = []
        idx = 0
        x_sum = 0
        x_count = 0

        for i in range(shape[0]):
            while idx < cnt.shape[0] and cnt[idx][0][1] == i:
                x_sum += float(cnt[idx][0][0])
                x_count += 1
                idx += 1

            if x_count != 0:
                avg = int(x_sum / x_count)
                points.append(np.array([[avg, i]]))
                x_sum = 0
                x_count = 0

        points = np.asarray(points)

        # (A least-squares fit via cv2.fitLine was tried and disabled;
        #  the line through the extreme centroids is used instead.)

        # Heading line through the top-most and bottom-most centroids.
        p1 = Point(points[0][0][0], points[0][0][1])
        p2 = Point(points[-1][0][0], points[-1][0][1])

        dx = float(p1.x - p2.x)
        dy = float(p1.y - p2.y)
        # Heading angle in degrees; offset is the normalized horizontal
        # distance of the line's lower end from the image center.
        # NOTE(review): dy == 0 would raise ZeroDivisionError here.
        r = np.arctan(dx / dy) * 180 / np.pi
        t = float(shape[1] / 2 - p2.x) / shape[1]

        # Convert back to BGR for display and blend the street mask in.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        alpha = 0.3
        overlay = cv2.addWeighted(image, 1 - alpha, mask, alpha, 0)

        cv2.line(overlay, (p1.x, p1.y), (p2.x, p2.y), (0, 0, 200), 2)
        cv2.putText(overlay, "Heading: {:.2f}".format(r), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        cv2.putText(overlay, "Offset: {:.3e}".format(t), (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
        cv2.imshow("show", overlay)
        writer.write(overlay)
        # ESC (key code 27) quits.
        if cv2.waitKey(15) == 27:
            break

    cv2.destroyAllWindows()
    video.release()
    writer.release()

    # (The single-image save/log block from the other demos was removed;
    #  this variant only writes the annotated video.)

    print("FPS: {}".format(count / duration))
コード例 #31
0
def main(_):
    """Run KittiBox car detection on every image in a directory.

    --input_image is treated as a directory; each file in it is read,
    the top 8 pixel rows are cropped off, the detector is run, boxes
    with score >= 0.75 are logged and passed to savedata(), and an
    output file name is logged for each image.

    NOTE(review): the actual imsave of the annotated image is commented
    out below, so "Output image saved" is logged without a file being
    written -- confirm whether that is intentional.
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiBox')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # Pin the layout to a single RGB image so shape inference works.
        image.set_shape([1, None, None, 3])
        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules,
                                                image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))
    # Iterate over every file in the input directory.
    for img in os.listdir(input_image):
        # NOTE(review): no extension filtering is done (the .png checks
        # are commented out), so non-image files would crash imread.
        # Load the image; paths are joined by plain concatenation, so
        # input_image is expected to end with a path separator.
        image = scp.misc.imread(input_image + img)
        # Crop the top 8 pixel rows off the image before detection
        # (presumably to match the trained input size -- TODO confirm).
        image = image[8:,:,:]

        feed = {image_pl: image}

        # Run KittiBox model on image
        pred_boxes = prediction['pred_boxes']
        pred_confidences = prediction['pred_confidences']
        pred_logits = prediction['pred_logits']
        # Time the forward pass only.
        t0 = time.time()
        (np_pred_boxes, np_pred_confidences, np_pred_logits) = sess.run([pred_boxes,
                                                        pred_confidences,
                                                        pred_logits],
                                                        feed_dict=feed)
        print(time.time() - t0)

        # Apply non-maximal suppression
        # and draw predictions on the image
        output_image, rectangles = kittibox_utils.add_rectangles(
            hypes, [image], np_pred_confidences,
            np_pred_boxes, show_removed=False,
            use_stitching=True, rnn_len=1,
            min_conf=0.75, tau=hypes['tau'], color_acc=(0, 255, 0))

        # Keep only rectangles at or above the score threshold.
        threshold = 0.75
        accepted_predictions = []
        # removing predictions <= threshold
        for rect in rectangles:
            if rect.score >= threshold:
                accepted_predictions.append(rect)

        print('')
        logging.info("{} Cars detected".format(len(accepted_predictions)))

        # Printing coordinates of predicted rects and persist each one.
        for i, rect in enumerate(accepted_predictions):
            logging.info("")
            logging.info("Coordinates of Box {}".format(i))
            logging.info("    class: {}".format(rect.classID))
            logging.info("    x1: {}".format(rect.x1))
            logging.info("    x2: {}".format(rect.x2))
            logging.info("    y1: {}".format(rect.y1))
            logging.info("    y2: {}".format(rect.y2))
            logging.info("    Confidence: {}".format(rect.score))
            savedata(rect,img,FLAGS.output_image)

        # save Image
        output_name = os.path.join(FLAGS.output_image, img.split('.')[0] + '_rects.jpg')
        # NOTE(review): the save below is disabled; only the name is logged.
#        scp.misc.imsave(output_name, output_image)
        logging.info("")
        logging.info("Output image saved to {}".format(output_name))
コード例 #32
0
ファイル: train.py プロジェクト: TensorVision/TensorVision
def continue_training(logdir):
    """Resume an aborted training run from the artifacts in *logdir*.

    Reloads hyperparameters and model modules from the log directory,
    rebuilds the training graph, restores the saved weights, and
    re-enters the training loop at the recovered global step.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    # Everything below builds into the default Graph of this session.
    with tf.Session() as sess:

        # Input queues feeding the training pipeline.
        with tf.name_scope("Queues"):
            input_queue = modules['input'].create_queues(hypes, 'train')

        train_graph = core.build_training_graph(hypes, input_queue, modules)

        # Prepare the TensorVision session and grab its handles.
        tv_session = core.start_tv_session(hypes)
        sess = tv_session['sess']
        saver = tv_session['saver']

        # Append (mode='a') to the run's existing log file.
        log_path = os.path.join(logdir, 'output.log')
        utils.create_filewrite_handler(log_path, mode='a')

        logging.info("Continue training.")

        # Restore weights; the return value is the step to resume from.
        start_step = core.load_weights(logdir, sess, saver)
        if start_step is None:
            logging.warning("Loaded global_step is None.")
            logging.warning("This could mean,"
                            " that no weights have been loaded.")
            logging.warning("Starting Training with step 0.")
            start_step = 0

        # Rebuild the inference path (reusing the trained variables)
        # so validation can run alongside training.
        with tf.name_scope('Validation'):
            tf.get_variable_scope().reuse_variables()
            val_image_pl = tf.placeholder(tf.float32)
            val_image = tf.expand_dims(val_image_pl, 0)
            val_image.set_shape([1, None, None, 3])
            val_inference = core.build_inference_graph(hypes, modules,
                                                       image=val_image)
            train_graph['image_pl'] = val_image_pl
            train_graph['inf_out'] = val_inference

        # Kick off the data-loading threads.
        modules['input'].start_enqueuing_threads(hypes, input_queue,
                                                 'train', sess)

        # With the whole graph assembled, enter the training loop.
        run_training(hypes, modules, train_graph, tv_session, start_step)

        # Shut the input threads down cleanly.
        tv_session['coord'].request_stop()
        tv_session['coord'].join(tv_session['threads'])
コード例 #33
0
def main(_):
    """Run multi-class KittiSeg inference on a single image.

    Segments --input_image with the model from --logdir (downloading the
    published weights when none is given), takes the per-pixel argmax
    class and writes a color-coded label image '<base>_raw.png'.
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Load and resize input image
    image = scp.misc.imread(input_image)
    if hypes['jitter']['reseize_image']:
        # Resize input only, if specified in hypes
        image_height = hypes['jitter']['image_height']
        image_width = hypes['jitter']['image_width']
        image = scp.misc.imresize(image,
                                  size=(image_height, image_width),
                                  interp='cubic')

    # PASCAL-VOC-style color palette: one RGB triple per class index.
    classes_colors = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                      [0, 0, 128], [128, 0, 128], [0, 128,
                                                   128], [128, 128, 128],
                      [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                      [64, 0, 128], [192, 0, 128], [64, 128, 128],
                      [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0],
                      [128, 192, 0], [0, 64, 128]]

    # Run KittiSeg model on image
    feed = {image_pl: image}
    softmax = prediction['softmax']
    output = sess.run(softmax, feed_dict=feed)

    # Reshape the flat softmax output to (H, W, num_classes).
    shape = image.shape
    output_image = output.reshape(shape[0], shape[1], -1)

    # Per-pixel argmax class, then map class indices to colors with one
    # vectorized fancy-indexing step instead of a per-pixel Python loop
    # (same result, runs at C speed).
    x = np.argmax(output_image, axis=2)
    palette = np.array(classes_colors, dtype=np.uint8)
    im = palette[x]

    # Save output images to disk.
    if FLAGS.output_image is None:
        output_base_name = input_image
    else:
        output_base_name = FLAGS.output_image

    raw_image_name = output_base_name.split('.')[0] + '_raw.png'

    scp.misc.imsave(raw_image_name, im)
コード例 #34
0
def main(_):
    """Run the KittiSeg street-segmentation demo on a single image.

    Loads the model referenced by --logdir (downloading the published
    weights when none is given), segments --input_image, and writes the
    raw confidence map plus red-blue and green overlay images, named
    after the input (or --output_image when provided).
    """
    tv_utils.set_gpus_to_use()

    if FLAGS.input_image is None:
        logging.error("No input_image was given.")
        logging.info(
            "Usage: python demo.py --input_image data/test.png "
            "[--output_image output_image] [--logdir /path/to/weights] "
            "[--gpus GPUs_to_use] ")
        exit(1)

    if FLAGS.logdir is not None:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir
    else:
        # No weights given: fall back to the published MultiNet weights,
        # downloading them on first use.
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
                                    'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)

    # Hyperparameters and the model modules both live in the log dir.
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')
    logging.info("Hypes loaded successfully.")

    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    with tf.Graph().as_default():
        # Single float image fed through a placeholder with a batch axis.
        image_pl = tf.placeholder(tf.float32)
        batched_image = tf.expand_dims(image_pl, 0)

        # Assemble the inference graph from the loaded modules.
        prediction = core.build_inference_graph(hypes, modules,
                                                image=batched_image)
        logging.info("Graph build successfully.")

        # Session + saver, then restore the trained weights.
        sess = tf.Session()
        saver = tf.train.Saver()
        core.load_weights(logdir, sess, saver)
        logging.info("Weights loaded successfully.")

    input_image = FLAGS.input_image
    logging.info("Starting inference using {} as input".format(input_image))

    # Read the image and, if the hypes request it, resize it to the
    # resolution the network was trained on.
    image = scp.misc.imread(input_image)
    if hypes['jitter']['reseize_image']:
        target_h = hypes['jitter']['image_height']
        target_w = hypes['jitter']['image_width']
        image = scp.misc.imresize(image, size=(target_h, target_w),
                                  interp='cubic')

    # Forward pass: fetch the softmax confidences for the image.
    output = sess.run([prediction['softmax']],
                      feed_dict={image_pl: image})

    # Column 1 holds the street confidence; reshape it back to H x W.
    height, width = image.shape[0], image.shape[1]
    output_image = output[0][:, 1].reshape(height, width)

    # Soft visualization: red-blue confidence overlay.
    rb_image = seg.make_overlay(image, output_image)

    # Hard prediction: every pixel above 0.5 confidence counts as street.
    street_prediction = output_image > 0.5

    # Hard visualization: green overlay of the thresholded mask.
    green_image = tv_utils.fast_overlay(image, street_prediction)

    # Derive output names from --output_image or the input path.
    output_base_name = (input_image if FLAGS.output_image is None
                        else FLAGS.output_image)
    stem = output_base_name.split('.')[0]
    raw_image_name = stem + '_raw.png'
    rb_image_name = stem + '_rb.png'
    green_image_name = stem + '_green.png'

    scp.misc.imsave(raw_image_name, output_image)
    scp.misc.imsave(rb_image_name, rb_image)
    scp.misc.imsave(green_image_name, green_image)

    logging.info("")
    logging.info("Raw output image has been saved to: {}".format(
        os.path.realpath(raw_image_name)))
    logging.info("Red-Blue overlay of confs have been saved to: {}".format(
        os.path.realpath(rb_image_name)))
    logging.info("Green plot of predictions have been saved to: {}".format(
        os.path.realpath(green_image_name)))

    logging.info("")
    logging.warning("Do NOT use this Code to evaluate multiple images.")

    logging.warning("Demo.py is **very slow** and designed "
                    "to be a tutorial to show how the KittiSeg works.")
    logging.warning("")
    logging.warning("Please see this comment, if you like to apply demo.py to"
                    "multiple images see:")
    logging.warning("https://github.com/MarvinTeichmann/KittiBox/"
                    "issues/15#issuecomment-301800058")
コード例 #35
0
ファイル: train.py プロジェクト: shobi22/MultiNet
def load_united_model(logdir):
    """Rebuild the united MultiNet model from a training logdir.

    Loads the meta hypes plus each sub-model's hypes and modules from
    *logdir*, rebuilds the per-model training graphs, recombines their
    losses, restores trained weights into a fresh TensorVision session,
    attaches inference heads and starts the input queues.

    Parameters
    ----------
    logdir : str
        Directory holding the saved hypes, modules and checkpoints.

    Returns
    -------
    tuple
        ``(meta_hypes, subhypes, submodules, subgraph, tv_sess, cur_step)``
    """
    subhypes = {}
    subgraph = {}
    submodules = {}
    subqueues = {}

    subgraph['debug_ops'] = {}

    # Load the meta hypes describing the combined model.
    meta_hypes = utils.load_hypes_from_logdir(logdir, subdir="",
                                              base_path='hypes')

    # Build input queues and a training graph for every sub-model
    # listed in the meta hypes.
    for idx, model in enumerate(meta_hypes['model_list']):
        subhypes[model] = utils.load_hypes_from_logdir(logdir, subdir=model)
        hypes = subhypes[model]
        # Share the meta-level output/image/data directories with each
        # sub-model so they all read and write the same locations.
        hypes['dirs']['output_dir'] = meta_hypes['dirs']['output_dir']
        hypes['dirs']['image_dir'] = meta_hypes['dirs']['image_dir']
        hypes['dirs']['data_dir'] = meta_hypes['dirs']['data_dir']
        submodules[model] = utils.load_modules_from_logdir(logdir,
                                                           dirname=model,
                                                           postfix=model)

        modules = submodules[model]

        logging.info("Build %s computation Graph.", model)
        with tf.name_scope("Queues_%s" % model):
            subqueues[model] = modules['input'].create_queues(hypes, 'train')

        logging.info('Building Model: %s' % model)
        # Only the first sub-model creates the shared variables; later
        # ones reuse them (idx == 0 replaces the old first_iter flag).
        subgraph[model] = build_training_graph(hypes,
                                               subqueues[model],
                                               modules,
                                               idx == 0)

    # Recombine the per-model losses into one training objective.
    # Two models -> detection + segmentation variant, otherwise assume
    # the three-model variant.
    if len(meta_hypes['model_list']) == 2:
        _recombine_2_losses(meta_hypes, subgraph, subhypes, submodules)
    else:
        _recombine_3_losses(meta_hypes, subgraph, subhypes, submodules)

    hypes = subhypes[meta_hypes['model_list'][0]]
    # Launch the graph in a TensorVision session and restore weights.
    tv_sess = core.start_tv_session(hypes)
    sess = tv_sess['sess']
    saver = tv_sess['saver']
    cur_step = core.load_weights(logdir, sess, saver)

    # Attach an inference head per sub-model, reusing the trained
    # variables, then start feeding the training queues.
    for model in meta_hypes['model_list']:
        hypes = subhypes[model]
        modules = submodules[model]

        with tf.name_scope('Validation_%s' % model):
            tf.get_variable_scope().reuse_variables()
            # Placeholder for a single image; expand_dims turns it into
            # a batch of size one for the inference graph.
            image_pl = tf.placeholder(tf.float32)
            image = tf.expand_dims(image_pl, 0)
            inf_out = core.build_inference_graph(hypes, modules,
                                                 image=image)
            subgraph[model]['image_pl'] = image_pl
            subgraph[model]['inf_out'] = inf_out

        # Start the data load
        modules['input'].start_enqueuing_threads(hypes, subqueues[model],
                                                 'train', sess)

    # Persist the merged hypes next to the model output for later runs.
    target_file = os.path.join(meta_hypes['dirs']['output_dir'], 'hypes.json')
    with open(target_file, 'w') as outfile:
        json.dump(meta_hypes, outfile, indent=2, sort_keys=True)

    return meta_hypes, subhypes, submodules, subgraph, tv_sess, cur_step