Example 1
def _do_python_evaluation(hypes, step, sess_coll, objective,
                          image_pl, softmax):
    logging.info('Doing Python Evaluation.')
    sess, saver, summary_op, summary_writer, coord, threads = sess_coll
    eval_dict, images = objective.evaluate(hypes, sess, image_pl, softmax)

    utils.print_eval_dict(eval_dict)
    _write_eval_dict_to_summary(eval_dict, summary_writer, step)
    _write_images_to_summary(images, summary_writer, step)

    return
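Example 1 depends on the module-level helper _write_eval_dict_to_summary, which is not shown. Below is a minimal sketch of such a helper, assuming eval_dict is an iterable of (name, value) pairs and the TF 1.x tf.Summary API; the project's real implementation may differ.

import tensorflow as tf

def _write_eval_dict_to_summary(eval_dict, summary_writer, step):
    # Hypothetical sketch: pack each (name, value) metric into a
    # tf.Summary protobuf and hand it to the FileWriter.
    summary = tf.Summary()
    for name, value in eval_dict:
        summary.value.add(tag='Evaluation/' + name,
                          simple_value=float(value))
    summary_writer.add_summary(summary, step)
    summary_writer.flush()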
Example 2
def do_analyze(logdir, base_path=None):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)

    if base_path is not None:
        hypes['dirs']['base_path'] = base_path

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():

        # prepare the tv session

        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)
        image.set_shape([1, None, None, 3])
        inf_out = core.build_inference_graph(hypes, modules,
                                             image=image)

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        core.load_weights(logdir, sess, saver)

        logging.info("Graph loaded succesfully. Starting evaluation.")

        output_dir = os.path.join(logdir, 'analyse')

        logging.info("Output Images will be written to: {}".format(
            os.path.join(output_dir, "images/")))

        logging_file = os.path.join(logdir, "analyse/output.log")
        utils.create_filewrite_handler(logging_file)

        eval_dict, images = modules['eval'].evaluate(
            hypes, sess, image_pl, inf_out)

        logging.info("Evaluation Succesfull. Results:")

        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, output_dir)
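A hypothetical invocation of do_analyze, assuming the surrounding module configures logging and that 'RUNS/my_experiment' and 'DATA' are placeholder paths:

if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    # base_path overrides hypes['dirs']['base_path'] as shown above.
    do_analyze('RUNS/my_experiment', base_path='DATA')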
Example 3
def run_evaling(hypes, modules, tv_graph, tv_sess, start_step=0):
    """Run one iteration of training."""
    # Unpack operations for later use
    summary = tf.Summary()
    sess = tv_sess['sess']
    summary_writer = tv_sess['writer']

    solver = modules['solver']

    display_iter = hypes['logging']['display_iter']
    write_iter = hypes['logging'].get('write_iter', 5 * display_iter)
    eval_iter = hypes['logging']['eval_iter']
    save_iter = hypes['logging']['save_iter']
    image_iter = hypes['logging'].get('image_iter', 5 * save_iter)
    py_smoother = MedianSmoother(20)
    step = 0

    # Run the evaluation step

    logging.info('Running Evaluation Script.')

    eval_dict, images = modules['eval'].evaluate(hypes, sess,
                                                 tv_graph['image_pl'],
                                                 tv_graph['calib_pl'],
                                                 tv_graph['xy_scale_pl'],
                                                 tv_graph['inf_out'])

    #_write_images_to_summary(images, summary_writer, step)
    logging.info("Evaluation Finished. All results will be saved to:")
    logging.info(hypes['dirs']['output_dir'])

    logging.info('Raw Results:')
    utils.print_eval_dict(eval_dict, prefix='(raw)   ')
    _write_eval_dict_to_summary(eval_dict, 'Evaluation/raw', summary_writer,
                                step)

    logging.info('Smooth Results:')
    names, res = zip(*eval_dict)
    smoothed = py_smoother.update_weights(res)
    eval_dict = zip(names, smoothed)
    utils.print_eval_dict(eval_dict, prefix='(smooth)')
    _write_eval_dict_to_summary(eval_dict, 'Evaluation/smoothed',
                                summary_writer, step)
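run_evaling smooths the raw metrics with MedianSmoother(20), which is defined elsewhere. A minimal sketch of what such a smoother could look like, assuming update_weights takes a sequence of metric values and returns their element-wise median over the last num calls; the real class may differ.

from collections import deque
import numpy as np

class MedianSmoother(object):
    def __init__(self, num=20):
        # Keep only the most recent `num` result vectors.
        self.window = deque(maxlen=num)

    def update_weights(self, results):
        self.window.append(list(results))
        # Element-wise median over the stored window.
        return np.median(np.array(self.window), axis=0).tolist()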
Example 4
def do_analyze(logdir):
    """
    Analyze a trained model.

    This will load model files and weights found in logdir and run a basic
    analysis.

    Parameters
    ----------
    logdir : string
        Directory with logs.
    """
    hypes = utils.load_hypes_from_logdir(logdir)
    modules = utils.load_modules_from_logdir(logdir)
    data_input, arch, objective, solver = modules

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():

        # prepare the tv session

        with tf.name_scope('Validation'):
            image_pl, label_pl = _create_input_placeholder()
            image = tf.expand_dims(image_pl, 0)
            softmax = core.build_inference_graph(hypes, modules,
                                                 image=image,
                                                 label=label_pl)

        sess_coll = core.start_tv_session(hypes)
        sess, saver, summary_op, summary_writer, coord, threads = sess_coll

        core.load_weights(logdir, sess, saver)

        eval_dict, images = objective.tensor_eval(hypes, sess, image_pl,
                                                  softmax)

        logging_file = os.path.join(logdir, "eval/analysis.log")
        utils.create_filewrite_handler(logging_file)

        utils.print_eval_dict(eval_dict)
        _write_images_to_logdir(images, logdir)
    return
Example 5
def run_united_evaluation(meta_hypes,
                          subhypes,
                          submodules,
                          subgraph,
                          tv_sess,
                          step=0):
    logging.info('Running Evaluation Scripts.')
    # Unpack session objects for later use
    summary_writer = tv_sess['writer']
    models = meta_hypes['model_list']
    sess = tv_sess['sess']

    n = 0

    for model in models:
        eval_dict, images = submodules[model]['eval'].evaluate(
            subhypes[model], sess, subgraph[model]['image_pl'],
            subgraph[model]['inf_out'])

        train._write_images_to_summary(images, summary_writer, step)

        logging.info("%s Evaluation Finished. Results" % model)

        logging.info('Raw Results:')
        utils.print_eval_dict(eval_dict, prefix='(raw)   ')
        train._write_eval_dict_to_summary(eval_dict,
                                          'Evaluation/%s/raw' % model,
                                          summary_writer, step)

        train._write_images_to_disk(meta_hypes, images, step)

    logging.info("Evaluation Finished. All results will be saved to:")
    logging.info(subhypes[model]['dirs']['output_dir'])

    # Reset timer
    start_time = time.time()
Esempio n. 8
0
def run_training(hypes, modules, tv_graph, tv_sess, start_step=0):
    """Run one iteration of training."""
    # Unpack operations for later use
    summary = tf.Summary()
    sess = tv_sess['sess']
    summary_writer = tv_sess['writer']

    solver = modules['solver']

    display_iter = hypes['logging']['display_iter']
    write_iter = hypes['logging'].get('write_iter', 5*display_iter)
    eval_iter = hypes['logging']['eval_iter']
    save_iter = hypes['logging']['save_iter']
    image_iter = hypes['logging'].get('image_iter', 5*save_iter)

    py_smoother = MedianSmoother(20)
    dict_smoother = ExpoSmoother(0.95)

    n = 0

    eval_names, eval_ops = zip(*tv_graph['eval_list'])
    # Run the training Step
    start_time = time.time()
    for step in xrange(start_step, hypes['solver']['max_steps']):

        lr = solver.get_learning_rate(hypes, step)
        feed_dict = {tv_graph['learning_rate']: lr}

        if step % display_iter:
            sess.run([tv_graph['train_op']], feed_dict=feed_dict)

        # Write the summaries and print an overview fairly often.
        elif step % display_iter == 0:
            # Print status to stdout.
            _, loss_value = sess.run([tv_graph['train_op'],
                                      tv_graph['losses']['total_loss']],
                                     feed_dict=feed_dict)

            _print_training_status(hypes, step, loss_value, start_time, lr)

            eval_results = sess.run(eval_ops, feed_dict=feed_dict)

            _print_eval_dict(eval_names, eval_results, prefix='   (raw)')

            dict_smoother.update_weights(eval_results)
            smoothed_results = dict_smoother.get_weights()

            _print_eval_dict(eval_names, smoothed_results, prefix='(smooth)')

            # Reset timer
            start_time = time.time()

        if step % write_iter == 0:
            # write values to summary
            if FLAGS.summary:
                summary_str = sess.run(tv_sess['summary_op'],
                                       feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, global_step=step)
            summary.value.add(tag='training/total_loss',
                              simple_value=float(loss_value))
            summary.value.add(tag='training/learning_rate',
                              simple_value=lr)
            summary_writer.add_summary(summary, step)
            # Convert numpy types to simple types.
            eval_results = np.array(eval_results)
            eval_results = eval_results.tolist()
            eval_dict = zip(eval_names, eval_results)
            _write_eval_dict_to_summary(eval_dict, 'Eval/raw',
                                        summary_writer, step)
            eval_dict = zip(eval_names, smoothed_results)
            _write_eval_dict_to_summary(eval_dict, 'Eval/smooth',
                                        summary_writer, step)

        # Do an evaluation and print the current state
        if (step) % eval_iter == 0 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk

            logging.info('Running Evaluation Script.')
            eval_dict, images = modules['eval'].evaluate(
                hypes, sess, tv_graph['image_pl'], tv_graph['inf_out'])

            _write_images_to_summary(images, summary_writer, step)
            logging.info("Evaluation Finished. All results will be saved to:")
            logging.info(hypes['dirs']['output_dir'])

            if images is not None and len(images) > 0:

                name = str(n % 10) + '_' + images[0][0]
                image_file = os.path.join(hypes['dirs']['image_dir'], name)
                scp.misc.imsave(image_file, images[0][1])
                n = n + 1

            logging.info('Raw Results:')
            utils.print_eval_dict(eval_dict, prefix='(raw)   ')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/raw',
                                        summary_writer, step)

            logging.info('Smooth Results:')
            names, res = zip(*eval_dict)
            smoothed = py_smoother.update_weights(res)
            eval_dict = zip(names, smoothed)
            utils.print_eval_dict(eval_dict, prefix='(smooth)')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/smoothed',
                                        summary_writer, step)

            # Reset timer
            start_time = time.time()

        # Save a checkpoint periodically.
        if (step) % save_iter == 0 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk
            checkpoint_path = os.path.join(hypes['dirs']['output_dir'],
                                           'model.ckpt')
            tv_sess['saver'].save(sess, checkpoint_path, global_step=step)
            # Reset timer
            start_time = time.time()

        if step % image_iter == 0 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            _write_images_to_disk(hypes, images, step)
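run_training reads all of its intervals from hypes['logging']. Below is an illustrative slice of such a hypes dictionary with made-up values; per the .get() calls above, write_iter and image_iter are optional and default to 5 * display_iter and 5 * save_iter.

hypes = {
    'logging': {
        'display_iter': 50,    # print a status line every 50 steps
        'eval_iter': 5000,     # run the full evaluation script
        'save_iter': 5000,     # write a checkpoint
        'write_iter': 250,     # optional: TensorBoard summaries
        'image_iter': 25000,   # optional: dump example images to disk
    },
    'solver': {'max_steps': 100000},
}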
Example 7
def run_training(hypes, modules, tv_graph, tv_sess, start_step=0):
    """Run one iteration of training."""
    # Unpack operations for later use
    summary = tf.Summary()
    sess = tv_sess['sess']
    summary_writer = tv_sess['writer']

    solver = modules['solver']

    display_iter = hypes['logging']['display_iter']
    write_iter = hypes['logging'].get('write_iter', 5*display_iter)
    eval_iter = hypes['logging']['eval_iter']
    save_iter = hypes['logging']['save_iter']
    image_iter = hypes['logging'].get('image_iter', 5*save_iter)

    py_smoother = MedianSmoother(20)
    dict_smoother = ExpoSmoother(0.95)

    n = 0

    eval_names, eval_ops = zip(*tv_graph['eval_list'])
    # Run the training Step
    start_time = time.time()
    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.7
    for step in xrange(start_step, hypes['solver']['max_steps']):
        logging.info("step %d", step)
        lr = solver.get_learning_rate(hypes, step)
        feed_dict = {tv_graph['learning_rate']: lr}

        if step % display_iter:
            sess.run([tv_graph['train_op']], feed_dict=feed_dict)

        # Write the summaries and print an overview fairly often.
        elif step % display_iter == 0:
            # Print status to stdout.
            _, loss_value = sess.run([tv_graph['train_op'],
                                      tv_graph['losses']['total_loss']],
                                     feed_dict=feed_dict)

            _print_training_status(hypes, step, loss_value, start_time, lr)

            eval_results = sess.run(eval_ops, feed_dict=feed_dict)

            _print_eval_dict(eval_names, eval_results, prefix='   (raw)')

            dict_smoother.update_weights(eval_results)
            smoothed_results = dict_smoother.get_weights()

            _print_eval_dict(eval_names, smoothed_results, prefix='(smooth)')

            # Reset timer
            start_time = time.time()

        # Note: this variant fires on fixed offsets (write_iter == 3,
        # eval_iter == 5, save_iter == 5, image_iter == 10 below) rather
        # than on exact multiples of each interval.
        if step % write_iter == 3:
            # write values to summary
            if FLAGS.summary:
                summary_str = sess.run(tv_sess['summary_op'],
                                       feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, global_step=step)
            summary.value.add(tag='training/total_loss',
                              simple_value=float(loss_value))
            summary.value.add(tag='training/learning_rate',
                              simple_value=lr)
            summary_writer.add_summary(summary, step)
            # Convert numpy types to simple types.
            eval_results = np.array(eval_results)
            eval_results = eval_results.tolist()
            eval_dict = zip(eval_names, eval_results)
            _write_eval_dict_to_summary(eval_dict, 'Eval/raw',
                                        summary_writer, step)
            eval_dict = zip(eval_names, smoothed_results)
            _write_eval_dict_to_summary(eval_dict, 'Eval/smooth',
                                        summary_writer, step)

        # Do an evaluation and print the current state
        if (step) % eval_iter == 5 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk

            logging.info('Running Evaluation Script.')
            eval_dict, images = modules['eval'].evaluate(
                hypes, sess, tv_graph['image_pl'], tv_graph['inf_out'])

            _write_images_to_summary(images, summary_writer, step)
            logging.info("Evaluation Finished. All results will be saved to:")
            logging.info(hypes['dirs']['output_dir'])

            if images is not None and len(images) > 0:

                name = str(n % 10) + '_' + images[0][0]
                image_file = os.path.join(hypes['dirs']['image_dir'], name)
                scp.misc.imsave(image_file, images[0][1])
                n = n + 1

            logging.info('Raw Results:')
            utils.print_eval_dict(eval_dict, prefix='(raw)   ')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/raw',
                                        summary_writer, step)

            logging.info('Smooth Results:')
            names, res = zip(*eval_dict)
            smoothed = py_smoother.update_weights(res)
            eval_dict = zip(names, smoothed)
            utils.print_eval_dict(eval_dict, prefix='(smooth)')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/smoothed',
                                        summary_writer, step)

            # Reset timer
            start_time = time.time()

        # Save a checkpoint periodically.
        if (step) % save_iter == 5 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk
            checkpoint_path = os.path.join(hypes['dirs']['output_dir'],
                                           'model.ckpt')
            tv_sess['saver'].save(sess, checkpoint_path, global_step=step)
            # Reset timer
            start_time = time.time()

        if step % image_iter == 10 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            _write_images_to_disk(hypes, images, step)
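The two run_training examples above also construct ExpoSmoother(0.95) for the per-step metrics. A minimal sketch under the assumption that it keeps an exponential moving average of a fixed-length result vector; the real class may differ.

class ExpoSmoother(object):
    def __init__(self, decay=0.95):
        self.decay = decay
        self.weights = None

    def update_weights(self, results):
        if self.weights is None:
            self.weights = list(results)
        else:
            # Blend the new results into the running average.
            self.weights = [self.decay * w + (1.0 - self.decay) * r
                            for w, r in zip(self.weights, results)]
        return self.weights

    def get_weights(self):
        return self.weights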
Example 8
def run_united_training(meta_hypes, subhypes, submodules, subgraph, tv_sess,
                        start_step=0):

    """Run one iteration of training."""
    # Unpack operations for later use
    summary = tf.Summary()
    # Unpack session objects for later use
    sess = tv_sess['sess']
    summary_writer = tv_sess['writer']

    solvers = {}
    for model in meta_hypes['models']:
        solvers[model] = submodules[model]['solver']
    # Read the logging intervals from the hypes
    display_iter = meta_hypes['logging']['display_iter']
    write_iter = meta_hypes['logging'].get('write_iter', 5*display_iter)
    eval_iter = meta_hypes['logging']['eval_iter']
    save_iter = meta_hypes['logging']['save_iter']
    image_iter = meta_hypes['logging'].get('image_iter', 5*save_iter)
    
    models = meta_hypes['model_list']
    #get the number of models
    num_models = len(models)

    py_smoothers = {}
    dict_smoothers = {}
    for model in models:
        # median smoothing of the evaluation results
        py_smoothers[model] = train.MedianSmoother(5)
        # exponential smoothing to reduce noise
        dict_smoothers[model] = train.ExpoSmoother(0.95)

    n = 0

    eval_names = {}
    eval_ops = {}
    for model in models:
        names, ops = zip(*subgraph[model]['eval_list'])
        eval_names[model] = names
        eval_ops[model] = ops

    weights = meta_hypes['selection']['weights']
    aweights = np.array([sum(weights[:i+1]) for i in range(len(weights))])
    # eval_names, eval_ops = zip(*tv_graph['eval_list'])
    # Run the training Step
    start_time = time.time()
    for step in xrange(start_step, meta_hypes['solver']['max_steps']):

        # Select which model to train on this step:
        # round-robin, weighted, or random.
        if not meta_hypes['selection']['random']:
            if not meta_hypes['selection']['use_weights']:
                # non-random selection: cycle through the models
                select = step % num_models
                model = models[select]
            else:
                # non-random, some models are selected multiple times
                select = np.argmax((aweights > step % aweights[-1]))
                model = models[select]
        else:
            # random selection. Use weights
            # to increase chance
            r = random.random()
            select = np.argmax((aweights > r))
            model = models[select]

        lr = solvers[model].get_learning_rate(subhypes[model], step)
        feed_dict = {subgraph[model]['learning_rate']: lr}

        sess.run([subgraph[model]['train_op']], feed_dict=feed_dict)

        # Write the summaries and print an overview fairly often.
        if step % display_iter == 0:
            # Print status to stdout.
            loss_values = {}
            eval_results = {}
            lrs = {}
            if select == 1:
                logging.info("Detection Loss was used.")
            else:
                logging.info("Segmentation Loss was used.")
            for model in models:
                loss_values[model] = sess.run(subgraph[model]['losses']
                                              ['total_loss'])

                eval_results[model] = sess.run(eval_ops[model])
                # update the smoothed metric values
                dict_smoothers[model].update_weights(eval_results[model])
                # learning rate currently used for this model
                lrs[model] = solvers[model].get_learning_rate(subhypes[model],
                                                              step)
            # print the training status
            _print_training_status(meta_hypes, step,
                                   loss_values,
                                   start_time, lrs)
            # print evaluation results for each model
            for model in models:
                train._print_eval_dict(eval_names[model], eval_results[model],
                                       prefix='   (raw)')

                smoothed_results = dict_smoothers[model].get_weights()

                train._print_eval_dict(eval_names[model], smoothed_results,
                                       prefix='(smooth)')

            output = sess.run(subgraph['debug_ops'].values())
            
            for name, res in zip(subgraph['debug_ops'].keys(), output):
                logging.info("{} : {}".format(name, res))

            if step % write_iter == 0:
                # write values to summary
                summary_str = sess.run(tv_sess['summary_op'],
                                       feed_dict=feed_dict)
                summary_writer.add_summary(summary_str,
                                           global_step=step)
                for model in models:
                    summary.value.add(tag='training/%s/total_loss' % model,
                                      simple_value=float(loss_values[model]))
                    summary.value.add(tag='training/%s/learning_rate' % model,
                                      simple_value=lrs[model])
                summary_writer.add_summary(summary, step)
                # Convert numpy types to simple types.
                if False:  # this per-model summary block is disabled
                    eval_results = np.array(eval_results)
                    eval_results = eval_results.tolist()
                    eval_dict = zip(eval_names[model], eval_results)
                    train._write_eval_dict_to_summary(eval_dict,
                                                      'Eval/%s/raw' % model,
                                                      summary_writer, step)
                    eval_dict = zip(eval_names[model], smoothed_results)
                    train._write_eval_dict_to_summary(eval_dict,
                                                      'Eval/%s/smooth' % model,
                                                      summary_writer, step)

            # Reset timer
            start_time = time.time()

        # Do an evaluation and print the current state
        if (step) % eval_iter == 0 and step > 0 or \
           (step + 1) == meta_hypes['solver']['max_steps']:
            # write checkpoint to disk
            logging.info('Running Evaluation Scripts.')
            for model in models:
                eval_dict, images = submodules[model]['eval'].evaluate(
                    subhypes[model], sess,
                    subgraph[model]['image_pl'],
                    subgraph[model]['inf_out'])

                train._write_images_to_summary(images, summary_writer, step)
                if images is not None and len(images) > 0:
                    # rotate through ten file names for the example image
                    name = str(n % 10) + '_' + images[0][0]
                    image_dir = subhypes[model]['dirs']['image_dir']
                    image_file = os.path.join(image_dir, name)
                    scp.misc.imsave(image_file, images[0][1])
                    n = n + 1

                logging.info("%s Evaluation Finished. Results" % model)

                logging.info('Raw Results:')
                utils.print_eval_dict(eval_dict, prefix='(raw)   ')
                # write the evaluation summary
                train._write_eval_dict_to_summary(
                    eval_dict, 'Evaluation/%s/raw' % model,
                    summary_writer, step)
                
                logging.info('Smooth Results:')
                names, res = zip(*eval_dict)
                # smooth the metric values
                smoothed = py_smoothers[model].update_weights(res)
                eval_dict = zip(names, smoothed)
                utils.print_eval_dict(eval_dict, prefix='(smooth)')
                train._write_eval_dict_to_summary(
                    eval_dict, 'Evaluation/%s/smoothed' % model,
                    summary_writer, step)

                if step % image_iter == 0 and step > 0 or \
                        (step + 1) == meta_hypes['solver']['max_steps']:
                    # save the images to disk
                    train._write_images_to_disk(meta_hypes, images, step)
            logging.info("Evaluation Finished. All results will be saved to:")
            logging.info(subhypes[model]['dirs']['output_dir'])

            # Reset timer
            start_time = time.time()

        # Save a checkpoint periodically.
        if (step) % save_iter == 0 and step > 0 or \
           (step + 1) == meta_hypes['solver']['max_steps']:
            # write checkpoint to disk
            checkpoint_path = os.path.join(meta_hypes['dirs']['output_dir'],
                                           'model.ckpt')
            tv_sess['saver'].save(sess, checkpoint_path, global_step=step)
            # Reset timer
            start_time = time.time()
    return
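The weighted model selection above turns a uniform random draw into a categorical choice via cumulative weights. A standalone illustration with assumed weights [0.7, 0.3]:

import random
import numpy as np

weights = [0.7, 0.3]            # hypothetical selection weights
aweights = np.cumsum(weights)   # array([0.7, 1.0]), same as the loop above
r = random.random()             # uniform in [0, 1)
select = int(np.argmax(aweights > r))  # 0 with p=0.7, 1 with p=0.3
print(select)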
Example 9
def run_training(hypes, modules, tv_graph, tv_sess, start_step=0):
    """Run one iteration of training."""
    # Unpack operations for later use
    summary = tf.Summary()
    sess = tv_sess['sess']
    summary_writer = tv_sess['writer']

    solver = modules['solver']

    display_iter = hypes['logging']['display_iter']
    write_iter = hypes['logging'].get('write_iter', 5*display_iter)
    eval_iter = hypes['logging']['eval_iter']
    save_iter = hypes['logging']['save_iter']
    image_iter = hypes['logging'].get('image_iter', 5*save_iter)
    dict_smoother = ExpoSmoother(0.95)
    py_smoother = MedianSmoother(20)
    n = 0

    eval_names, eval_ops = zip(*tv_graph['eval_list'])
    # Run the training Step
    start_time = time.time()
    for step in range(start_step, hypes['solver']['max_steps']):
        regression_weights = solver.get_regression_weights(step, 1.0)
        lr = solver.get_learning_rate(hypes, step)
        feed_dict = {tv_graph['learning_rate']: lr, 
                     hypes['solver']['regression_weights']: regression_weights}
        # if step % display_iter:
        #     sess.run([tv_graph['train_op']], feed_dict=feed_dict)

        # # Write the summaries and print an overview fairly often.
        # elif step % display_iter == 0:
        #     # Print status to stdout.
        #     _, loss_value, training_loss, eval_results = sess.run([tv_graph['train_op'],
        #                               tv_graph['losses']['total_loss'], tv_graph['losses'], 
        #                               eval_ops],
        #                              feed_dict=feed_dict)
        #     _print_training_status(hypes, step, loss_value, start_time, lr)    
        #     _print_eval_dict(eval_names, eval_results, prefix='   (raw)')

        #     dict_smoother.update_weights(eval_results)
        #     smoothed_results = dict_smoother.get_weights()

        #     _print_eval_dict(eval_names, smoothed_results, prefix='(smooth)')
             
        #     #logging.info('Regression Weights: Depth: %.2f, Location: %.2f, Corner: %.2f'%(regression_weights[0], \
        #     #              regression_weights[1], regression_weights[2]))
        #     # Reset timer
        #     start_time = time.time()
        
        # if step % write_iter == 0:
        #     # write values to summary
        #     if FLAGS.summary:
        #         summary_str = sess.run(tv_sess['summary_op'],
        #                                feed_dict=feed_dict)
        #         summary_writer.add_summary(summary_str, global_step=step)
        #     summary.value.add(tag='training/total_loss',
        #                       simple_value=float(loss_value))
        #     summary.value.add(tag='training/learning_rate',
        #                       simple_value=lr)
        #     summary_writer.add_summary(summary, step)
        #     # Convert numpy types to simple types.
        #     eval_results = np.array(eval_results)
        #     eval_results = eval_results.tolist()
        #     eval_dict = zip(eval_names, eval_results)
        #     _write_eval_dict_to_summary(eval_dict, 'Eval/raw',
        #                                 summary_writer, step)
        #     eval_dict = zip(eval_names, smoothed_results)
        #     _write_eval_dict_to_summary(eval_dict, 'Eval/smooth',
        #                                 summary_writer, step)

        # Do an evaluation and print the current state
        if (step) % eval_iter == 0 \
           or (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk

            logging.info('Running Evaluation Script.')

            eval_dict, images = modules['eval'].evaluate(
                hypes, sess, tv_graph['image_pl'], tv_graph['calib_pl'],
                tv_graph['xy_scale_pl'], tv_graph['inf_out'],
                tv_graph['encoder_out'])

            _write_images_to_summary(images, summary_writer, step)
            logging.info("Evaluation Finished. All results will be saved to:")
            logging.info(hypes['dirs']['output_dir'])

            if images is not None and len(images) > 0:

                name = str(n % 10) + '_' + images[0][0]
                image_file = os.path.join(hypes['dirs']['image_dir'], name)
                scp.misc.imsave(image_file, images[0][1])
                n = n + 1

            logging.info('Raw Results:')
            utils.print_eval_dict(eval_dict, prefix='(raw)   ')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/raw',
                                        summary_writer, step)

            logging.info('Smooth Results:')
            names, res = zip(*eval_dict)
            smoothed = py_smoother.update_weights(res)
            eval_dict = zip(names, smoothed)
            utils.print_eval_dict(eval_dict, prefix='(smooth)')
            _write_eval_dict_to_summary(eval_dict, 'Evaluation/smoothed',
                                        summary_writer, step)

            # Reset timer
            start_time = time.time()
        # NOTE: this variant stops after the first evaluation pass, so the
        # checkpoint and image code below is unreachable.
        break

        # Save a checkpoint periodically.
        if (step) % save_iter == 0 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            # write checkpoint to disk
            checkpoint_path = os.path.join(hypes['dirs']['output_dir'],
                                           'model.ckpt')
            tv_sess['saver'].save(sess, checkpoint_path, global_step=step)
            # Reset timer
            start_time = time.time()
      
        if step % image_iter == 0 and step > 0 or \
           (step + 1) == hypes['solver']['max_steps']:
            _write_images_to_disk(hypes, images, step)