Example #1
import time

import tensorflow as tf

# NOTE: ki, network, interp, fp, l, t and p are project-local modules
# (input pipeline, model, output interpretation, prediction filtering,
# losses, checkpoint tools and parameters); their imports are not shown
# in this excerpt.

keep_prop = tf.placeholder(dtype=tf.float32, name='KeepProp')  # dropout keep probability, fed at run time

# build input graph; the pipeline runs on the CPU so the GPU is free for
# the network itself
with tf.name_scope('InputPipeline'):
    batch_size = tf.placeholder(dtype=tf.int32, name='BatchSize')
    with tf.device("/cpu:0"):
        batch = ki.create_batch(batch_size, 'Test')
    x = batch[0]                # input images
    input_filename = batch[5]   # source filename for each image

# CNN graph: interpret converts the raw network output into per-anchor
# class scores, confidence scores and bounding-box deltas
network_output, _ = network.forget_squeeze_net(x, keep_prop, True, False)
class_scores, conf_scores, bbox_delta = interp.interpret(
    network_output, batch_size)

# Filter predictions
final_boxes, final_probs, final_class = fp.filter(class_scores, conf_scores,
                                                  bbox_delta)

test_saver = tf.train.Saver()
sess = tf.Session()

# restore from checkpoint
restore_path, _ = t.get_last_ckpt(p.PATH_TO_CKPT + 'sec/')
test_saver.restore(sess, restore_path)
print("Restored from ImageNet and KITTI trained network. "
      "Ready for testing. Dir = " + restore_path)
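
# --- Hypothetical usage sketch (not part of the original example) ---
# With the checkpoint restored, a test pass feeds concrete values for the
# two placeholders and fetches the filtered detections. Batch size 1 and
# keep probability 1.0 (no dropout at test time) are illustrative values.
boxes, probs, classes, fname = sess.run(
    [final_boxes, final_probs, final_class, input_filename],
    feed_dict={batch_size: 1, keep_prop: 1.0})
print('Detections for %s: %d boxes' % (fname, len(boxes)))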
Example #2
keep_prop = tf.placeholder(dtype=tf.float32, name='KeepProp')
# batch_size is used below but not defined in this excerpt; a placeholder
# mirroring Example #1 is assumed.
batch_size = tf.placeholder(dtype=tf.int32, name='BatchSize')

# Training input pipeline (kept on the CPU, as in Example #1)
with tf.device("/cpu:0"):
    t_batch = ki.create_batch(batch_size=batch_size, mode='Train')
t_image = t_batch[0]  # input images
t_mask = t_batch[1]   # anchor assignment mask
t_delta = t_batch[2]  # ground-truth box deltas
t_coord = t_batch[3]  # ground-truth box coordinates
t_class = t_batch[4]  # ground-truth class labels

t_network_output, variables_to_save = network.forget_squeeze_net(t_image,
                                                                 keep_prop,
                                                                 True,
                                                                 reuse=False)
t_class_scores, t_conf_scores, t_bbox_delta = interp.interpret(
    t_network_output, batch_size)
t_total_loss, t_bbox_loss, t_conf_loss, t_class_loss, t_l2_loss = l.loss_function(
    t_mask, t_delta, t_coord, t_class, t_bbox_delta, t_conf_scores,
    t_class_scores, True)
l.add_loss_summaries('Train_', t_total_loss, t_bbox_loss, t_conf_loss,
                     t_class_loss, t_l2_loss)
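
# --- Hypothetical sketch (not part of the original example) ---
# add_loss_summaries presumably registers tf.summary scalars for each loss
# term; merging them for TensorBoard would look roughly like this.
merged_summaries = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('logs/train')  # log path is illustrative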

# Optimisation
with tf.variable_scope('Optimisation'):
    global_step = tf.Variable(0, name='GlobalStep', trainable=False)
    train_step = tf.train.AdamOptimizer(p.LEARNING_RATE, name='TrainStep')
    grads_vars = train_step.compute_gradients(t_total_loss,
                                              tf.trainable_variables())
    # clip each gradient to [-100, 100] to guard against exploding gradients
    for i, (grad, var) in enumerate(grads_vars):
        grads_vars[i] = (tf.clip_by_value(grad, -100, 100), var)
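    # --- Hypothetical completion (not part of the original example) ---
    # Apply the clipped gradients; global_step increments on each update.
    train_op = train_step.apply_gradients(grads_vars, global_step=global_step)

# Run one optimisation step. The queue-runner call and the feed values are
# assumptions about the input pipeline, purely illustrative.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
_, loss_value = sess.run([train_op, t_total_loss],
                         feed_dict={batch_size: 16, keep_prop: 0.5})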