Example no. 1
0
if __name__ == "__main__":
    # Evaluation entry point: builds the GOTURN tracking graph in inference
    # mode and prepares a deterministic input pipeline over the test set.
    # NOTE(review): `logfile`, `test_txt`, `BATCH_SIZE`, `load_train_test_set`,
    # `next_batch`, and `goturn_net` are defined elsewhere in the file —
    # not visible here.

    # Start each run with a fresh log file.
    if (os.path.isfile(logfile)):
        os.remove(logfile)
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG,
                        filename=logfile)

    # Load parallel lists of target-image paths, search-image paths, and
    # ground-truth boxes, then wrap them as tensors for the queue pipeline.
    [train_target, train_search, train_box] = load_train_test_set(test_txt)
    target_tensors = tf.convert_to_tensor(train_target, dtype=tf.string)
    search_tensors = tf.convert_to_tensor(train_search, dtype=tf.string)
    box_tensors = tf.convert_to_tensor(train_box, dtype=tf.float64)
    # shuffle=False keeps evaluation order deterministic.  Element order in
    # the queue is [search, target, box].
    input_queue = tf.train.slice_input_producer(
        [search_tensors, target_tensors, box_tensors], shuffle=False)
    batch_queue = next_batch(input_queue)
    # train=False builds the network in inference mode.
    tracknet = goturn_net.TRACKNET(BATCH_SIZE, train=False)
    tracknet.build()

    # TF1-style session setup: initialize both global and local variables
    # (local variables back the input-queue bookkeeping).
    sess = tf.Session()
    init = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    sess.run(init)
    sess.run(init_local)

    coord = tf.train.Coordinator()
    # start the threads that feed the input queues
    tf.train.start_queue_runners(sess=sess, coord=coord)

    # Checkpoint directory for restoring trained weights (restore logic
    # presumably follows; the example is truncated here).
    ckpt_dir = "./checkpoints"
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
Example no. 2
0
if __name__ == "__main__":
    # Training entry point: builds the GOTURN tracking graph in training
    # mode with a shuffled input pipeline, Adam optimizer, and summaries.
    # NOTE(review): `logfile`, `train_txt`, `BATCH_SIZE`, `load_training_set`,
    # `next_batch`, and `goturn_net` are defined elsewhere in the file —
    # not visible here.

    # Start each run with a fresh log file.
    if (os.path.isfile(logfile)):
        os.remove(logfile)
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG,
                        filename=logfile)

    # Load parallel lists of target-image paths, search-image paths, and
    # ground-truth boxes, then wrap them as tensors for the queue pipeline.
    [train_target, train_search, train_box] = load_training_set(train_txt)
    target_tensors = tf.convert_to_tensor(train_target, dtype=tf.string)
    search_tensors = tf.convert_to_tensor(train_search, dtype=tf.string)
    box_tensors = tf.convert_to_tensor(train_box, dtype=tf.float64)
    # shuffle=True randomizes example order each epoch (training, unlike
    # the evaluation script which uses shuffle=False).
    input_queue = tf.train.slice_input_producer(
        [search_tensors, target_tensors, box_tensors], shuffle=True)
    batch_queue = next_batch(input_queue)
    # Default train mode builds the network with the training-only ops.
    tracknet = goturn_net.TRACKNET(BATCH_SIZE)
    tracknet.build()

    # Step counter incremented by the optimizer on every minimize() call.
    global_step = tf.Variable(0, trainable=False, name="global_step")

    # Adam with learning rate 1e-5 and beta1 0.9; minimizes the
    # weight-decay-regularized loss exposed by the network.
    train_step = tf.train.AdamOptimizer(0.00001, 0.9).minimize( \
        tracknet.loss_wdecay, global_step=global_step)
    merged_summary = tf.summary.merge_all()
    sess = tf.Session()
    # TensorBoard writer for the training run, including the graph itself.
    train_writer = tf.summary.FileWriter('./train_summary', sess.graph)
    # Initialize both global and local variables (local variables back the
    # input-queue bookkeeping).
    init = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    sess.run(init)
    sess.run(init_local)

    # Coordinator for the queue-runner threads (the start_queue_runners
    # call presumably follows; the example is truncated here).
    coord = tf.train.Coordinator()
import cv2
import goturn_net
import matplotlib.pyplot as plt

import tensorflow as tf

# Single-pair inference demo: load one target/search image pair, restore a
# trained GOTURN checkpoint, and predict the bounding box in the search image.

# NOTE(review): cv2.imread returns None if the file is missing — the resize
# below would then fail; assumes search1.jpg / target1.jpg exist. TODO confirm.
current_img = cv2.imread('search1.jpg')
prev_img = cv2.imread('target1.jpg')
# The network expects fixed 227x227 inputs (AlexNet-style input size).
current_img_res = cv2.resize(current_img, (227, 227))
prev_img_res = cv2.resize(prev_img, (227, 227))
print(current_img_res.shape)
print(prev_img_res.shape)

# Build the tracker in inference mode for a single image pair.
goturn = goturn_net.TRACKNET(batch_size=1, train=False)
goturn.build()
sess = tf.Session()
saver = tf.train.Saver()
ckpt_dir = './checkpoints'
# NOTE(review): get_checkpoint_state returns None when no checkpoint exists,
# in which case the restore below raises AttributeError — verify a trained
# checkpoint is present before running.
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
print(ckpt)
saver.restore(sess, ckpt.model_checkpoint_path)

# fc4 is the final regression layer producing the predicted box; feed the
# resized pair as batch-of-one inputs.
predicted_bbox = sess.run(goturn.fc4, feed_dict={goturn.image: [current_img_res],
                                                 goturn.target: [prev_img_res],
                                                 }
                          )
print(predicted_bbox)
# The network output is divided by 10 — presumably coordinates are regressed
# on a 0..10 scale, yielding fractions of image size; TODO confirm against
# the training-label encoding.
bbox = (predicted_bbox[0] / 10)
print(bbox)
# Scale the normalized box back to the original (un-resized) image size;
# p1 is the top-left corner (the rest of the drawing code is truncated here).
h, w, _ = current_img.shape
p1 = (int(bbox[0] * w), int(bbox[1] * h))