def main(argv):

    flags = parser(
        description="freeze yolov3 graph from checkpoint file").parse_args()
    classes = utils.read_coco_names("./data/coco.names")
    num_classes = len(classes)
    SIZE = flags.image_size
    print("=> the input image size is [%d, %d]" % (SIZE, SIZE))
    model = yolov3.yolov3(num_classes)

    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(
            tf.float32, [1, SIZE, SIZE, 3])  # placeholder for detector inputs

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(inputs, is_training=False)

        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs
        print("=>", boxes, scores)
        boxes, scores, labels = utils.gpu_nms(
            boxes,
            scores,
            num_classes,
            score_thresh=flags.score_threshold,
            iou_thresh=flags.iou_threshold)
        print("=>", boxes, scores, labels)
        feature_map_1, feature_map_2, feature_map_3 = feature_map
        print("=>", feature_map_1, feature_map_2, feature_map_3)
        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))

        if flags.convert:
            if not os.path.exists(flags.weights_path):
                url = 'https://github.com/YunYang1994/tensorflow-yolov3/releases/download/v1.0/yolov3.weights'
                for i in range(3):
                    time.sleep(1)
                    print("=> %s does not exists ! " % flags.weights_path)
                print("=> It will take a while to download it from %s" % url)
                print('=> Downloading yolov3 weights ... ')
                wget.download(url, flags.weights_path)

            load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                          flags.weights_path)
            sess.run(load_ops)
            save_path = saver.save(sess, save_path=flags.ckpt_file)
            print('=> model saved in path: {}'.format(save_path))

        if flags.freeze:
            saver.restore(sess, flags.ckpt_file)
            print('=> checkpoint file restored from ', flags.ckpt_file)
            utils.freeze_graph(sess, './checkpoint/yolov3_cpu_nms.pb',
                               ["concat_9", "mul_6"])
            utils.freeze_graph(sess, './checkpoint/yolov3_gpu_nms.pb',
                               ["concat_10", "concat_11", "concat_12"])
            utils.freeze_graph(sess, './checkpoint/yolov3_feature.pb', [
                "yolov3/yolo-v3/feature_map_1",
                "yolov3/yolo-v3/feature_map_2",
                "yolov3/yolo-v3/feature_map_3",
            ])
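For reference, a .pb file written by utils.freeze_graph above can later be reloaded without the Python model definition. The following is a minimal sketch for TF 1.x, under assumptions: it reuses the GPU-NMS node names hard-coded in this example (concat_10, concat_11, concat_12), assumes the unnamed input placeholder received the default name "Placeholder", and assumes a 416x416 input; the real names and size are whatever the script actually prints and was configured with.

import numpy as np
import tensorflow as tf

# load the frozen graph definition produced above (path and node names assumed)
with tf.gfile.GFile('./checkpoint/yolov3_gpu_nms.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    inputs, boxes, scores, labels = tf.import_graph_def(
        graph_def,
        return_elements=["Placeholder:0", "concat_10:0",
                         "concat_11:0", "concat_12:0"])

with tf.Session(graph=graph) as sess:
    dummy_image = np.random.rand(1, 416, 416, 3).astype(np.float32)  # stand-in input
    out_boxes, out_scores, out_labels = sess.run(
        [boxes, scores, labels], feed_dict={inputs: dummy_image})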
Example No. 2
def main(argv):

    flags = parser(
        description="freeze yolov3 graph from checkpoint file").parse_args()
    print("=> the input image size is [%d, %d]" %
          (flags.image_h, flags.image_w))
    anchors = utils.get_anchors(flags.anchors_path, flags.image_h,
                                flags.image_w)
    model = yolov3.yolov3(flags.num_classes, anchors)

    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(tf.float32,
                                [1, flags.image_h, flags.image_w, 3
                                 ])  # placeholder for detector inputs
        print("=>", inputs)

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(inputs, is_training=False)

        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs
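        # a tensor name ends in ":0"; [:-2] strips that suffix to get the graph node name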
        print("=>", boxes.name[:-2], scores.name[:-2])
        cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
        boxes, scores, labels = utils.gpu_nms(
            boxes,
            scores,
            flags.num_classes,
            score_thresh=flags.score_threshold,
            iou_thresh=flags.iou_threshold)
        print("=>", boxes.name[:-2], scores.name[:-2], labels.name[:-2])
        gpu_out_node_names = [
            boxes.name[:-2], scores.name[:-2], labels.name[:-2]
        ]
        feature_map_1, feature_map_2, feature_map_3 = feature_map
        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))

        if flags.convert:
            if not os.path.exists(flags.weights_path):
                url = 'https://github.com/YunYang1994/tensorflow-yolov3/releases/download/v1.0/yolov3.weights'
                for i in range(3):
                    time.sleep(1)
                    print("=> %s does not exists ! " % flags.weights_path)
                print("=> It will take a while to download it from %s" % url)
                print('=> Downloading yolov3 weights ... ')
                wget.download(url, flags.weights_path)

            load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                          flags.weights_path)
            sess.run(load_ops)
            save_path = saver.save(sess, save_path=flags.ckpt_file)
            print('=> model saved in path: {}'.format(save_path))

        if flags.freeze:
            saver.restore(sess, flags.ckpt_file)
            print('=> checkpoint file restored from ', flags.ckpt_file)
            utils.freeze_graph(sess, './checkpoint/yolov3_cpu_nms.pb',
                               cpu_out_node_names)
            utils.freeze_graph(sess, './checkpoint/yolov3_gpu_nms.pb',
                               gpu_out_node_names)
Example No. 3
    def convert_weights(self):
        print(f"=> the input image size is [{self.img_h}, {self.img_w}]")
        anchors = utils.get_anchors(self.anchors_path, self.img_h, self.img_w)
        model = yolov3.yolov3(self.num_classes, anchors)

        with tf.Graph().as_default() as graph:
            sess = tf.Session(graph=graph)
            inputs = tf.placeholder(tf.float32,
                                    [1, self.img_h, self.img_w, 1
                                     ])  # placeholder for detector inputs
            print("=>", inputs)

            with tf.variable_scope('yolov3'):
                feature_map = model.forward(inputs,
                                            n_filters_dn=self.n_filters_dn,
                                            n_strides_dn=self.n_strides_dn,
                                            n_ksizes_dn=self.n_ksizes_dn,
                                            is_training=False)

            boxes, confs, probs = model.predict(feature_map)
            scores = confs * probs
            print("=>", boxes.name[:-2], scores.name[:-2])
            cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
            boxes, scores, labels = utils.gpu_nms(boxes, scores,
                                                  self.num_classes)
            print("=>", boxes.name[:-2], scores.name[:-2], labels.name[:-2])
            gpu_out_node_names = [
                boxes.name[:-2], scores.name[:-2], labels.name[:-2]
            ]

            saver = tf.train.Saver(var_list=tf.global_variables(
                scope='yolov3'))

            if self.convert:
                load_ops = utils.load_weights(
                    tf.global_variables(scope='yolov3'), self.weights_dir)
                sess.run(load_ops)
                save_path = saver.save(sess, save_path=self.checkpoint_dir)
                print(f'=> model saved in path: {save_path}')

            if self.freeze:
                ckpt_idx = self.checkpoint_dir + '-' + str(
                    self.checkpoint_step)
                try:
                    saver.restore(sess, ckpt_idx)
                except Exception:
                    print(
                        f"Error: you tried to restore a checkpoint ({self.checkpoint_dir}) that doesn't exist."
                    )
                    print(
                        "Please clear the network and retrain, or load a different checkpoint by changing the steps parameter."
                    )
                print('=> checkpoint file restored from ', ckpt_idx)
                utils.freeze_graph(sess,
                                   '../../data/checkpoint/yolov3_cpu_nms.pb',
                                   cpu_out_node_names)
                utils.freeze_graph(sess,
                                   '../../data/checkpoint/yolov3_gpu_nms.pb',
                                   gpu_out_node_names)
Example No. 4
import tensorflow as tf
from core import utils, yolov3
from core.dataset import dataset, Parser

# IMAGE_H, IMAGE_W, CLASSES, BATCH_SIZE, SHUFFLE_SIZE, train_tfrecord and
# test_tfrecord are assumed to be defined earlier in the original script.
ANCHORS          = utils.get_anchors('data/voc_anchors.txt', IMAGE_H, IMAGE_W)
NUM_CLASSES      = len(CLASSES)
EVAL_INTERNAL    = 100
SAVE_INTERNAL    = 500



parser   = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)
trainset = dataset(parser, train_tfrecord, BATCH_SIZE, shuffle=SHUFFLE_SIZE)
testset  = dataset(parser, test_tfrecord , BATCH_SIZE, shuffle=None)

is_training = tf.placeholder(tf.bool)
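# tf.cond selects the training or test iterator at run time, depending on the is_training flag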
example = tf.cond(is_training, lambda: trainset.get_next(), lambda: testset.get_next())

images, *y_true = example
model = yolov3.yolov3(NUM_CLASSES, ANCHORS)

with tf.variable_scope('yolov3'):
    pred_feature_map = model.forward(images, is_training=is_training)
    loss             = model.compute_loss(pred_feature_map, y_true)
    y_pred           = model.predict(pred_feature_map)

tf.summary.scalar("loss/coord_loss",   loss[1])
tf.summary.scalar("loss/sizes_loss",   loss[2])
tf.summary.scalar("loss/confs_loss",   loss[3])
tf.summary.scalar("loss/class_loss",   loss[4])

global_step = tf.Variable(0, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
write_op = tf.summary.merge_all()
writer_train = tf.summary.FileWriter("./data/bdd100k_train")
writer_test  = tf.summary.FileWriter("./data/bdd100k_test")
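This snippet stops before the session and optimization loop. The following is a minimal sketch, under assumptions, of how the pieces defined above (loss, write_op, writer_train, is_training, global_step) are typically driven: the optimizer, learning rate, and step count are illustrative placeholders rather than values from the original script, and loss[0] is assumed to be the total loss, as in the later training example.

optimizer = tf.train.AdamOptimizer(1e-4)                       # assumed learning rate
train_op = optimizer.minimize(loss[0], global_step=global_step)

sess = tf.Session()
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

for step in range(1000):                                       # assumed number of steps
    _, summary = sess.run([train_op, write_op], feed_dict={is_training: True})
    writer_train.add_summary(summary, global_step=step)
    writer_train.flush()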
Example No. 5
import tensorflow as tf
from core import utils, yolov3

# BATCH_SIZE, SHUFFLE_SIZE, EPOCHS and weights_path are assumed to be defined
# earlier in the original script.
sess = tf.Session()
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
file_pattern = "./data/train_data/quick_train_data/tfrecords/quick_train_data*.tfrecords"
anchors = utils.get_anchors('./data/yolo_anchors.txt')

dataset = tf.data.TFRecordDataset(filenames=tf.gfile.Glob(file_pattern))
dataset = dataset.map(utils.parser(anchors, num_classes).parser_example,
                      num_parallel_calls=10)
dataset = dataset.repeat().shuffle(SHUFFLE_SIZE).batch(BATCH_SIZE).prefetch(
    BATCH_SIZE)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
images, *y_true = example
model = yolov3.yolov3(num_classes)

with tf.variable_scope('yolov3'):
    y_pred = model.forward(images, is_training=False)
    loss = model.compute_loss(y_pred, y_true)
    y_pred = model.predict(y_pred)
    load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                  weights_path)
    sess.run(load_ops)

for epoch in range(EPOCHS):
    run_items = sess.run([y_pred, y_true] + loss)
    rec, prec, mAP = utils.evaluate(run_items[0],
                                    run_items[1],
                                    num_classes,
                                    score_thresh=0.3)  # snippet truncated in the original listing
Example No. 6
import tensorflow as tf
from core import utils, yolov3
import cv2
from PIL import Image
import numpy as np

input_image = "../data/raccoon_data/images/raccoon-4.jpg"
image = Image.open(input_image)
# image = cv2.imread(input_image)
# image = Image.fromarray(image)
image_resize = cv2.resize(np.array(image) / 255., (416, 416))
image_place = tf.placeholder(dtype=tf.float32, shape=(None, 416, 416, 3))
CLASSES = utils.read_coco_names('../data/objects.names')
ANCHORE = utils.get_anchors("../data/objects.txt", 416, 416)
model = yolov3.yolov3(len(CLASSES), ANCHORE)
with tf.variable_scope('yolov3'):
    pred_feature_map = model.forward(image_place, is_training=False)
    pred = model.predict(pred_feature_map)
sess = tf.Session()
saver = tf.train.Saver()
model_dir = tf.train.latest_checkpoint("../data/raccoon_data/model/")
saver.restore(sess, model_dir)
boxes, confs, prods = sess.run(pred, feed_dict={image_place: np.expand_dims(image_resize, 0)})
boxes, confs, prods = utils.cpu_nms(boxes, confs * prods, len(CLASSES))
utils.draw_boxes(image, boxes, confs, prods, CLASSES, (416, 416), "../data/font/HuaWenXinWei-1.ttf")
print(boxes, confs, prods)
Example No. 7
import tensorflow as tf
from PIL import Image
from core import utils, yolov3

sess = tf.Session()
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
input_shape = [416, 416]
dataset = utils.read_image_box_from_text(
    './data/train_data/quick_train_data.txt')
anchors = utils.get_anchors('./data/yolo_anchors.txt')

inputs = tf.placeholder(tf.float32, shape=[1, 416, 416, 3])
y_true_13 = tf.placeholder(tf.float32, shape=[1, 13, 13, 3, 85])
y_true_26 = tf.placeholder(tf.float32, shape=[1, 26, 26, 3, 85])
y_true_52 = tf.placeholder(tf.float32, shape=[1, 52, 52, 3, 85])

model = yolov3.yolov3(80)
with tf.variable_scope('yolov3'):
    feature_maps = model.forward(inputs, is_training=True)
    load_ops = utils.load_weights(tf.global_variables(scope='yolov3'),
                                  "./checkpoint/yolov3.weights")
    sess.run(load_ops)
    loss = model.compute_loss(feature_maps, [y_true_13, y_true_26, y_true_52])

optimizer = tf.train.GradientDescentOptimizer(0.001)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())

for image_path in dataset.keys():
    image = Image.open(image_path)
    true_boxes, true_labels = dataset[image_path]
    image, true_boxes = utils.resize_image_correct_bbox(
        image, true_boxes, input_shape)  # assumed arguments; snippet truncated in the original listing
Example No. 8
def main(argv):
    flags = parser(
        description="freeze yolov3 graph from checkpoint file").parse_args()
    print("=> the input image size is [%d, %d]" %
          (flags.image_h, flags.image_w))
    anchors = utils.get_anchors(flags.anchors_path, flags.image_h,
                                flags.image_w)
    # print(anchors)
    # exit()
    model = yolov3.yolov3(flags.num_classes, anchors)

    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(tf.float32,
                                [1, flags.image_h, flags.image_w, 3
                                 ])  # placeholder for detector inputs
        print("=>", inputs)

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(
                inputs, is_training=False)  # returns feature maps at 3 scales

        # get the absolute box coordinates (top-left, bottom-right) predicted by the network, before NMS removes redundant boxes
        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs
        print("=>", boxes.name[:-2], scores.name[:-2])
        # node names needed to restore the model when running NMS on the CPU
        cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
        boxes, scores, labels = utils.gpu_nms(
            boxes,
            scores,
            flags.num_classes,
            score_thresh=flags.score_threshold,
            iou_thresh=flags.iou_threshold)
        print("=>", boxes.name[:-2], scores.name[:-2], labels.name[:-2])
        # node names needed to restore the model when running NMS on the GPU; these yield the final detections directly
        gpu_out_node_names = [
            boxes.name[:-2], scores.name[:-2], labels.name[:-2]
        ]
        feature_map_1, feature_map_2, feature_map_3 = feature_map
        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))
        our_out_node_names = [
            "yolov3/yolo-v3/feature_map_1", "yolov3/yolo-v3/feature_map_2",
            "yolov3/yolo-v3/feature_map_3"
        ]

        # the pretrained yolov3.weights only needs to be loaded on the first run

        # if flags.convert:
        #     load_ops = utils.load_weights(tf.global_variables(scope='yolov3'), flags.weights_path)
        #     sess.run(load_ops)
        #     save_path = saver.save(sess, save_path=flags.ckpt_file)
        #     print('=> model saved in path: {}'.format(save_path))

        # print(flags.freeze)
        if flags.freeze:
            saver.restore(sess, flags.ckpt_file)
            print('=> checkpoint file restored from ', flags.ckpt_file)
            utils.freeze_graph(sess, '../checkpoint/yolov3_cpu_nms.pb',
                               cpu_out_node_names)
            utils.freeze_graph(sess, '../checkpoint/yolov3_gpu_nms.pb',
                               gpu_out_node_names)
            utils.freeze_graph(sess, '../checkpoint/ouryolov3.pb',
                               our_out_node_names)
Example No. 9
    def train(self):
        ANCHORS = utils.get_anchors(self.anchors_path, self.img_h, self.img_w)

        parser = Parser(image_h=self.img_h,
                        image_w=self.img_w,
                        anchors=ANCHORS,
                        num_classes=self.num_classes)

        trainset = dataset(parser,
                           self.train_records,
                           self.batch_size,
                           shuffle=self.shuffle_size)
        testset = dataset(parser,
                          self.test_records,
                          self.batch_size,
                          shuffle=None)

        is_training = tf.placeholder(tf.bool)

        example = tf.cond(is_training, lambda: trainset.get_next(),
                          lambda: testset.get_next())

        images, y_true = example

        model = yolov3.yolov3(self.num_classes, ANCHORS)

        with tf.variable_scope('yolov3'):

            # Give the images to the network, and receive a prediction
            # feature map
            pred_feature_map = model.forward(images,
                                             is_training=is_training,
                                             n_filters_dn=self.n_filters_dn,
                                             n_strides_dn=self.n_strides_dn,
                                             n_ksizes_dn=self.n_ksizes_dn)

            loss = model.compute_loss(pred_feature_map, y_true,
                                      self.iou_threshold)
            y_pred = model.predict(pred_feature_map)

        tf.summary.scalar("loss/coord_loss", loss[1])
        tf.summary.scalar("loss/sizes_loss", loss[2])
        tf.summary.scalar("loss/confs_loss", loss[3])
        tf.summary.scalar("loss/class_loss", loss[4])

        global_step = tf.Variable(0,
                                  trainable=False,  # the step counter itself should not be trained
                                  collections=[tf.GraphKeys.LOCAL_VARIABLES])

        sess = tf.Session()  # assumed: the original snippet uses `sess` below without showing where it is created
        write_op = tf.summary.merge_all()
        writer_train = tf.summary.FileWriter("../../data/train_summary",
                                             sess.graph)
        writer_test = tf.summary.FileWriter("../../data/test_summary")

        update_vars = tf.contrib.framework.get_variables_to_restore(
            include=["yolov3/yolo-v3"])

        lr = tf.train.exponential_decay(self.learning_rate,
                                        global_step,
                                        decay_steps=self.decay_steps,
                                        decay_rate=self.decay_rate,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(lr)

        # set dependencies for BN ops
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss[0],
                                          var_list=update_vars,
                                          global_step=global_step)

        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])

        saver = tf.train.Saver(max_to_keep=2)

        for step in range(self.steps):
            run_items = sess.run([train_op, write_op, y_pred, y_true] + loss,
                                 feed_dict={is_training: True})

            if (step + 1) % self.eval_internal == 0:
                train_rec_value, train_prec_value = utils.evaluate(
                    run_items[2], run_items[3])

            writer_train.add_summary(run_items[1], global_step=step)
            writer_train.flush()  # Flushes the event file to disk

            if (step + 1) % self.save_internal == 0:
                saver.save(sess,
                           save_path=self.checkpoint_path,
                           global_step=step + 1)

            if (step + 1) % self.print_every_n == 0:
                print(f"=> STEP {step+1} [TRAIN]:\tloss_xy: " +
                      f"{run_items[5]:.4f} \tloss_wh:{run_items[6]:.4f} \t" +
                      f"loss_conf:{run_items[7]:.4f} \tloss_class:" +
                      f"{run_items[8]:.4f}")

            run_items = sess.run([write_op, y_pred, y_true] + loss,
                                 feed_dict={is_training: False})

            if (step + 1) % self.eval_internal == 0:
                test_rec_value, test_prec_value = utils.evaluate(
                    run_items[1], run_items[2])
                print(f"\n{20*'='}> evaluation result <{20*'='}\n")
                print(f"=> STEP {step+1} [TRAIN]:\trecall:" +
                      f"{train_rec_value:.2f} \tprecision:" +
                      f"{train_prec_value:.4f}")
                print(f"=> STEP {step+1} [VALID]:\trecall:" +
                      f"{test_rec_value:.2f} \tprecision:" +
                      f"{test_prec_value:.4f}")
                print(f"\n{20*'='}> evaluation result <{20*'='}\n")

            writer_test.add_summary(run_items[0], global_step=step)
            writer_test.flush()  # Flushes the event file to disk
Example No. 10
import tensorflow as tf
from core import utils, yolov3

image_h, image_w = 416, 416
num_classes = 80
iou_threshold = 0.5
score_threshold = 0.5
ckpt_file = './checkpoint/yolov3.ckpt'
anchors_path = './data/raccoon_anchors.txt'
weights_path = './checkpoint/yolov3.weights'




if __name__ == '__main__':
    print("the input image size is {}*{}".format(image_h, image_w))
    anchors = utils.get_anchors(anchors_path, image_h, image_w)
    model = yolov3.yolov3(num_classes, anchors)


    with tf.Graph().as_default() as graph:
        sess = tf.Session(graph=graph)
        inputs = tf.placeholder(tf.float32, [1, image_h, image_w, 3])
        print(inputs)

        with tf.variable_scope('yolov3'):
            feature_map = model.forward(inputs, is_training=False)
        boxes, confs, probs = model.predict(feature_map)
        scores = confs * probs

        print("=>", boxes.name[:-2], scores.name[:-2])
        cpu_out_node_names = [boxes.name[:-2], scores.name[:-2]]
        boxes, scores, labels = utils.gpu_nms(
            boxes, scores, num_classes,
            score_thresh=score_threshold,
            iou_thresh=iou_threshold)  # assumed thresholds; snippet truncated in the original listing
Example No. 11
import tensorflow as tf
from core import utils, yolov3
from core.dataset import dataset, Parser
from basicNet.mobilenetV2 import MobilenetV2
from config.config import *
parser = Parser(ANCHORS, NUM_CLASSES)
trainset = dataset(parser,
                   TEST_TFRECORD,
                   BATCH_SIZE,
                   shuffle=SHUFFLE_SIZE,
                   multi_image_size=False)
testset = dataset(parser, TEST_TFRECORD, BATCH_SIZE, shuffle=None)
example = trainset.get_next()

images, *y_true = example
model = yolov3.yolov3(NUM_CLASSES, ANCHORS, basic_net=MobilenetV2)

with tf.variable_scope('yolov3'):
    model.set_anchor(images)
    pred_feature_map = model.forward(images, is_training=False)
    y_pred = model.predict(pred_feature_map)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "./checkpoint/yolov3.ckpt-25000")
    run_items = sess.run([images, y_pred])
    for i in range(8):
        image = run_items[0][i]
        pred_boxes = run_items[1][0][i:i + 1]
        pred_confs = run_items[1][1][i:i + 1]
        pred_probs = run_items[1][2][i:i + 1]
        pred_boxes, pred_scores, pred_labels = utils.cpu_nms(
            pred_boxes, pred_confs * pred_probs, NUM_CLASSES)  # assumed arguments, as in the earlier cpu_nms call; snippet truncated in the original listing