# Exemplo n.º 1
# 0
def main():
    """Run VOC-trained YOLO detection on every image in config.voc_test_dir.

    Builds the inference graph, restores the newest checkpoint from
    config.voc_model_path, then for each image file: runs the network,
    maps the boxes back onto the original image, shows the annotated
    result, and optionally saves it (config.save_img).
    """
    yolo = YOLO(config.voc_class_num, config.voc_anchors)

    # Batch of 1, dynamic spatial size, 3-channel (NHWC) input.
    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        config.voc_class_num,
        score_thresh=config.val_score_thresh,
        iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    init = tf.compat.v1.global_variables_initializer()

    # compat.v1 Saver, consistent with the rest of the graph-mode code here.
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(config.voc_model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message: load ckpt model:'" +
                        str(ckpt.model_checkpoint_path) + "'")
        else:
            # No checkpoint found: deliberately continue with the random
            # init (smoke-testing the pipeline only); the exit is kept
            # commented out as in the original.
            Log.add_log("message:can not find  ckpt model")
            # exit(1)

        # dictionary of name of corresponding id
        word_dict = tools.get_word_dict(config.voc_names)
        # one drawing color per class id
        color_table = tools.get_color_table(config.voc_class_num)

        width = config.width
        height = config.height

        for name in os.listdir(config.voc_test_dir):
            img_name = path.join(config.voc_test_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not file" % img_name)
                continue

            start = time.perf_counter()

            # read_img letterboxes to (width, height); nw/nh are presumably
            # the resized content dims inside the padded canvas — matches
            # the dw/dh un-padding math below.
            img, nw, nh, img_ori, show_img = read_img(img_name, width, height)
            if img is None:
                # Fix: log the failing file name (the original logged
                # str(None)) and skip the image instead of feeding None
                # into sess.run and crashing.
                Log.add_log("message:'" + str(img_name) + "' is None")
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label],
                                           feed_dict={inputs: img})

            end = time.perf_counter()
            print("%s\t, time:%f s" % (img_name, end - start))

            if config.keep_img_shape:
                # Undo the letterbox padding: subtract the centering offset
                # and rescale so coordinates are relative to the content.
                dw = (width - nw) / 2
                dh = (height - nh) / 2
                for i in range(len(boxes)):
                    boxes[i][0] = (boxes[i][0] * width - dw) / nw
                    boxes[i][1] = (boxes[i][1] * height - dh) / nh
                    boxes[i][2] = (boxes[i][2] * width - dw) / nw
                    boxes[i][3] = (boxes[i][3] * height - dh) / nh

            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict,
                                     color_table)

            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            if config.save_img:
                save_img(img_ori, name)
# Exemplo n.º 2
# 0
def main():
    """Run COCO-trained (80-class) YOLO detection on a live webcam feed.

    Restores weights from ./yolo_weights, then grabs frames from camera 0
    in a loop, runs detection on each frame, and shows the annotated image
    until ESC is pressed or the camera stops delivering frames.
    """
    # 18 values = 9 (w, h) anchor pairs for the 608-input model.
    anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401  #608 anchors
    #anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    yolo = YOLO(80, anchors, width=416, height=416)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        80,
        score_thresh=config.val_score_thresh,
        iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    init = tf.compat.v1.global_variables_initializer()

    # compat.v1 Saver, consistent with the rest of the graph-mode code here.
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state("./yolo_weights")
        if ckpt and ckpt.model_checkpoint_path:
            print("restore: ", ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # Random weights are useless for a live demo: bail out.
            exit(1)

        # id to names
        word_dict = tools.get_word_dict("./data/coco.names")
        # color of corresponding names
        color_table = tools.get_color_table(80)

        width = 416
        height = 416

        cap = cv2.VideoCapture(0)
        # Fix: the magic number 6 is CAP_PROP_FOURCC — use the named constant.
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        # cap.set(3, 1920)
        # cap.set(4, 1080)
        try:
            while True:
                start = time.perf_counter()

                # Fix: check the success flag; on camera failure frame is
                # None, which previously crashed inside cv2.resize.
                ret, frame = cap.read()
                if not ret:
                    break
                img_rgb = cv2.resize(frame, (width, height))
                img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
                # Fix: cv2 images are (height, width, 3); the original
                # (1, width, height, 3) reshape only worked because both
                # are 416. Scale to [0, 1] as the network expects.
                img_in = img_rgb.reshape((1, height, width, 3)) / 255.

                boxes, score, label = sess.run(
                    [pre_boxes, pre_score, pre_label],
                    feed_dict={inputs: img_in})

                end = time.perf_counter()
                print("time:%f s" % (end - start))

                # Draw on the original BGR frame, not the resized RGB copy.
                frame = tools.draw_img(frame, boxes, score, label, word_dict,
                                       color_table)

                cv2.imshow('img', frame)
                if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
                    break
        finally:
            # Fix: release the camera and the display window on any exit
            # path (the original leaked both).
            cap.release()
            cv2.destroyAllWindows()
# Exemplo n.º 3
# 0
def main():
    """Run VOC-trained YOLO detection on every image in config.voc_test_dir.

    Same pipeline as example 1 but without letterbox coordinate
    correction; exits when no checkpoint can be restored.
    """
    yolo = YOLO(config.voc_class_num, config.voc_anchors)

    # Batch of 1, dynamic spatial size, 3-channel (NHWC) input.
    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        config.voc_class_num,
        score_thresh=config.val_score_thresh,
        iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    # Variable initialization (overwritten by the checkpoint restore below).
    init = tf.compat.v1.global_variables_initializer()

    # compat.v1 Saver, consistent with the rest of the graph-mode code here.
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(config.voc_model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message:存在 ckpt 模型:'" +
                        str(ckpt.model_checkpoint_path) + "'")
        else:
            # No checkpoint: random weights are useless here, abort.
            Log.add_log("message:不存在 ckpt 模型")
            exit(1)

        # id -> class-name dictionary
        word_dict = tools.get_word_dict(config.voc_names)
        # per-class drawing color table
        color_table = tools.get_color_table(config.voc_class_num)

        width = config.width
        height = config.height

        for name in os.listdir(config.voc_test_dir):
            img_name = path.join(config.voc_test_dir, name)
            if not path.isfile(img_name):
                print("'%s'不是图片" % img_name)
                continue

            start = time.perf_counter()

            img, img_ori = read_img(img_name, width, height)
            if img is None:
                # Fix: log the failing file name (the original logged
                # str(None)) and skip the image instead of feeding None
                # into sess.run and crashing.
                Log.add_log("message:'" + str(img_name) + "'图片读取错误")
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label],
                                           feed_dict={inputs: img})

            end = time.perf_counter()
            print("%s\t, time:%f s" % (img_name, end - start))

            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict,
                                     color_table)

            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            if config.save_img:
                save_img(img_ori, name)