Example #1
def main():
    yolo = YOLO(class_num, anchors)

    # input node name: Placeholder:0
    inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    # output node names: concat_9:0, concat_10:0, concat_11:0
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1, feature_y2, feature_y3, class_num,
        score_thresh=config.val_score_thresh, iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(ckpt_file_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message: ckpt model:'"+str(ckpt.model_checkpoint_path)+"'")
        else:
            Log.add_log("message:no ckpt model")
            exit(1)

        # freeze the graph and save it as a PB model
        out_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, sess.graph_def,
            ['Placeholder', 'yolo/Conv_1/BiasAdd', 'yolo/Conv_9/BiasAdd',
             'yolo/Conv_17/BiasAdd', 'concat_9', 'concat_10', 'concat_11'])  # "yolo/Conv_13/BiasAdd"
        saver_path = tf.compat.v1.train.write_graph(out_graph, "", pd_dir, as_text=False)
        print("saver path: ", saver_path)
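A minimal sketch (not part of the original example) of loading the frozen PB written above and resolving the tensors by the node names noted in the comments; the path './yolov4.pb' is an assumption.

import tensorflow as tf

def load_frozen_graph(pb_path):
    # parse the serialized GraphDef and import it into a fresh graph
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.compat.v1.import_graph_def(graph_def, name='')
    return graph

graph = load_frozen_graph('./yolov4.pb')  # hypothetical path
inputs = graph.get_tensor_by_name('Placeholder:0')
pre_boxes = graph.get_tensor_by_name('concat_9:0')
pre_score = graph.get_tensor_by_name('concat_10:0')
pre_label = graph.get_tensor_by_name('concat_11:0')
# with tf.compat.v1.Session(graph=graph) as sess:
#     sess.run([pre_boxes, pre_score, pre_label], feed_dict={inputs: img})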
Example #2
def main():
    yolo = YOLO()

    inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, class_num, isTrain=False)
    pre_boxes, pre_score, pre_label = get_predict_result(
        feature_y1, feature_y2, feature_y3,
        anchors[2], anchors[1], anchors[0],
        width, height, class_num,
        score_thresh=score_thresh,
        iou_thresh=iou_thresh,
        max_box=max_box)

    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message: load ckpt model:'"+str(ckpt.model_checkpoint_path)+"'")
        else:
            Log.add_log("message: cannot find ckpt model")
            # exit(1)
            # assert(0)
        
        # dictionary mapping class id to name
        word_dict = tools.get_word_dict(name_file)
        # color table, one color per class
        color_table = tools.get_color_table(class_num)
        
        for name in os.listdir(val_dir):
            img_name = path.join(val_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not a file" % img_name)
                continue

            img, nw, nh, img_ori, show_img = read_img(img_name, width, height)
            if img is None:
                Log.add_log("message: failed to read image '" + img_name + "'")
                continue

            start = time.perf_counter()
            
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label], feed_dict={inputs:img})
            
            end = time.perf_counter()
            print("%s\t, time:%f s" %(img_name, end-start))
           
            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict, color_table)
            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            if save_img:
                write_img(img_ori, name)
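A plausible letterbox-style sketch of read_img matching the five-value signature used above (an assumption, not the repo's code): scale the image into (width, height) preserving aspect ratio, pad the rest, and return the unpadded size (nw, nh) needed later to map boxes back to the original image.

import cv2
import numpy as np

def read_img(img_name, width, height):
    img_ori = cv2.imread(img_name)
    if img_ori is None:
        return None, None, None, None, None
    h, w = img_ori.shape[:2]
    scale = min(width / w, height / h)
    nw, nh = int(w * scale), int(h * scale)
    # paste the resized image into a gray canvas, centered
    show_img = np.full((height, width, 3), 128, dtype=np.uint8)
    dw, dh = (width - nw) // 2, (height - nh) // 2
    show_img[dh:dh + nh, dw:dw + nw] = cv2.resize(img_ori, (nw, nh))
    img = cv2.cvtColor(show_img, cv2.COLOR_BGR2RGB)[np.newaxis] / 255.
    return img.astype(np.float32), nw, nh, img_ori, show_img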
Example #3
def main():
    anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
    yolo = YOLO(80, anchors)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1, feature_y2, feature_y3, 80,
        score_thresh=config.val_score_thresh, iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state("./yolo_weights")
        if ckpt and ckpt.model_checkpoint_path:
            print("restore: ", ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            exit(1)

        # dictionary mapping class id to name
        word_dict = tools.get_word_dict("./data/coco.names")
        # color table, one color per class
        color_table = tools.get_color_table(80)

        width = 608
        height = 608
        
        val_dir = "./coco_test_img"
        for name in os.listdir(val_dir):
            img_name = path.join(val_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not an image file" % img_name)
                continue

            start = time.perf_counter()

            img, img_ori = read_img(img_name, width, height)
            if img is None:
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label], feed_dict={inputs:img})
            
            end = time.perf_counter()
            print("%s\t, time:%f s" %(img_name, end-start))

            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict, color_table)

            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            save_img(img_ori, name)
Example #4
def main():
    yolo = YOLO(config.class_num, config.anchors)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1, feature_y2, feature_y3, config.class_num,
        score_thresh=config.val_score_thresh, iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(config.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message: found ckpt model: '" + str(ckpt.model_checkpoint_path) + "'")
        else:
            Log.add_log("message: no ckpt model found")
            # exit(1)

        # dictionary mapping class id to name
        word_dict = tools.get_word_dict(config.name_file)
        # color table, one color per class
        color_table = tools.get_color_table(config.class_num)

        width = config.width
        height = config.height
        
        for name in os.listdir(config.val_dir):
            img_name = path.join(config.val_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not an image file" % img_name)
                continue

            start = time.perf_counter()

            img, img_ori = read_img(img_name, width, height)
        if img is None:
                Log.add_log("message: failed to read image '" + img_name + "'")
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label], feed_dict={inputs:img})
            
            end = time.perf_counter()
            print("%s\t, time:%f s" %(img_name, end-start))

            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict, color_table)

            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            if config.save_img:
                save_img(img_ori, name)
Example #5
def convert_weight(model_path, output_dir, size=608):
    save_path = os.path.join(output_dir, 'YOLO_v4_' + str(size) + '.ckpt')
    class_num = 80
    yolo = YOLO()
    with tf.Session() as sess:
        tf_input = tf.placeholder(tf.float32, [1, size, size, 3])

        feature = yolo.forward(tf_input, class_num, isTrain=False)

        saver = tf.train.Saver(var_list=tf.global_variables())

        load_ops = load_weights(tf.global_variables(), model_path)
        sess.run(load_ops)
        saver.save(sess, save_path=save_path)
        print('YOLO v4 weights have been transformed to {}'.format(save_path))
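A hypothetical invocation of convert_weight above; both paths are assumptions, chosen to match the layout used by the other examples.

if __name__ == '__main__':
    convert_weight('./yolo_weights/yolov4.weights', './yolo_weights', size=608)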
Example #6
def backward():
    yolo = YOLO(config.class_num,
                config.anchors,
                width=config.width,
                height=config.height)
    data = Data(config.train_file,
                config.class_num,
                config.batch_size,
                config.anchors,
                config.multi_scale_img,
                width=config.width,
                height=config.height)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[config.batch_size, None, None, 3])
    y1_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])
    y2_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])
    y3_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])

    feature_y1, feature_y2, feature_y3 = yolo.forward(
        inputs, weight_decay=config.weight_decay, isTrain=True)

    global_step = tf.Variable(0, trainable=False)

    # yolov4 loss
    loss = yolo.get_loss_v4(feature_y1, feature_y2, feature_y3, y1_true,
                            y2_true, y3_true, config.cls_normalizer,
                            config.ignore_thresh, config.prob_thresh,
                            config.score_thresh)
    l2_loss = tf.compat.v1.losses.get_regularization_loss()

    epoch = compute_curr_epoch(global_step, config.batch_size,
                               len(data.imgs_path))
    lr = config_lr(config.lr_type, config.lr_init, epoch)
    optimizer = config_optimizer(config.optimizer_type, lr, config.momentum)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = optimizer.compute_gradients(loss + l2_loss)
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_step = optimizer.apply_gradients(clip_grad_var,
                                               global_step=global_step)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        step = 0

        ckpt = tf.compat.v1.train.get_checkpoint_state(config.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            step = int(step)
            Log.add_log("message: found ckpt model, global_step=" + str(step))
        else:
            Log.add_log("message: no ckpt model found")

        # total number of training steps
        total_steps = np.ceil(config.total_epoch * len(data.imgs_path) /
                              config.batch_size)
        while step < total_steps:
            start = time.perf_counter()
            batch_img, y1, y2, y3 = next(data)
            _, loss_, step, lr_ = sess.run([train_step, loss, global_step, lr],
                                           feed_dict={
                                               inputs: batch_img,
                                               y1_true: y1,
                                               y2_true: y2,
                                               y3_true: y3
                                           })
            end = time.perf_counter()
            print(
                "step: %6d, loss: %.5g\t, w: %3d, h: %3d, lr:%.5g\t, time: %5f s"
                % (step, loss_, data.width, data.height, lr_, end - start))

            if step % 5 == 2:
                Log.add_loss(str(step) + "\t" + str(loss_))

            if (step + 1) % config.save_step == 0:
                Log.add_log("message: save current model, step=" + str(step) +
                            ", lr=" + str(lr_))
                # global_step=0 overwrites the same file, keeping one rolling checkpoint
                saver.save(sess,
                           path.join(config.model_path, config.model_name),
                           global_step=0)

        Log.add_log("message: training finished, saving model, step=" + str(step))
        saver.save(sess,
                   path.join(config.model_path, config.model_name),
                   global_step=step)
    return 0
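compute_curr_epoch is not shown in these examples; a plausible sketch (an assumption, not the repo's actual code), based on one epoch being len(data.imgs_path) / batch_size optimizer steps:

import tensorflow as tf

def compute_curr_epoch(global_step, batch_size, num_imgs):
    # number of optimizer steps that make up one full pass over the dataset
    steps_per_epoch = num_imgs / batch_size
    return tf.cast(global_step, tf.float32) / steps_per_epoch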
Example #7
def main():
    class_num = 80
    width = 608
    height = 608
    score_thresh = 0.5
    iou_thresh = 0.213
    max_box = 50
    anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
    # 18 values -> 3 scales x 3 anchors x (w, h)
    anchors = np.asarray(anchors).astype(np.float32).reshape([-1, 3, 2])
    model_dir = "./yolo_weights"
    name_file = "./data/coco.names"
    val_dir = "./coco_test_img"

    yolo = YOLO()
    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs,
                                                      class_num,
                                                      isTrain=False)
    pre_boxes, pre_score, pre_label = get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        anchors[2],
        anchors[1],
        anchors[0],
        width,
        height,
        class_num,
        score_thresh=score_thresh,
        iou_thresh=iou_thresh,
        max_box=max_box)
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("restore model from ", ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("cannot find ckpt model")
            exit(1)

        # id to names
        word_dict = tools.get_word_dict(name_file)
        # color of corresponding names
        color_table = tools.get_color_table(class_num)

        for name in os.listdir(val_dir):
            img_name = path.join(val_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not a file" % img_name)
                continue

            start = time.perf_counter()

            img, img_ori = read_img(img_name, width, height)
            if img is None:
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label],
                                           feed_dict={inputs: img})

            end = time.perf_counter()
            print("%s\t, time:%f s" % (img_name, end - start))

            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict,
                                     color_table)

            cv2.imshow('img', img_ori)
            cv2.waitKey(0)

            save_img(img_ori, name)
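An illustration (not part of the original example) of the reshape above: the 18 anchor values become three scales of three (w, h) pairs, and the get_predict_result call pairs anchors[2], the largest anchors, with the coarsest feature map feature_y1, the usual YOLO convention.

import numpy as np

anchors = np.asarray([12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146,
                      142, 110, 192, 243, 459, 401], np.float32).reshape([-1, 3, 2])
print(anchors[0])  # [[12. 16.] [19. 36.] [40. 28.]]       -- smallest anchors
print(anchors[2])  # [[142. 110.] [192. 243.] [459. 401.]] -- largest anchors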
Example #8
def backward():
    class_num = config.voc_class_num
    yolo = YOLO(class_num,
                config.voc_anchors,
                width=config.width,
                height=config.height)
    data = Data(config.voc_root_dir,
                config.voc_dir_ls,
                "./data/voc.names",
                class_num,
                config.batch_size,
                config.voc_anchors,
                config.data_augment,
                config.width,
                config.height,
                data_debug=config.data_debug)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[config.batch_size, None, None, 3])
    y1_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + class_num])
    y2_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + class_num])
    y3_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + class_num])

    feature_y1, feature_y2, feature_y3 = yolo.forward(
        inputs, weight_decay=config.weight_decay, isTrain=True)

    global_step = tf.Variable(0, trainable=False)

    # yolov4 loss
    loss = yolo.get_loss_v4(feature_y1, feature_y2, feature_y3, y1_true,
                            y2_true, y3_true, config.cls_normalizer,
                            config.ignore_thresh, config.prob_thresh,
                            config.score_thresh)
    l2_loss = tf.compat.v1.losses.get_regularization_loss()

    epoch = compute_curr_epoch(global_step, config.batch_size, data.num_imgs)
    lr = config_lr(config.lr_type, config.lr_init, epoch)
    optimizer = config_optimizer(config.optimizer_type, lr, config.momentum)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = optimizer.compute_gradients(loss + l2_loss)
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_step = optimizer.apply_gradients(clip_grad_var,
                                               global_step=global_step)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        step = 0

        ckpt = tf.compat.v1.train.get_checkpoint_state(config.voc_model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            step = int(step)
            Log.add_log("message: found ckpt model, global_step=" + str(step))
        else:
            Log.add_log("message: no ckpt model found")

        # total number of training steps
        total_steps = np.ceil(config.total_epoch * data.num_imgs /
                              config.batch_size)
        while step < total_steps:
            start = time.perf_counter()
            batch_img, y1, y2, y3 = next(data)
            _, loss_, step, lr_ = sess.run([train_step, loss, global_step, lr],
                                           feed_dict={
                                               inputs: batch_img,
                                               y1_true: y1,
                                               y2_true: y2,
                                               y3_true: y3
                                           })
            end = time.perf_counter()
            print(
                "step: %6d, loss: %.5g\t, w: %3d, h: %3d, lr:%.5g\t, time: %5f s"
                % (step, loss_, data.width, data.height, lr_, end - start))

            if (loss_ > 1e3) and (step > 1e3):
                Log.add_log("error:loss exception, loss_value = " + str(loss_))
                ''' break the process or lower learning rate '''
                raise ValueError("error:loss exception, loss_value = " +
                                 str(loss_) +
                                 ", please lower your learning rate")
                # lr = tf.math.maximum(tf.math.divide(lr, 10), config.lr_lower)

            if step % 5 == 2:
                Log.add_loss(str(step) + "\t" + str(loss_))

            if (step + 1) % config.save_step == 0:
                if config.save_ckpt_model:
                    # save ckpt model
                    Log.add_log("message: save ckpt model, step=" + str(step) +
                                ", lr=" + str(lr_))
                    saver.save(sess,
                               path.join(config.voc_model_path,
                                         config.voc_model_name),
                               global_step=step)
                if config.save_pb_model:
                    Log.add_log("message: save pb model, step=" + str(step) +
                                ", lr=" + str(lr_))
                    pb_model_name = path.join(
                        config.voc_model_path,
                        config.voc_model_name) + '-' + str(step) + ".pb"
                    constant_graph = graph_util.convert_variables_to_constants(
                        sess, sess.graph_def, [
                            'yolo/Conv_1/BiasAdd', 'yolo/Conv_9/BiasAdd',
                            'yolo/Conv_17/BiasAdd'
                        ])
                    # save PB model
                    with tf.compat.v1.gfile.FastGFile(pb_model_name, mode='wb') as f:
                        f.write(constant_graph.SerializeToString())

        # save ckpt model
        if config.save_ckpt_model:
            Log.add_log("message: save final ckpt model, step=" + str(step))
            saver.save(sess,
                       path.join(config.voc_model_path, config.voc_model_name),
                       global_step=step)

        # save pb model
        if config.save_pb_model:
            Log.add_log("message: save final pb model, step=" + str(step))
            pb_model_name = path.join(
                config.voc_model_path,
                config.voc_model_name) + '-' + str(step) + ".pb"
            constant_graph = graph_util.convert_variables_to_constants(
                sess, sess.graph_def, [
                    'yolo/Conv_1/BiasAdd', 'yolo/Conv_9/BiasAdd',
                    'yolo/Conv_17/BiasAdd'
                ])
            # save PB model
            with tf.compat.v1.gfile.FastGFile(pb_model_name, mode='wb') as f:
                f.write(constant_graph.SerializeToString())
    return 0
Example #9
def backward():
    yolo = YOLO()
    data = Data(voc_root_dir,
                names_file,
                class_num,
                batch_size,
                anchors,
                is_tiny=False,
                size=size)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[batch_size, None, None, 3])
    y1_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])
    y2_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])
    y3_true = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[batch_size, None, None, 3, 4 + 1 + class_num])

    feature_y1, feature_y2, feature_y3 = yolo.forward(
        inputs, class_num, weight_decay=weight_decay, isTrain=True)

    global_step = tf.Variable(0, trainable=False)

    # loss value of yolov4
    loss = Loss().yolo_loss([feature_y1, feature_y2, feature_y3],
                            [y1_true, y2_true, y3_true],
                            [anchors[2], anchors[1], anchors[0]],
                            width,
                            height,
                            class_num,
                            cls_normalizer=cls_normalizer,
                            iou_normalizer=iou_normalizer,
                            iou_thresh=iou_thresh,
                            prob_thresh=prob_thresh,
                            score_thresh=score_thresh)
    l2_loss = tf.compat.v1.losses.get_regularization_loss()

    epoch = compute_curr_epoch(global_step, batch_size, len(data.imgs_path))
    lr = Lr.config_lr(lr_type, lr_init, lr_lower=lr_lower,
                      piecewise_boundaries=piecewise_boundaries,
                      piecewise_values=piecewise_values, epoch=epoch)
    optimizer = Optimizer.config_optimizer(optimizer_type, lr, momentum)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = optimizer.compute_gradients(loss + l2_loss)
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_step = optimizer.apply_gradients(clip_grad_var,
                                               global_step=global_step)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        step = 0

        ckpt = tf.compat.v1.train.get_checkpoint_state(model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            step = int(step)
            Log.add_log("message: load ckpt model, global_step=" + str(step))
        else:
            Log.add_log("message: cannot find ckpt model")

        curr_epoch = step // data.steps_per_epoch
        while curr_epoch < total_epoch:
            for _ in range(data.steps_per_epoch):
                start = time.perf_counter()
                batch_img, y1, y2, y3 = next(data)
                _, loss_, step, lr_ = sess.run(
                    [train_step, loss, global_step, lr],
                    feed_dict={
                        inputs: batch_img,
                        y1_true: y1,
                        y2_true: y2,
                        y3_true: y3
                    })
                end = time.perf_counter()

                if (loss_ > 1e3) and (step > 1e3):
                    Log.add_log("error:loss exception, loss_value = " +
                                str(loss_))
                    ''' break the process or lower learning rate '''
                    raise ValueError("error:loss exception, loss_value = " +
                                     str(loss_) +
                                     ", please lower your learning rate")
                    # lr = tf.math.maximum(tf.math.divide(lr, 10), config.lr_lower)

                if step % 5 == 2:
                    print(
                        "step: %6d, epoch: %3d, loss: %.5g\t, wh: %3d, lr:%.5g\t, time: %5f s"
                        % (step, curr_epoch, loss_, width, lr_, end - start))
                    Log.add_loss(str(step) + "\t" + str(loss_))

            curr_epoch += 1
            if curr_epoch % save_per_epoch == 0:
                # save ckpt model
                Log.add_log("message: save ckpt model, step=" + str(step) +
                            ", lr=" + str(lr_))
                saver.save(sess,
                           path.join(model_path, model_name),
                           global_step=step)

        Log.add_log("message: save final ckpt model, step=" + str(step))
        saver.save(sess, path.join(model_path, model_name), global_step=step)

    return 0
Example #10
def main():
    yolo = YOLO(config.class_num, config.anchors)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        config.class_num,
        score_thresh=config.val_score_thresh,
        iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state(config.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            Log.add_log("message: load ckpt model:'" +
                        str(ckpt.model_checkpoint_path) + "'")
        else:
            Log.add_log("message: cannot find ckpt model")
            # exit(1)

        # dictionary mapping class id to name
        word_dict = tools.get_word_dict(config.name_file)
        # color table, one color per class
        color_table = tools.get_color_table(config.class_num)

        width = config.width
        height = config.height

        for name in os.listdir(config.val_dir):
            img_name = path.join(config.val_dir, name)
            if not path.isfile(img_name):
                print("'%s' is not a file" % img_name)
                continue

            start = time.perf_counter()

            img, nw, nh, img_ori, show_img = read_img(img_name, width, height)
            if img is None:
                Log.add_log("message: failed to read image '" + img_name + "'")
                continue
            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label],
                                           feed_dict={inputs: img})

            end = time.perf_counter()
            print("%s\t, time:%f s" % (img_name, end - start))

            # show_img = tools.draw_img(show_img, boxes, score, label, word_dict, color_table)
            # cv2.imshow('img', show_img)
            # cv2.waitKey(0)
            if config.keep_img_shape:
                # map boxes from the padded (letterboxed) input back to the original image
                dw = (width - nw) / 2
                dh = (height - nh) / 2
                for i in range(len(boxes)):
                    boxes[i][0] = (boxes[i][0] * width - dw) / nw
                    boxes[i][1] = (boxes[i][1] * height - dh) / nh
                    boxes[i][2] = (boxes[i][2] * width - dw) / nw
                    boxes[i][3] = (boxes[i][3] * height - dh) / nh
            img_ori = tools.draw_img(img_ori, boxes, score, label, word_dict,
                                     color_table)
            cv2.imshow('img_ori', img_ori)
            cv2.waitKey(0)

            if config.save_img:
                save_img(img_ori, name)
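The coordinate fix above is the inverse of the letterbox padding sketched after Example #2: the boxes come back normalized to the padded width x height input, so subtracting the pad offsets (dw, dh) and re-dividing by the unpadded size (nw, nh) re-normalizes them to the original image.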
Example #11
import os
import sys
import tensorflow as tf
import numpy as np
import config
from src.YOLO import YOLO

from utils.misc_utils import load_weights

weight_path = './yolo_weights/yolov4.weights'
save_path = './yolo_weights/yolov4.ckpt'
#anchors = [10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326]     #for yolov4-416
anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]       # for yolov4.weights
class_num = 80      # for yolov4.weights

yolo = YOLO(class_num, anchors, width=608, height=608)
with tf.Session() as sess:
    inputs = tf.placeholder(tf.float32, [1, 608, 608, 3])

    feature = yolo.forward(inputs, isTrain=False)

    saver = tf.train.Saver(var_list=tf.global_variables())

    load_ops = load_weights(tf.global_variables(), weight_path)
    sess.run(load_ops)
    saver.save(sess, save_path=save_path)
    print('TensorFlow model checkpoint has been saved to {}'.format(save_path))
    

Example #12
    def __init__(self, model_path, GPU_ratio=0.2):
        #----var
        JB_flow = 0
        class_num = 80
        height = 608  # 416 or 608
        width = 608  # 416 or 608
        score_thresh = 0.5
        iou_thresh = 0.213
        max_box = 50
        anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
        anchors = np.asarray(anchors).astype(np.float32).reshape([-1, 3, 2])
        name_file = "./coco.names"

        node_dict = {"input": "Placeholder:0",
                     "pre_boxes": "concat_9:0",
                     "pre_score": "concat_10:0",
                     "pre_label": "concat_11:0",
                     }

        #----model extension check
        if model_path.endswith('.pb'):
            sess, tf_dict = model_restore_from_pb(model_path, node_dict,GPU_ratio=GPU_ratio)
            tf_input = tf_dict['input']
            tf_pre_boxes = tf_dict["pre_boxes"]
            tf_pre_score = tf_dict['pre_score']
            tf_pre_label = tf_dict['pre_label']
        else:
            # parse the input size from a filename like "YOLO_v4_608.ckpt.meta"
            width = int(model_path.split("\\")[-1].split(".")[0].split("_")[-1])  # 416 or 608
            height = width
            yolo = YOLO()
            tf_input = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, None, None, 3])

            feature_y1, feature_y2, feature_y3 = yolo.forward(tf_input, class_num, isTrain=False)
            tf_pre_boxes, tf_pre_score, tf_pre_label = get_predict_result(feature_y1, feature_y2, feature_y3,
                                                                 anchors[2], anchors[1], anchors[0],
                                                                 width, height, class_num,
                                                                 score_thresh=score_thresh,
                                                                 iou_thresh=iou_thresh,
                                                                 max_box=max_box)
            init = tf.compat.v1.global_variables_initializer()

            saver = tf.compat.v1.train.Saver()
            #----GPU ratio setting
            config = tf.compat.v1.ConfigProto(
                log_device_placement=True,  # log whether each op runs on the CPU or GPU
                allow_soft_placement=True,  # fall back to an available device if the requested one is missing
            )
            if GPU_ratio is None:
                config.gpu_options.allow_growth = True  # grow GPU memory usage as needed
            else:
                config.gpu_options.per_process_gpu_memory_fraction = GPU_ratio  # cap the fraction of GPU memory used
            sess = tf.compat.v1.Session(config=config)
            sess.run(init)
            saver.restore(sess, model_path[:-5])  # strip the ".meta" extension

        print("JB> Height: {}, width: {}".format(height, width))

        #----label to class name
        label_dict = tools.get_word_dict(name_file)

        #----color of corresponding names
        color_table = tools.get_color_table(class_num)


        #----local var to global
        self.JB_flow = JB_flow # show flow
        self.width = width
        self.height = height
        self.tf_input = tf_input
        self.pre_boxes = tf_pre_boxes
        self.pre_score = tf_pre_score
        self.pre_label = tf_pre_label
        self.sess = sess
        self.label_dict = label_dict
        self.color_table = color_table
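A small illustration (with a made-up path) of the filename convention the ckpt branch above relies on: the input size is parsed from the .meta filename, matching the 'YOLO_v4_608.ckpt' naming produced by convert_weight in Example #5.

model_path = "C:\\models\\YOLO_v4_608.ckpt.meta"  # hypothetical path
width = int(model_path.split("\\")[-1].split(".")[0].split("_")[-1])
print(width)  # 608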
Example #13
def backward():
    yolo = YOLO(config.class_num,
                config.anchors,
                width=config.width,
                height=config.height)
    data = Data(config.train_file,
                config.class_num,
                config.batch_size,
                config.anchors,
                config.data_augment,
                width=config.width,
                height=config.height,
                data_debug=config.data_debug)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[config.batch_size, None, None, 3])
    y1_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])
    y2_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])
    y3_true = tf.compat.v1.placeholder(
        dtype=tf.float32,
        shape=[config.batch_size, None, None, 3, 4 + 1 + config.class_num])

    feature_y1, feature_y2, feature_y3 = yolo.forward(
        inputs, weight_decay=config.weight_decay, isTrain=True)

    global_step = tf.Variable(0, trainable=False)

    # loss value of yolov4
    loss = yolo.get_loss_v4(feature_y1, feature_y2, feature_y3, y1_true,
                            y2_true, y3_true, config.cls_normalizer,
                            config.ignore_thresh, config.prob_thresh,
                            config.score_thresh)
    l2_loss = tf.compat.v1.losses.get_regularization_loss()

    epoch = compute_curr_epoch(global_step, config.batch_size,
                               len(data.imgs_path))
    lr = config_lr(config.lr_type, config.lr_init, epoch)
    optimizer = config_optimizer(config.optimizer_type, lr, config.momentum)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = optimizer.compute_gradients(loss + l2_loss)
        clip_grad_var = [
            gv if gv[0] is None else [tf.clip_by_norm(gv[0], 100.), gv[1]]
            for gv in gvs
        ]
        train_step = optimizer.apply_gradients(clip_grad_var,
                                               global_step=global_step)

    # initialize
    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        step = 0

        ckpt = tf.compat.v1.train.get_checkpoint_state(config.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            step = int(step)
            Log.add_log("message: load ckpt model, global_step=" + str(step))
        else:
            Log.add_log("message: cannot find ckpt model")

        total_steps = np.ceil(config.total_epoch * len(data.imgs_path) /
                              config.batch_size)
        while step < total_steps:
            start = time.perf_counter()
            batch_img, y1, y2, y3 = next(data)
            _, loss_, step, lr_ = sess.run([train_step, loss, global_step, lr],
                                           feed_dict={
                                               inputs: batch_img,
                                               y1_true: y1,
                                               y2_true: y2,
                                               y3_true: y3
                                           })
            end = time.perf_counter()
            print(
                "step: %6d, loss: %.5g\t, w: %3d, h: %3d, lr:%.5g\t, time: %5f s"
                % (step, loss_, data.width, data.height, lr_, end - start))

            if (loss_ > 1e3) and (step > 1e3):
                Log.add_log("error:loss exception, loss_value = " + str(loss_))
                ''' break the process or lower learning rate '''
                raise ValueError("error:loss exception, loss_value = " +
                                 str(loss_) +
                                 ", please lower your learning rate")
                # lr = tf.math.maximum(tf.math.divide(lr, 10), config.lr_lower)

            if step % 5 == 2:
                Log.add_loss(str(step) + "\t" + str(loss_))

            if (step + 1) % config.save_step == 0:
                # save ckpt model
                Log.add_log("message: save ckpt model, step=" + str(step) +
                            ", lr=" + str(lr_))
                saver.save(sess,
                           path.join(config.model_path, config.model_name),
                           global_step=step)

        # save ckpt model
        Log.add_log("message: save final ckpt model, step=" + str(step))
        saver.save(sess,
                   path.join(config.model_path, config.model_name),
                   global_step=step)

    return 0
Example #14
def main():
    anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401  # anchors for a 608 input
    #anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    yolo = YOLO(80, anchors, width=416, height=416)

    inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                      shape=[1, None, None, 3])
    feature_y1, feature_y2, feature_y3 = yolo.forward(inputs, isTrain=False)
    pre_boxes, pre_score, pre_label = yolo.get_predict_result(
        feature_y1,
        feature_y2,
        feature_y3,
        80,
        score_thresh=config.val_score_thresh,
        iou_thresh=config.iou_thresh,
        max_box=config.max_box)

    init = tf.compat.v1.global_variables_initializer()

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        ckpt = tf.compat.v1.train.get_checkpoint_state("./yolo_weights")
        if ckpt and ckpt.model_checkpoint_path:
            print("restore: ", ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            exit(1)

        # id to names
        word_dict = tools.get_word_dict("./data/coco.names")
        # color of corresponding names
        color_table = tools.get_color_table(80)

        width = 416
        height = 416

        cap = cv2.VideoCapture(0)
        # property 6 is CAP_PROP_FOURCC; use the named constant for clarity
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        # cap.set(3, 1920)
        # cap.set(4, 1080)
        while True:

            start = time.perf_counter()

            _, frame = cap.read()
            img_rgb = cv2.resize(frame, (width, height))
            img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
            img_in = img_rgb.reshape((1, height, width, 3)) / 255.  # NHWC; identical here since width == height
            #img, img_ori = read_img(img_name, width, height)

            boxes, score, label = sess.run([pre_boxes, pre_score, pre_label],
                                           feed_dict={inputs: img_in})

            end = time.perf_counter()

            print("time:%f s" % (end - start))

            frame = tools.draw_img(frame, boxes, score, label, word_dict,
                                   color_table)

            cv2.imshow('img', frame)
            if cv2.waitKey(1) & 0xFF == 27:  # ESC exits
                break
Example #15
import os
import sys
import tensorflow as tf
import numpy as np
import config
from src.YOLO import YOLO

from utils.misc_utils import load_weights

weight_path = './yolo_weights/yolov4.weights'
save_path = './yolo_weights/yolov4.ckpt'
anchors = [
    12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459,
    401
]  # for yolov4.weights
class_num = 80  # for yolov4.weights

yolo = YOLO()
with tf.Session() as sess:
    inputs = tf.placeholder(tf.float32, [1, 608, 608, 3])

    feature = yolo.forward(inputs, class_num, isTrain=False)

    saver = tf.train.Saver(var_list=tf.global_variables())

    load_ops = load_weights(tf.global_variables(), weight_path)
    sess.run(load_ops)
    saver.save(sess, save_path=save_path)
    print('TensorFlow model checkpoint has been saved to {}'.format(save_path))