    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = {0: 'front', 1: 'rear'}
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.test_video_path = cfg.TEST.TEST_VIDEO_PATH
        self.predict_video_path = cfg.TEST.PREDICT_VIDEO_PATH

        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             shape=[1, 544, 544, 3],
                                             name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable, 1)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
        self.conv_sbbox, self.conv_mbbox, self.conv_lbbox = model.conv_sbbox, model.conv_mbbox, model.conv_lbbox
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
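            # note: ema_obj is created but not passed to the Saver below, so the raw (non-averaged) weights are restored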

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weight_file)
Example #2
def create_pb(ckpt_file):

    pb_file = "./yolov3_coco.pb"

    output_node_names = [
        "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
        "pred_lbbox/concat_2"
    ]
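    # these node names must match the input placeholder and the decode/concat outputs created by YOLOV3 below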

    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    model = YOLOV3(input_data, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(converted_graph_def.SerializeToString())
Example #3
    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.show_label = cfg.TEST.SHOW_LABEL

        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
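        # variables_to_restore() maps the shadow (EMA) variable names onto the model variables, so the averaged weights are loaded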
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)
Example #4
    def __init__(self):
        self.input_size       = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes          = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes      = len(self.classes)
        self.anchors          = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold  = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold    = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path  = cfg.TEST.ANNOT_PATH
        self.weight_file      = cfg.TEST.WEIGHT_FILE
        self.write_image      = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label       = cfg.TEST.SHOW_LABEL

        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.trainable  = tf.placeholder(dtype=tf.bool,    name='trainable')

        model = YOLOV3() #(self.input_data, self.trainable)
        sqback = backbone.squeezenet('sqz_full.mat')
        sqback.forward(imgs=self.input_data, trainable=False)

        model.forward(sqback, False)
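        # in this variant the YOLOv3 head runs on SqueezeNet backbone features instead of Darknet-53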

        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess  = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)
def main(ckpt_file, export_path='/tmp/yolo/1'):

    # ckpt_file: path to the trained checkpoint; the node names below must match the ops built by YOLOV3
    output_node_names = [
        "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
        "pred_lbbox/concat_2"
    ]

    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32,
                                    name='input_data',
                                    shape=(1, 416, 416, 3))

    model = YOLOV3(input_data, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)
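    # note: the frozen converted_graph_def above is not used by simple_save below, which exports the live session directly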

    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_data': input_data},
        outputs={
            'conv_sbbox':
            tf.get_default_graph().get_tensor_by_name('pred_sbbox/concat_2:0'),
            'conv_mbbox':
            tf.get_default_graph().get_tensor_by_name('pred_mbbox/concat_2:0'),
            'conv_lbbox':
            tf.get_default_graph().get_tensor_by_name('pred_lbbox/concat_2:0'),
        },
    )
Example #6
def main(_):
    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'x': tf.FixedLenFeature(shape=[], dtype=tf.float32),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)

    tf_example['x'] = tf.reshape(tf_example['x'], (1, 416, 416, 3))
    input_tensor = tf.identity(tf_example['x'],
                               name='x')  # use tf.identity() to assign name

    model = YOLOV3(input_data, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)
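    # note: the model is built on the raw input_data placeholder, not on input_tensor parsed from tf_example above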

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    # Create SavedModelBuilder class
    # defines where the model will be exported
    export_path_base = FLAGS.export_model_dir
    export_path = os.path.join(tf.compat.as_bytes(export_path_base),
                               tf.compat.as_bytes(str(FLAGS.model_version)))
    print('Exporting trained model to', export_path)

    builder = tf.saved_model.builder.SavedModelBuilder(export_path)

    tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
    tensor_conv_sbbox_output = tf.saved_model.utils.build_tensor_info(
        model.conv_sbbox)
    tensor_conv_mbbox_output = tf.saved_model.utils.build_tensor_info(
        model.conv_mbbox)
    tensor_conv_lbbox_output = tf.saved_model.utils.build_tensor_info(
        model.conv_lbbox)

    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'images': tensor_info_input},
            outputs={
                'conv_sbbox': tensor_conv_sbbox_output,
                'conv_mbbox': tensor_conv_mbbox_output,
                'conv_lbbox': tensor_conv_lbbox_output
            },
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    )

    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            'predict_images': prediction_signature,
        })

    # export the model
    builder.save(as_text=True)
    print('Done exporting!')
def main(argv=None):
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(None, 608, 608, 3),
                                name='inputs')
    model = YOLOV3(input_data, trainable=False, tiny=FLAGS.tiny)
    load_ops = load_weights(tf.global_variables(), FLAGS.weights_file)
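    # load_ops: assign ops that copy the weights read from FLAGS.weights_file into the graph variables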

    with tf.Session() as sess:
        sess.run(load_ops)
        freeze_graph(sess, FLAGS.output_graph)
    def __init__(self):
        self.input_size = 416
        self.anchor_per_scale = 3

        self.path1 = r"./data/classes/antenna.names"  # modify for your own classes

        self.classes = utils.read_class_names(self.path1)
        self.num_classes = len(self.classes)

        self.path2 = r"./data/anchors/basline_anchors.txt"

        self.anchors = np.array(utils.get_anchors(self.path2))
        self.score_threshold = 0.4
        self.iou_threshold = 0.5
        self.moving_ave_decay = 0.9995

        self.path3 = "./data/dataset/antenna_train.txt"

        self.annotation_path = self.path3

        self.path4 = r'checkpoint1/yolov3_test_loss=0.9668.ckpt-705'
        # self.path4 = r'checkpoint/model/yolov3_test_loss=49.6633.ckpt-56'

        self.weight_file = self.path4
        self.write_image = True

        self.path5 = r"./data/detection1/"

        self.write_image_path = self.path5  # directory where the prediction-result images are saved
        self.show_label = True
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
        print("in", self.input_data.shape)
        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
        print(1 + 2)
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)
Example #9
def inference(input_data):

    model = YOLOV3(False, opts)
    # construct model
    # we will put whole network on one ipu
    layers = []
    # build layer functions for backbone and upsample
    layers.extend(model.build_backbone())
    # the last layer of darknet53 is a classification layer, so it has 52 conv layers
    assert len(layers) == 52
    layers.extend(model.build_upsample())
    # there are 25 conv layers if we count upsample as a conv layer
    assert len(layers) == 52 + 25
    # the decoding layer and loss layer are always placed on the last IPU
    layers.append(model.decode_boxes)

    # reuse stages_constructor so we don't need to pass params by hand
    network_func = stages_constructor(
        [layers], ["input_data", "nums"],
        ["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
    return network_func(input_data)
Example #10
    def __init__(self, opts):
        """Create a training class
        The constructor initializes all needed parameters
        """
        self.opts = opts
        self.learn_rate_init = opts["train"]["learn_rate_init"]
        self.learn_rate_end = opts["train"]["learn_rate_end"]
        self.epochs = opts["train"]["epochs"]
        self.warmup_epochs = opts["train"]["warmup_epochs"]
        self.initial_weight = opts["train"]["initial_weight"]
        self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
        self.trainset = Dataset("train", self.opts)
        self.steps_per_epoch = len(
            self.trainset) / opts["train"]["total_replicas"]
        self.precision = tf.float16 if opts["yolo"][
            "precision"] == "fp16" else tf.float32
        self.model = YOLOV3(opts["train"]["bn_trainable"], opts)
        self.batch_size = opts["train"]["batch_size"]
        self.data_threads_number = opts["train"]["data_threads_number"]
        self.loss_scaling = opts["train"]["loss_scaling"]
        self.repeat_count = opts["train"]["repeat_count"]
        self.for_speed_test = opts["train"]["for_speed_test"]
Example #11
    def __init__(self):
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL

        # model_dir = os.path.abspath(os.path.dirname(self.weight_file))
        # self.weight_file = os.path.join(model_dir, 'yolov3_test_loss=8.1157.ckpt-24')
        # self.weight_file = tf.train.latest_checkpoint(model_dir)
        # print(self.weight_file)

        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        # with tf.name_scope('ema'):
        #     ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        # self.sess = tf.Session()
        # self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver = tf.train.Saver(tf.global_variables())
        self.saver.restore(self.sess, self.weight_file)
Example #12
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 100
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.input_image_with_bboxes = tf.placeholder(dtype=tf.float32,
                                                          name='input_image')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, trainable=True, tiny=TINY)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + 2 * self.conf_loss + 5 * self.prob_loss  # weight the three loss terms to adjust their relative influence

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')

            # tf.cond: if pred is true return true_fn, else false_fn. The warm-up phase corresponds to the original
            # burn_in (power=1): the learning rate grows linearly with the step, then decays along a cosine curve.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
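            # schedule implemented by the tf.cond above:
            #   lr(t) = t / warmup_steps * learn_rate_init                                     for t < warmup_steps
            #   lr(t) = learn_rate_end + 0.5 * (learn_rate_init - learn_rate_end)
            #           * (1 + cos(pi * (t - warmup_steps) / (train_steps - warmup_steps)))    otherwise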
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # ExponentialMovingAverage: update the parameters with a moving average
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            # first-stage training: only train the final conv layers of the three detection branches
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)

            # tf.control_dependencies() enforces execution order: the ops defined inside the block run only after the listed ops
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            # second-stage training: train all layers, i.e. fine-tuning
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('learning_rate'):
            tf.summary.scalar("learn_rate", self.learn_rate)
        with tf.name_scope('loss'):
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)
        with tf.name_scope('images'):
            tf.summary.image("input_data", self.input_data)
            tf.summary.image("input_image", self.input_image_with_bboxes)
        logdir = "./data/log/"
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.mkdir(logdir)
        self.write_op = tf.summary.merge_all()

        loss = tf.summary.scalar("test_loss", self.loss)
        test_img = tf.summary.image("input_data", self.input_data)
        self.write_op_test = tf.summary.merge([loss, test_img])
        self.summary_writer = tf.summary.FileWriter(logdir,
                                                    graph=self.sess.graph)
Example #13
# ================================================================

import tensorflow as tf
from core.yolov3 import YOLOV3

TINY = True
INPUT_SIZE = 608
pb_file = "./pb/testtiny.pb"
ckpt_file = "./checkpoint/yolov3_test_loss=3.5116.ckpt-200"
# output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]
output_node_names = ['inputs', 'output_boxes']

input_data = tf.placeholder(dtype=tf.float32,
                            shape=(None, INPUT_SIZE, INPUT_SIZE, 3),
                            name='inputs')

model = YOLOV3(input_data, trainable=False, tiny=TINY)
# print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
    def __init__(self):  # read a few settings from the config file
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"  # log directory
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        with tf.name_scope('define_input'):  # define the input placeholders
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):  # define the loss
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):  # define the learning-rate schedule
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)
        '''
        Purpose of warmup_steps:
        At the very start of training the loss can easily become NaN, so the initial learning rate is set very low,
        which in turn makes training slow. Gradually increasing the learning rate avoids NaN at the beginning while
        still letting training speed up once it has stabilised.
        '''

        with tf.name_scope(
                "define_weight_decay"):  # exponential smoothing: damps oscillation late in training, making the result more robust
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        # specify which variables/layers to restore; placed before the training ops to reduce the saved model size
        with tf.name_scope('loader_and_saver'):
            variables_to_restore = [
                v for v in self.net_var if v.name.split('/')[0] not in
                ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']
            ]
            self.loader = tf.train.Saver(variables_to_restore)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

        with tf.name_scope("define_first_stage_train"):  # first stage: only train the specified layers
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):  # second stage: unfreeze all layers
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"  # log directory
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
Example #15
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(
            cfg.YOLO.CLASSES)  # dict mapping class ID -> name
        self.num_classes = len(self.classes)  # number of detection classes
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY  # decay used for the moving average
        self.max_bbox_per_scale = 150  # maximum number of detections per scale
        self.train_logdir = "./data/log/train"  # training log directory
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        self.config = tf.ConfigProto(allow_soft_placement=True)
        #self.config.gpu_options.per_process_gpu_memory_fraction = 0.2  # cap GPU memory usage at 20%
        self.sess = tf.Session(config=self.config)

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(
                dtype=tf.bool, name='training')  # placeholder: True during training, False during validation

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss  # total loss

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(
                    tf.trainable_variables())  # create moving averages of the model's trainable variables

        # first-stage training: only the final conv layers of the three detection branches
        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        # second-stage training: train all layers, i.e. the fine-tuning stage
        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)
            #tf.summary.image("input_image", self.input_data)

            logdir = "./data/log/"
            if os.path.exists(logdir):
                shutil.rmtree(logdir)  # recursively delete the directory and everything in it
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all(
            )  # merge all summaries so TensorBoard can display them
            self.summary_writer = tf.summary.FileWriter(
                logdir, graph=self.sess.graph)  # writer that saves the graph and summaries to logdir
Example #16
def export():

    serialized_tf_example = tf.placeholder(tf.string,
                                           shape=[None],
                                           name='encoded_image_tensor')

    images = tf.map_fn(preprocess_image, serialized_tf_example, tf.float32)

    model = YOLOV3(images, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(args.ckpt_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(args.freeze_graph_dir, "wb") as f:
        f.write(converted_graph_def.SerializeToString())

    optimize_graph('',
                   args.freeze_graph_dir,
                   transforms,
                   output_node_names,
                   outname=args.optimizer_graph_dir)

    graph_def = get_graph_def_from_file(args.optimizer_graph_dir)

    with tf.Graph().as_default():

        tf.import_graph_def(graph_def,
                            input_map=None,
                            return_elements=None,
                            name="")

        with tf.Session() as sess:

            # Export inference model.
            output_path = os.path.join(
                tf.compat.as_bytes(args.output_dir),
                tf.compat.as_bytes(str(args.model_version)))
            print('Exporting trained model to', output_path)
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)

            input_tensor = tf.get_default_graph().get_tensor_by_name(
                args.input_tensor)
            sbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.sbbox_tensor)
            mbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.mbbox_tensor)
            lbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.lbbox_tensor)
            class_num = args.class_num

            sbbox_tensor = tf.reshape(tf.convert_to_tensor(sbbox_tensor),
                                      shape=[-1, 5 + class_num])
            mbbox_tensor = tf.reshape(tf.convert_to_tensor(mbbox_tensor),
                                      shape=[-1, 5 + class_num])
            lbbox_tensor = tf.reshape(tf.convert_to_tensor(lbbox_tensor),
                                      shape=[-1, 5 + class_num])
            output_tensor = tf.concat(
                [sbbox_tensor, mbbox_tensor, lbbox_tensor], 0)
            # top_100 = tf.nn.top_k(output_tensor[:, 4], 100)
            # output_tensor = tf.gather(output_tensor, top_100.indices)
            num_tensor = tf.constant(100)

            classes_tensor = tf.argmax(output_tensor[:, 5:], axis=1)
            scores_tensor = output_tensor[:, 4]
            raw_boxs_tensor = output_tensor[:, 0:4] / IMAGE_SIZE
            print(raw_boxs_tensor.shape)
            boxs_tensor_com = raw_boxs_tensor
            print(boxs_tensor_com.shape)

            boxs_tensor_minx = raw_boxs_tensor[:,
                                               0] - raw_boxs_tensor[:, 2] * 0.5
            boxs_tensor_miny = raw_boxs_tensor[:,
                                               1] - raw_boxs_tensor[:, 3] * 0.5
            boxs_tensor_maxx = raw_boxs_tensor[:,
                                               0] + raw_boxs_tensor[:, 2] * 0.5
            boxs_tensor_maxy = raw_boxs_tensor[:,
                                               1] + raw_boxs_tensor[:, 3] * 0.5
            boxs_tensor_minx = tf.expand_dims(boxs_tensor_minx, 1)
            boxs_tensor_miny = tf.expand_dims(boxs_tensor_miny, 1)
            boxs_tensor_maxx = tf.expand_dims(boxs_tensor_maxx, 1)
            boxs_tensor_maxy = tf.expand_dims(boxs_tensor_maxy, 1)
            boxs_tensor_com = tf.concat([
                boxs_tensor_miny, boxs_tensor_minx, boxs_tensor_maxy,
                boxs_tensor_maxx
            ], 1)
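            # boxes converted from (cx, cy, w, h) to corner format, concatenated as [ymin, xmin, ymax, xmax] for NMS below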

            boxs_tensor_indices = tf.image.non_max_suppression(
                boxs_tensor_com, scores_tensor, 100, 0.5)
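            # NMS: keep at most 100 boxes, suppressing any box whose IoU with a higher-scoring box exceeds 0.5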
            boxs_tensor_com = tf.gather(boxs_tensor_com, boxs_tensor_indices)
            classes_tensor = tf.gather(classes_tensor, boxs_tensor_indices)
            scores_tensor = tf.gather(scores_tensor, boxs_tensor_indices)

            scores_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.expand_dims(scores_tensor, 0))
            classes_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.expand_dims(classes_tensor, 0))
            boxes_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.expand_dims(boxs_tensor_com, 0))
            raw_boxes_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.expand_dims(raw_boxs_tensor, 0))
            num_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.expand_dims(num_tensor, 0))

            inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                input_tensor)

            tensor_info_inputs = {'inputs': inputs_tensor_info}
            print(scores_tensor_info, inputs_tensor_info, classes_tensor_info,
                  boxes_tensor_info, num_tensor_info)

            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs=tensor_info_inputs,
                    # outputs=tensor_info_outputs,
                    outputs={
                        'detection_scores': scores_tensor_info,
                        'detection_classes': classes_tensor_info,
                        'detection_boxes': boxes_tensor_info,
                        # 'raw_boxes': raw_boxes_tensor_info,
                        'num_detections': num_tensor_info,
                    },
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            builder.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    prediction_signature,
                },
            )

            builder.save()
            print('Successfully exported model to %s' % args.output_dir)
Example #17
import tensorflow as tf
from core.backbone import darknet53
from core.yolov3 import YOLOV3


def stats_graph(graph):
    flops = tf.profiler.profile(
        graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
    params = tf.profiler.profile(graph,
                                 options=tf.profiler.ProfileOptionBuilder.
                                 trainable_variables_parameter())
    print('FLOPs: {};    Trainable params: {}'.format(flops.total_float_ops,
                                                      params.total_parameters))


with tf.Graph().as_default() as graph:
    image = tf.placeholder(dtype=tf.float32,
                           shape=[1, 544, 544, 3],
                           name="input")
    out = YOLOV3(image, True, 1)
    stats_graph(graph)
Example #18
    def __init__(self):
        self.anchor_per_scale = 3
        self.classes = utils.read_class_names(
            "/home/Pedestrian/Documents/TensorFlow_YOLOv3-master/LabelImage_v1.8.1/data/predefined_classes.txt"
        )
        self.num_classes = len(self.classes)
        self.train_epochs = 160
        self.max_bbox_per_scale = 150
        self.trainset = Dataset()
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=True))

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(
                1.0, dtype=tf.float32, trainable=False, name='global_step'
            )  # step 1: 1.0  ==>   num * self.steps_per_period

            self.update_global_step = tf.assign_add(self.global_step, 1.0)

            self.learn_rate = tf.Variable(1e-4,
                                          dtype=tf.float32,
                                          trainable=False,
                                          name='learn_rate')

            self.update_learn_rate = tf.assign(self.learn_rate,
                                               self.learn_rate * 0.8)
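            # each time update_learn_rate is run, the learning rate is decayed by 20%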

        with tf.name_scope("train"):
            self.optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
                self.loss, var_list=tf.trainable_variables())

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [self.optimizer, self.update_global_step]):
                    self.train_op_with_trainable_variables = tf.no_op()

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "log"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
Example #19
    def __init__(self):
        # number of anchors per scale
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # class names
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # number of classes
        self.num_classes = len(self.classes)
        # initial learning rate
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        # final (minimum) learning rate
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # number of epochs in the first training stage
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        # number of epochs in the second training stage
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        # warm-up epochs for the learning-rate schedule
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        # checkpoint path used to restore / initialise the model
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        # current timestamp
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        # decay used for the exponential moving average of the weights
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # maximum number of boxes per scale
        self.max_bbox_per_scale = 150
        # training log directory
        self.train_logdir = "./data/log/train"
        # load the training data
        # TODO: adapt this to your own dataset format
        self.trainset = Dataset('train')
        # load the test data
        self.testset = Dataset('test')
        # number of training steps per epoch
        self.steps_per_period = len(self.trainset)
        # create the session
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # define the network inputs
        # note: the labels carry confidences/boxes plus raw box targets, i.e. two kinds of labels per scale, six label tensors in total here
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')  # candidate (label) boxes
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')  # box coordinates
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool,
                                            name='training')  # training (True) or testing (False)

        # define the network and the loss
        with tf.name_scope("define_loss"):
            # build the model
            self.model = YOLOV3(self.input_data, self.trainable)

            # all model variables in the graph
            self.net_var = tf.global_variables()

            # build the box (GIoU), confidence and class-probability losses
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)

            # combine the three losses into one total loss
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # global-step variable
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')

            # learning-rate warm-up threshold (warm-up epochs * steps per epoch)
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            # total number of training steps
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')

            # while the step count is below the warm-up threshold, use a fraction of the initial learning rate;
            # once past the threshold, switch to the cosine-decay schedule
            self.learn_rate = tf.cond(
                pred=self.global_step <
                warmup_steps,  # train_steps and warmup_steps are fixed values
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))

            # op that increments the global step
            global_step_update = tf.assign_add(self.global_step, 1.0)

        # moving-average update of the model parameters (to make the weights smoother)
        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay) \
                .apply(tf.trainable_variables())

        # first-stage training setup
        with tf.name_scope("define_first_stage_train"):
            # collect the variables trained during the first stage
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            # optimizer for the first stage
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=self.first_stage_trainable_var_list)

            # add control dependencies:
            # first run the BN update ops, then the optimizer step / global-step update, and finally the moving-average update
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):  # batch-norm update ops
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # training op (a no_op that only triggers the ops above)
                        self.train_op_with_frozen_variables = tf.no_op()

        # second-stage training setup
        with tf.name_scope("define_second_stage_train"):
            # variables trained during the second stage (all trainable variables)
            second_stage_trainable_var_list = tf.trainable_variables()

            # optimizer for the second stage
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=second_stage_trainable_var_list)

            # add control dependencies:
            # first run the BN update ops, then the optimizer step / global-step update, and finally the moving-average update
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # training op (a no_op that only triggers the ops above)
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            # saver for loading pretrained weights and saver for checkpointing
            self.loader = tf.train.Saver(
                self.net_var)  # only loads the network parameters (everything used in the forward pass)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

        with tf.name_scope('summary'):
            # TensorBoard summaries
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # create the summary log directory
            logdir = "./data/log2/"
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.mkdir(logdir)

            # summary writer
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)

        print("Initialisation finished.....")
Example #20
    def __init__(self):
    
        # Initialize Horovod
        hvd.init()
        config=tf.ConfigProto(allow_soft_placement=True)
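        # pin each process to a single GPU chosen by its Horovod local rank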
        config.gpu_options.visible_device_list = str(hvd.local_rank())
        
        self.anchor_per_scale    = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes             = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes         = len(self.classes)
        self.learn_rate_init     = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end      = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs  = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods      = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight      = cfg.TRAIN.INITIAL_WEIGHT
        self.time                = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay    = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale  = 150
        self.train_logdir        = "./data/log/train"
        self.trainset            = Dataset('train')
        self.testset             = Dataset('test')
        self.steps_per_period    = len(self.trainset)
        self.sess                = tf.Session(config=config)
        
        
        with tf.name_scope('define_input'):
            self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox  = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable     = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                                                    self.label_sbbox,  self.label_mbbox,  self.label_lbbox,
                                                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                        dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period,
                                        dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                    (1 + tf.cos(
                                        (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
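            # Schedule recap (descriptive note matching the code above): linear
            # warmup to learn_rate_init over `warmup_steps`, then cosine decay,
            #   lr(t) = lr_end + 0.5 * (lr_init - lr_end)
            #                  * (1 + cos(pi * (t - warmup) / (train_steps - warmup)))
            # so the rate falls smoothly from lr_init to lr_end by the last step.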
            global_step_update = tf.assign_add(self.global_step, 1.0)
            # For Horovod: scale the learning rate linearly with the number of workers
            self.learn_rate = self.learn_rate * hvd.size()

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)
            
            first_opt = tf.train.AdamOptimizer(self.learn_rate)
            # For Horovod: wrap the optimizer so gradients are averaged across workers
            first_opt = hvd.DistributedOptimizer(first_opt)
            # For Horovod: hook that broadcasts rank-0 variables (note: `hooks` is not used further in this snippet)
            hooks = [hvd.BroadcastGlobalVariablesHook(0)]
            first_stage_optimizer = first_opt.minimize(self.loss,
                                                      var_list=self.first_stage_trainable_var_list)
                                                    
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_opt = tf.train.AdamOptimizer(self.learn_rate)
            # For Horovod: wrap the second-stage optimizer as well
            second_opt = hvd.DistributedOptimizer(second_opt)
            second_stage_optimizer = second_opt.minimize(self.loss,
                                                      var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver  = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate",      self.learn_rate)
            tf.summary.scalar("giou_loss",  self.giou_loss)
            tf.summary.scalar("conf_loss",  self.conf_loss)
            tf.summary.scalar("prob_loss",  self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer  = tf.summary.FileWriter(logdir, graph=self.sess.graph)
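A hedged launch sketch (not part of the original snippet): with this setup the script is typically started with horovodrun, and every worker syncs to the rank-0 weights once before training. `Trainer` is an assumed name for the class above; the explicit broadcast stands in for the unused `hooks` list, which would otherwise be passed to a tf.train.MonitoredTrainingSession.

# Launch, e.g.: horovodrun -np 4 python train.py
trainer = Trainer()
trainer.sess.run(tf.global_variables_initializer())
# Make every worker start from identical weights.
trainer.sess.run(hvd.broadcast_global_variables(0))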
Ejemplo n.º 21
            i += 1
    print("ptr:", ptr)
    return assign_ops


with open(arguments.config, "r") as f:
    opts = json.load(f)
with tf.name_scope("input"):
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(1, 320, 320, 3),
                                name="input_data")

# Converge to a checkpoint in fp32;
# a separate script can convert it to fp16 later if needed.
opts["yolo"]["precision"] = "fp32"
model = YOLOV3(False, opts)

# construct model
layers = [
    model.build_backbone_part1,
    model.build_backbone_part2,
    model.build_backbone_part3,
    model.build_backbone_part4,
    model.build_backbone_part5,
    model.build_backbone_part6,
]
# reuse stages_constructor so we don't need to pass params by hand
network_func = stages_constructor(
    [layers], ["image"], ["pred_sbbox", "pred_mbbox", "pred_lbbox"])[0]
network_func(input_data)
Ejemplo n.º 22
    if not os.path.exists(ckpt_file + '.index'):
        print('freeze_ckpt_to_pb ckpt_file=', ckpt_file, ' does not exist')
        sys.exit()

    pb_file = ckpt_file + '.pb' #argv[4]
    print('freeze_ckpt_to_pb gpu_id=%s, net_type=%s, ckpt_file=%s, pb_file=%s' % (gpu_id, net_type, ckpt_file, pb_file))
    
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

    output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]
    #output_node_names = ["input/input_data", "conv_sbbox/BiasAdd", "conv_mbbox/BiasAdd", "conv_lbbox/BiasAdd"]
    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    if net_type == 'yolov3':
        model = YOLOV3(input_data, trainable=False, freeze_pb=False)
    elif net_type == 'yolov4':
        model = YOLOV4(input_data, trainable=False)
    elif net_type == 'yolov5':
        model = YOLOV5(input_data, trainable=False)
    else:
        print('net_type=', net_type, ' error')
        sys.exit()

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def=sess.graph.as_graph_def(),
                                                                       output_node_names=output_node_names)
    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(converted_graph_def.SerializeToString())
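For completeness, a minimal sketch (not from the original script) of loading the frozen graph back and running the prediction heads; the tensor names follow output_node_names above, and `preprocessed_image` is a hypothetical (1, H, W, 3) float32 array.

graph = tf.Graph()
with graph.as_default():
    frozen_graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_file, "rb") as f:
        frozen_graph_def.ParseFromString(f.read())
    tf.import_graph_def(frozen_graph_def, name="")

input_tensor = graph.get_tensor_by_name("input/input_data:0")
pred_tensors = [graph.get_tensor_by_name(name + ":0")
                for name in ("pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2")]

with tf.Session(graph=graph) as infer_sess:
    sbbox, mbbox, lbbox = infer_sess.run(
        pred_tensors, feed_dict={input_tensor: preprocessed_image})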
Ejemplo n.º 23
with tf.name_scope('input'):
    serialized_input_data = tf.placeholder(dtype=tf.string, name='input_data')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
    }
    input_data = tf.parse_example(serialized_input_data, feature_configs)
    jpegs = input_data['image/encoded']
    image_string = tf.reshape(jpegs, shape=[])

    image_tensor = tf.image.decode_image(image_string, channels=3)
    image_tensor.set_shape((None, None, 3))

    image_input = image_tensor[np.newaxis, ...] / 255

model = YOLOV3(image_input, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

# Export inference model.
output_path = os.path.join(tf.compat.as_bytes(output_dir),
                           tf.compat.as_bytes(str(model_version)))
print('Exporting trained model to', output_path)
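The export itself is cut off above; a hedged sketch of the usual TF-Serving continuation with SavedModelBuilder (the signature names here are assumptions, not taken from the original script):

builder = tf.saved_model.builder.SavedModelBuilder(output_path)
prediction_signature = tf.saved_model.signature_def_utils.predict_signature_def(
    inputs={'serialized_examples': serialized_input_data},
    outputs={'pred_sbbox': model.pred_sbbox,
             'pred_mbbox': model.pred_mbbox,
             'pred_lbbox': model.pred_lbbox})
builder.add_meta_graph_and_variables(
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={'predict_bboxes': prediction_signature})
builder.save()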
Ejemplo n.º 24
    def __init__(self, net_type):
        self.net_type = net_type
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        
        self.ckpt_path = cfg.TRAIN.CKPT_PATH        
        if not os.path.exists(self.ckpt_path):
            os.makedirs(self.ckpt_path)
        
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150

        self.log_path = ('log/%s' % net_type)
        if os.path.exists(self.log_path):
            shutil.rmtree(self.log_path)
            #os.removedirs(self.log_path)
        os.makedirs(self.log_path)

        self.trainset = Dataset('train', self.net_type)
        self.testset = Dataset('test', self.net_type)
        self.steps_per_period = len(self.trainset)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)

        with tf.name_scope('input'):
            if net_type == 'tiny':
                self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
                self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
                self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')

                self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
                self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
                self.trainable = tf.placeholder(dtype=tf.bool, name='training')

            else:                
                self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
                self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
                self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
                self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')

                self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
                self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
                self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
                self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope('define_loss'):
            if self.net_type == 'tiny':
                self.model = YOLOV3Tiny(self.input_data, self.trainable)
                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(self.label_mbbox, self.label_lbbox,
                                                                                        self.true_mbboxes, self.true_lbboxes)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss

            elif self.net_type == 'yolov3':
                self.model = YOLOV3(self.input_data, self.trainable)
                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(self.label_sbbox, self.label_mbbox, self.label_lbbox,
                                                                                        self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss
            
            elif self.net_type == 'yolov4' or self.net_type == 'yolov5':
                iou_use = 1  # (0, 1, 2) ==> (giou_loss, diou_loss, ciou_loss)
                focal_use = False  # (False, True) ==> (normal, focal_loss)
                label_smoothing = 0

                if self.net_type == 'yolov4':
                    self.model = YOLOV4(self.input_data, self.trainable)
                else:
                    self.model = YOLOV5(self.input_data, self.trainable)

                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(self.label_sbbox, self.label_mbbox, self.label_lbbox,
                                                                                        self.true_sbboxes, self.true_mbboxes, self.true_lbboxes,
                                                                                        iou_use, focal_use, label_smoothing)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss
                # self.loss = tf.Print(self.loss, [self.iou_loss, self.conf_loss, self.prob_loss], message='loss: ')
            else:
                raise ValueError('self.net_type=%s is not supported' % self.net_type)

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period, dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                       dtype=tf.float64, name='train_steps')
            
            self.learn_rate = tf.cond(pred=self.global_step < warmup_steps, true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                                      false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) * \
                                              (1 + tf.cos((self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope('define_weight_decay'):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope('define_first_stage_train'):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if net_type == 'tiny':
                    bboxes = ['conv_mbbox', 'conv_lbbox']
                else:
                    bboxes = ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']
                
                if var_name_mess[0] in bboxes:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope('define_second_stage_train'):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)

        with tf.name_scope('summary'):
            tf.summary.scalar('learn_rate', self.learn_rate)
            tf.summary.scalar('iou_loss', self.iou_loss)
            tf.summary.scalar('conf_loss', self.conf_loss)
            tf.summary.scalar('prob_loss', self.prob_loss)
            tf.summary.scalar('total_loss', self.loss)

            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(self.log_path, graph=self.sess.graph)
Ejemplo n.º 25
        var_name = var.op.name
        var_name_mess = str(var_name).split('/')
        var_shape = var.shape
        org_weights_mess.append([var_name, var_shape])
        print("=> " + str(var_name).ljust(50), var_shape)
print()
tf.reset_default_graph()

cur_weights_mess = []
# (tf.reset_default_graph() above already gives a fresh default graph)
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(1, 416, 416, 3),
                                name='input_data')
    training = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(input_data, training)
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    print(var_name_mess[0])
    cur_weights_mess.append([var_name, var_shape])
    print("=> " + str(var_name).ljust(50), var_shape)

org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
if cur_weights_num != org_weights_num:
    raise RuntimeError('weight count mismatch: %d in the current graph vs %d in the original checkpoint'
                       % (cur_weights_num, org_weights_num))

print('=> Number of weights that will be renamed:\t%d' % cur_weights_num)
cur_to_org_dict = {}
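The snippet stops before the mapping is filled in. A hedged sketch of the usual continuation: here variables are simply paired by order (an assumption; the real script may apply a name-rewriting rule instead), restored under their original names, and saved again under the current names. `org_ckpt_path` and `renamed_ckpt_path` are hypothetical paths.

for idx in range(cur_weights_num):
    cur_name, cur_shape = cur_weights_mess[idx]
    org_name, org_shape = org_weights_mess[idx]
    if cur_shape != org_shape:
        raise RuntimeError('shape mismatch: %s %s vs %s %s' % (cur_name, cur_shape, org_name, org_shape))
    cur_to_org_dict[cur_name] = org_name

name_to_var = {var.op.name: var for var in tf.global_variables()}
# Restore values stored under the original names into the current-graph variables...
loader = tf.train.Saver({org: name_to_var[cur] for cur, org in cur_to_org_dict.items()})
# ...then save them again under the current names.
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as rename_sess:
    loader.restore(rename_sess, org_ckpt_path)        # org_ckpt_path: assumed input checkpoint
    saver.save(rename_sess, renamed_ckpt_path)        # renamed_ckpt_path: assumed output checkpoint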
Ejemplo n.º 26
#
#================================================================

import tensorflow as tf
from core.yolov3 import YOLOV3

pb_file = "./yolov3_coco.pb"
ckpt_file = "./checkpoint/yolov3_test_loss=10.6896.ckpt-50"
output_node_names = [
    "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
    "pred_lbbox/concat_2"
]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

model = YOLOV3(input_data, trainable=tf.cast(False, tf.bool))
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
#
#================================================================


import tensorflow as tf
from core.yolov3 import YOLOV3
from core.config import cfg

pb_file = "./yolov3_mark_no_manualFlip_no_codeFlip.pb"
ckpt_file = cfg.TEST.WEIGHT_FILE
output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

model = YOLOV3(input_data, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess  = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(sess,
                            input_graph_def  = sess.graph.as_graph_def(),
                            output_node_names = output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())


Ejemplo n.º 28
    def __init__(self):
        self.anchor_per_scale = 3

        self.path1 = r"./data/classes/antenna.names"  # modify as needed

        self.classes = utils.read_class_names(self.path1)
        self.num_classes = len(self.classes)
        self.learn_rate_init = 1e-4     # 1e-4
        self.learn_rate_end = 1e-6     # 1e-6
        self.first_stage_epochs = 20  # 40; if the pretrained weights cannot be loaded, skip first-stage training
        self.warmup_periods = 2     # default is 23444

        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))  # initialization timestamp
        self.moving_ave_decay = 0.9995    # default: 0.9995
        self.max_bbox_per_scale = 150                           # maximum number of ground-truth boxes per image
        self.trainset = Dataset('train')              # train
        self.testset  = Dataset('test')
        self.steps_per_period = len(self.trainset)            # number of batches per epoch over the whole dataset
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('define_input'):
            self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox  = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable    = tf.placeholder(dtype=tf.bool, name='training')

        # TODO: the main part of the graph
        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                                                    self.label_sbbox,  self.label_mbbox,  self.label_lbbox,
                                                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        # learning rate schedule (exponential decay; no warmup is applied here)
        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float32, trainable=False, name='global_step')
            # exponentially decaying learning rate
            self.learn_rate = tf.train.exponential_decay(learning_rate=self.learn_rate_init,
                      global_step=self.global_step,
                      decay_steps=self.steps_per_period * 20,
                      decay_rate=0.9,
                      staircase=False,
                      name=None)
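            # With staircase=False this decays smoothly:
            #   lr(step) = learn_rate_init * decay_rate ** (step / decay_steps)
            # i.e. the rate is multiplied by 0.9 every 20 epochs' worth of batches.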
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()      # tf.no_op() does nothing itself; it only guarantees that the variable updates listed in control_dependencies have already run.

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver  = tf.train.Saver(tf.global_variables(), max_to_keep=5)
Ejemplo n.º 29
    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.train_logdir = "./data/log/train"

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        #self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
        ##[320, 352, 384, 416, 448, 480, 512, 544, 576, 608]

        #self.train_data = Dataset('train',batch_size=1,num_parallel=4)

        self.yolo_train_data = Dataset('train', num_parallel=1)
        self.yolo_val_data = Dataset('val', num_parallel=1)
        self.train_data = self.yolo_train_data.get_dataset()
        self.val_data = self.yolo_val_data.get_dataset()
        self.iterator = tf.data.Iterator.from_structure(
            self.train_data.output_types, self.train_data.output_shapes)
        self.train_init_op = self.iterator.make_initializer(self.train_data)
        self.val_init_op = self.iterator.make_initializer(self.val_data)

        self.steps_per_period = self.yolo_train_data.batches_per_epoch

        with tf.name_scope('define_input'):
            # self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            # self.label_sbbox  = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            # self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            # self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            # self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            # self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            # self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):

            self.input_data, self.label_sbbox, self.label_mbbox, self.label_lbbox,\
            self.true_sbboxes, self.true_mbboxes, self.true_lbboxes = self.iterator.get_next()

            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos((self.global_step - warmup_steps) /
                                             (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
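A hedged sketch (not part of the class) of how the initializable iterator above is typically driven; `Trainer` is an assumed name for the class, and the other attributes are the ones defined in __init__.

trainer = Trainer()
trainer.sess.run(tf.global_variables_initializer())
for epoch in range(1, trainer.first_stage_epochs + trainer.second_stage_epochs + 1):
    train_op = (trainer.train_op_with_frozen_variables
                if epoch <= trainer.first_stage_epochs
                else trainer.train_op_with_all_variables)
    trainer.sess.run(trainer.train_init_op)   # rewind the training dataset for this epoch
    while True:
        try:
            _, loss_val = trainer.sess.run([train_op, trainer.loss],
                                           feed_dict={trainer.trainable: True})
        except tf.errors.OutOfRangeError:     # dataset exhausted -> end of epoch
            break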
Ejemplo n.º 30
def create_model(
    config,
    max_box_per_image,
    warmup_batches,
    multi_gpu,
    saved_weights_name,
    lr
):
    if config["model"]["model_name"] == "yolov3":
        print('[INFO] Creating YOLOV3 model...')
        if multi_gpu > 1:
            with tf.device('/cpu:0'):
                yolo_model = YOLOV3(
                    config=config,
                    max_box_per_image=max_box_per_image,
                    batch_size=config["train"]["batch_size"] // multi_gpu,
                    warmup_batches=warmup_batches)
                template_model, infer_model = yolo_model.model()
        else:
            yolo_model = YOLOV3(
                config=config,
                max_box_per_image=max_box_per_image,
                batch_size=config["train"]["batch_size"],
                warmup_batches=warmup_batches)
            template_model, infer_model = yolo_model.model()

    elif config["model"]["model_name"] == "yolov4":
        print('[INFO] Creating YOLOV4 model...')
        if multi_gpu > 1:
            with tf.device('/cpu:0'):
                yolo_model = YOLOV4(
                    config=config,
                    max_box_per_image=max_box_per_image,
                    batch_size=config["train"]["batch_size"] // multi_gpu,
                    warmup_batches=warmup_batches)
                template_model, infer_model = yolo_model.model()
        else:
            yolo_model = YOLOV4(
                config=config,
                max_box_per_image=max_box_per_image,
                batch_size=config["train"]["batch_size"],
                warmup_batches=warmup_batches)
            template_model, infer_model = yolo_model.model()
    else:
        raise ValueError('unsupported model_name: %s' % config["model"]["model_name"])

    # load the pretrained weights if they exist (the backend-only fallback below is commented out)
    if os.path.exists(saved_weights_name):
        print("[INFO] Find pretrained weights...")
        print("\n[INFO] Loading pretrained weights...\n")
        template_model.load_weights(saved_weights_name)
    # else:
        # template_model.load_weights("backend.h5", by_name=True)

    if multi_gpu > 1:
        train_model = multi_gpu_model(template_model, gpus=multi_gpu)
    else:
        train_model = template_model

    optimizer = Adam(lr=lr, clipnorm=0.001)
    train_model.compile(loss=dummy_loss, optimizer=optimizer)

    return train_model, infer_model
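A hedged usage sketch: calling create_model with a JSON config of the shape referenced above. The config path, max_box_per_image, warmup_batches, weights file name and learning rate are assumptions for illustration only.

import json

with open("config.json") as f:                      # assumed config file
    config = json.load(f)

steps_per_epoch = 1000                              # assumed; normally len(train_generator)
train_model, infer_model = create_model(
    config=config,
    max_box_per_image=30,                           # assumed upper bound on boxes per image
    warmup_batches=3 * steps_per_epoch,             # e.g. 3 warmup epochs
    multi_gpu=1,
    saved_weights_name="yolov3_weights.h5",         # assumed checkpoint file name
    lr=1e-4)                                        # assumed initial learning rate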