Example #1
0
    def __init__(self):
        """Assemble the full YOLOv3 training graph.

        Builds, in order: input placeholders, the model and its loss,
        learning-rate/global-step variables, the Adam train op, and the
        TensorBoard summary machinery.  Everything is stored on ``self``
        so the training loop can feed/fetch the tensors later.
        """
        # Number of anchor boxes predicted at each output scale.
        self.anchor_per_scale = 3
        # Class-id -> class-name mapping.
        # NOTE(review): hard-coded absolute path — breaks on any other machine.
        self.classes = utils.read_class_names(
            "/home/Pedestrian/Documents/TensorFlow_YOLOv3-master/LabelImage_v1.8.1/data/predefined_classes.txt"
        )
        self.num_classes = len(self.classes)
        self.train_epochs = 160
        self.max_bbox_per_scale = 150
        self.trainset = Dataset()
        # One period = one pass over the training set (batches per epoch).
        self.steps_per_period = len(self.trainset)
        # log_device_placement=True logs op placement — useful for debugging.
        self.sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=True))

        # Placeholders carry no static shapes; shapes are fixed at feed time.
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            # Per-scale training label tensors (small / medium / large).
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            # Ground-truth boxes per scale.
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            # Train/eval switch fed by the training loop.
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            # Total loss = GIoU (localisation) + confidence + class probability.
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # Step counter kept as a float variable, starting at 1.0.
            self.global_step = tf.Variable(
                1.0, dtype=tf.float32, trainable=False, name='global_step'
            )  # step 1: 1.0  ==>   num * self.steps_per_period

            self.update_global_step = tf.assign_add(self.global_step, 1.0)

            # Learning rate is a variable so it can be decayed manually.
            self.learn_rate = tf.Variable(1e-4,
                                          dtype=tf.float32,
                                          trainable=False,
                                          name='learn_rate')

            # Multiplicative decay op (x0.8), run explicitly by the train loop.
            self.update_learn_rate = tf.assign(self.learn_rate,
                                               self.learn_rate * 0.8)

        with tf.name_scope("train"):
            self.optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
                self.loss, var_list=tf.trainable_variables())

            # Dependency chain: batch-norm UPDATE_OPS first, then the
            # optimizer + step increment; a single no-op triggers the chain.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [self.optimizer, self.update_global_step]):
                    self.train_op_with_trainable_variables = tf.no_op()

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # Recreate the log directory from scratch on every run.
            logdir = "log"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
Example #2
0
import tensorflow as tf
from core.yolov3 import YOLOV3
from core.utils import load_weights

# Conversion settings: input resolution, tiny-variant switch, and the
# source (darknet) / destination (TF checkpoint) weight files.
INPUT_SIZE = 608
TINY = True
darknet_weights = '/home/tk/Desktop/machine_learning/tensorFlowTrain/models_custom/object_detection/tfyolov3/darknet_weights/bird_tiny.weights'
ckpt_file = './checkpoint/bird-tiny.ckpt'

# Build the inference graph that the darknet weights will be loaded into.
input_data = tf.placeholder(dtype=tf.float32,
                            shape=(None, INPUT_SIZE, INPUT_SIZE, 3),
                            name='inputs')
model = YOLOV3(input_data, trainable=False, tiny=TINY)

# Assign ops that copy the darknet weight values onto the graph variables.
load_ops = load_weights(tf.global_variables(), darknet_weights)
saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    # Run the assignments, then persist everything as a TF checkpoint.
    sess.run(load_ops)
    saved_to = saver.save(sess, save_path=ckpt_file)
    print('Model saved in path: {}'.format(saved_to))
import core.backbone as backbone

# Freeze the YOLOv3-on-SqueezeNet graph into a single .pb file.
pb_file = "./yolov3_kitti.pb"
ckpt_file = "./checkpoint/yolov3_test_loss=nan.ckpt-1"
output_node_names = [
    "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
    "pred_lbbox/concat_2"
]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

# FIX: use a distinct name for the network instance so the `backbone`
# module import is not shadowed by the local object.
feature_extractor = backbone.squeezenet('sqz_full.mat')
feature_extractor.forward(input_data)

model = YOLOV3()
model.forward(feature_extractor, False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

# Bake the restored variable values into the graph as constants.
converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
Example #4
0
    def __init__(self, net_type):
        """Build the complete two-stage training graph for a YOLO variant.

        Args:
            net_type: one of 'tiny', 'yolov3', 'yolov4' or 'yolov5'; selects
                the model class and which detection scales exist.
        """
        self.net_type = net_type
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)

        # End points of the warm-up + cosine learning-rate schedule.
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT

        self.ckpt_path = cfg.TRAIN.CKPT_PATH
        if not os.path.exists(self.ckpt_path):
            # BUG FIX: was `os.makdirs`, which raised AttributeError.
            os.makedirs(self.ckpt_path)

        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150

        self.train_logdir = ('./%s/log/train' % net_type)
        if not os.path.exists(self.train_logdir):
            os.makedirs(self.train_logdir)

        self.trainset = Dataset('train', self.net_type)
        self.testset = Dataset('test', self.net_type)
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('input'):
            if net_type == 'tiny':
                # The tiny network predicts only at medium and large scales.
                self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
                self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
                self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')

                self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
                self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
                self.trainable = tf.placeholder(dtype=tf.bool, name='training')

            else:
                self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
                self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
                self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
                self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')

                self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
                self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
                self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
                self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope('define_loss'):
            if self.net_type == 'tiny':
                self.model = YOLOV3Tiny(self.input_data, self.trainable)
                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                    self.label_mbbox, self.label_lbbox,
                    self.true_mbboxes, self.true_lbboxes)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss

            elif self.net_type == 'yolov3':
                self.model = YOLOV3(self.input_data, self.trainable)
                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                    self.label_sbbox, self.label_mbbox, self.label_lbbox,
                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss

            elif self.net_type == 'yolov4' or self.net_type == 'yolov5':
                iou_use = 1  # (0, 1, 2) ==> (giou_loss, diou_loss, ciou_loss)
                focal_use = False  # (False, True) ==> (normal, focal_loss)
                label_smoothing = 0

                if self.net_type == 'yolov4':
                    self.model = YOLOV4(self.input_data, self.trainable)
                else:
                    self.model = YOLOV5(self.input_data, self.trainable)

                self.net_var = tf.global_variables()
                self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                    self.label_sbbox, self.label_mbbox, self.label_lbbox,
                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes,
                    iou_use, focal_use, label_smoothing)
                self.loss = self.iou_loss + self.conf_loss + self.prob_loss
            else:
                print('self.net_type=%s error' % self.net_type)

        with tf.name_scope('learn_rate'):
            # Linear warm-up followed by a cosine decay from learn_rate_init
            # down to learn_rate_end.
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period, dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                      dtype=tf.float64, name='train_steps')

            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                (1 + tf.cos((self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope('define_weight_decay'):
            # Exponential moving average of the weights, updated after each step.
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope('define_first_stage_train'):
            # Stage 1 trains only the detection-head conv layers; the backbone
            # stays frozen.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')

                # BUG FIX: the original had a SyntaxError (`else` without a
                # colon, `inbboxes` without a space) and the two head lists
                # were swapped — the tiny network has no conv_sbbox head,
                # while the full networks have all three.
                if net_type == 'tiny':
                    bboxes = ['conv_mbbox', 'conv_lbbox']
                else:
                    bboxes = ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']

                if var_name_mess[0] in bboxes:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=self.first_stage_trainable_var_list)
            # Order: batch-norm updates -> optimizer + step -> moving average.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope('define_second_stage_train'):
            # Stage 2 fine-tunes every trainable variable.
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            # Loader restores only the network variables; saver checkpoints all.
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)

        with tf.name_scope('summary'):
            tf.summary.scalar('learn_rate', self.learn_rate)
            tf.summary.scalar('iou_loss', self.iou_loss)
            tf.summary.scalar('conf_loss', self.conf_loss)
            tf.summary.scalar('prob_loss', self.prob_loss)
            tf.summary.scalar('total_loss', self.loss)

            # Recreate the summary directory from scratch each run.
            logdir = ('./%s/log/' % self.net_type)
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.makedirs(logdir)

            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
import tensorflow as tf
from core.backbone import darknet53
from core.yolov3 import YOLOV3


def stats_graph(graph):
    """Print the total FLOPs and trainable-parameter count of *graph*."""
    flop_opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(graph, options=flop_opts)
    param_opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    params = tf.profiler.profile(graph, options=param_opts)
    print('FLOPs: {};    Trainable params: {}'.format(flops.total_float_ops,
                                                      params.total_parameters))


# Build a throwaway YOLOv3 graph on a fixed 1x544x544x3 input and report
# its FLOPs / parameter counts.
with tf.Graph().as_default() as graph:
    dummy_input = tf.placeholder(dtype=tf.float32,
                                 shape=[1, 544, 544, 3],
                                 name="input")
    out = YOLOV3(dummy_input, True, 1)
    stats_graph(graph)
#! /usr/bin/env python
# coding=utf-8

import tensorflow as tf
from core.yolov3 import YOLOV3
from tensorflow.saved_model import signature_def_utils, signature_constants, tag_constants
from tensorflow.saved_model import utils as save_model_utils

# Expected input geometry for the SavedModel export below.
img_size = 608
num_channels = 3

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

# Build the inference-only graph and show its three head tensors.
model = YOLOV3(input_data, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)
num_vars = len(tf.trainable_variables())
print("{} trainable variables".format(num_vars))


def model_transfer(savemodel_file_path, ckpt_file):
    """Restore *ckpt_file* into a session and build a SavedModel signature.

    NOTE(review): this definition is cut off in this excerpt — the call to
    ``build_signature_def`` continues beyond the visible lines.
    """
    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_file)

        # Look up the input and the multi-scale prediction tensors by op name.
        x_op = sess.graph.get_operation_by_name('input/input_data')
        x = x_op.outputs[0]
        pred_op = sess.graph.get_operation_by_name('pred_multi_scale/concat')
        pred = pred_op.outputs[0]

        print("prediction signature")
        prediction_signature = signature_def_utils.build_signature_def(
            inputs={"input": save_model_utils.build_tensor_info(x)},
Example #7
0
    def __init__(self, opts):
        """Build the IPU-targeted YOLOv3 evaluation graph from config *opts*.

        Depending on ``use_infeed_queue`` the graph is driven either through
        an IPU infeed/outfeed queue pair or through plain feed-dict
        placeholders; finally the checkpoint named by ``weight_file`` is
        restored (EMA shadow variables if ``use_moving_avg`` is set).
        """
        # --- test-time configuration pulled from the nested opts dict ---
        self.input_size = opts["test"]["input_size"]
        self.classes = utils.read_class_names(opts["yolo"]["classes"])
        self.num_classes = len(self.classes)
        self.score_threshold = opts["test"]["score_threshold"]
        self.iou_threshold = opts["test"]["iou_threshold"]
        self.moving_avg_decay = opts["yolo"]["moving_avg_decay"]
        self.annotation_path = opts["test"]["annot_path"]
        self.weight_file = opts["test"]["weight_file"]
        self.write_image = opts["test"]["write_image"]
        self.write_image_path = opts["test"]["write_image_path"]
        self.show_label = opts["test"]["show_label"]
        self.batch_size = opts["test"]["batch_size"]
        # fp16 runs the network in half precision; anything else uses fp32.
        self.precision = tf.float16 if opts["yolo"]["precision"] == "fp16" else tf.float32
        self.use_moving_avg = opts["yolo"]["use_moving_avg"]
        self.repeat_count = opts["test"]["repeat_count"]
        self.use_infeed_queue = opts["test"]["use_infeed_queue"]
        self.predicted_file_path = opts["test"]["predicted_file_path"]
        self.ground_truth_file_path = opts["test"]["ground_truth_file_path"]
        self.meta_dict = {}
        self.testset = Dataset("test", opts)

        # Configure arguments for targeting the IPU
        config = ipu.config.IPUConfig()
        config.auto_select_ipus = 1
        config.configure_ipu_system()

        model = YOLOV3(False, opts)
        # construct model
        # we will put whole network on one ipu
        layers = []
        # build layer functions for backbone and upsample
        layers.extend(model.build_backbone())
        # last layer of darknet53 is classification layer, so it have 52 conv layers
        assert len(layers) == 52
        layers.extend(model.build_upsample())
        # there is 25 conv layers if we count upsmaple as a conv layer
        assert len(layers) == 52+25
        # decoding layer and loss layer is always put on last IPU
        layers.append(model.decode_boxes)

        # reuse stages_constructor so we don't need to pass params by hand
        network_func = stages_constructor(
            [layers],
            ["input_data", "nums"],
            ["pred_sbbox", "pred_mbbox", "pred_lbbox", "nums"])[0]
        input_shape = (self.batch_size, self.input_size, self.input_size, 3)
        self.lines, self.image_dict = self.load_data()
        if self.use_infeed_queue:
            # The dataset for feeding the graphs
            def data_gen():
                return self.data_generator()
            with tf.device("cpu"):
                ds = tf.data.Dataset.from_generator(data_gen,
                                                    output_types=(tf.float16, tf.int32),
                                                    output_shapes=(input_shape, (self.batch_size,))
                                                    )
            ds = ds.repeat()
            ds = ds.prefetch(self.repeat_count*10)
            # The host side queues
            infeed_queue = ipu_infeed_queue.IPUInfeedQueue(ds)
            outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()

            def model_func(input_data, nums):
                # Run the network and push its three-scale predictions to host.
                pred_sbbox, pred_mbbox, pred_lbbox, nums = network_func(input_data, nums)
                outfeed = outfeed_queue.enqueue(
                    {"pred_sbbox": pred_sbbox, "pred_mbbox": pred_mbbox, "pred_lbbox": pred_lbbox, "nums": nums})
                return outfeed

            def my_net():
                # Repeat model_func on-device, fed from the infeed queue.
                r = loops.repeat(self.repeat_count,
                                 model_func, [], infeed_queue)
                return r

            with ipu.scopes.ipu_scope("/device:IPU:0"):
                self.run_loop = ipu.ipu_compiler.compile(
                    my_net, inputs=[])
            # The outfeed dequeue has to happen after the outfeed enqueue
            self.dequeue_outfeed = outfeed_queue.dequeue()
            self.sess = tf.Session(config=tf.ConfigProto())
            self.sess.run(infeed_queue.initializer)
        else:
            # if using feed dict, it will be simpler
            # the cost is throughput
            with tf.device("cpu"):
                with tf.name_scope("input"):
                    # three channel images
                    self.input_data = tf.placeholder(
                        shape=input_shape, dtype=self.precision, name="input_data")
                    # NOTE(review): shape=(self.batch_size) is a bare int, not a
                    # tuple; (self.batch_size,) would state the intent clearly.
                    self.nums = tf.placeholder(
                        shape=(self.batch_size), dtype=tf.int32, name="nums")

            with ipu.scopes.ipu_scope("/device:IPU:0"):
                self.output = ipu.ipu_compiler.compile(
                    network_func, [self.input_data, self.nums])

            self.sess = tf.Session(
                config=tf.ConfigProto())
        if self.use_moving_avg:
            # Restore the EMA shadow variables instead of the raw weights.
            with tf.name_scope("ema"):
                ema_obj = tf.train.ExponentialMovingAverage(
                    self.moving_avg_decay)
            self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        else:
            self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weight_file)
Example #8
0
    def __init__(self):
        """Build the YOLOv3 two-stage training graph: inputs, loss,
        warm-up + cosine learning rate, EMA, train ops and summaries."""
        # Number of anchors per output scale
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # Class-id -> class-name mapping
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # Number of classes
        self.num_classes = len(self.classes)
        # Initial learning rate
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        # Final (minimum) learning rate
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # Number of epochs in the first training stage
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        # Number of epochs in the second training stage
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        # Warm-up epochs for the learning-rate schedule
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        # Path of the initial weights to restore for training
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        # Current timestamp (used to tag checkpoints)
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        # Decay coefficient for the weights' exponential moving average
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # Maximum number of boxes allowed per scale
        self.max_bbox_per_scale = 150
        # Directory for the training logs
        self.train_logdir = "./data/log/train"
        # Load the training data
        # TODO: adapt to support custom datasets
        self.trainset = Dataset('train')
        # Load the test data
        self.testset = Dataset('test')
        # Number of batches per epoch
        self.steps_per_period = len(self.trainset)
        # Create the session
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # Define the network inputs.
        # Note: each scale yields confidence and box outputs, i.e. one input
        # corresponds to two outputs per scale — six outputs in total here.
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')  # candidate boxes
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')  # box coordinates
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool,
                                            name='training')  # train vs. test switch

        # Define the network and its loss function
        with tf.name_scope("define_loss"):
            # Build the model
            self.model = YOLOV3(self.input_data, self.trainable)

            # All model variables currently in the graph
            self.net_var = tf.global_variables()

            # Localisation (GIoU), confidence and class-probability losses
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)

            # Combine the three losses into a single objective
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # Global step counter
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')

            # Warm-up threshold for the learning-rate schedule
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            # Total number of training steps
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')

            # Below the warm-up threshold the rate ramps up linearly;
            # afterwards it follows a cosine decay down to learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step <
                warmup_steps,  # warm-up phase; train_steps and warmup_steps are constants
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))

            # Op that increments the global step
            global_step_update = tf.assign_add(self.global_step, 1.0)

        # Exponential moving average of the model weights (smooths the model)
        with tf.name_scope("define_weight_decay"):
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay) \
                .apply(tf.trainable_variables())

        # First training stage: only the detection heads are trainable
        with tf.name_scope("define_first_stage_train"):
            # Collect the first-stage trainable variables (head convs only)
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            # First-stage optimizer
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=self.first_stage_trainable_var_list)

            # Dependency chain: run the batch-norm updates first, then the
            # optimizer + step update, and finally the moving-average update.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):  # batch norm
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # No-op handle that triggers the whole chain
                        self.train_op_with_frozen_variables = tf.no_op()

        # Second training stage: fine-tune all variables
        with tf.name_scope("define_second_stage_train"):
            # All trainable variables
            second_stage_trainable_var_list = tf.trainable_variables()

            # Second-stage optimizer
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=second_stage_trainable_var_list)

            # Same dependency chain as in the first stage
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # No-op handle that triggers the whole chain
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            # Loader restores only the network (forward-pass) variables;
            # saver checkpoints everything.
            self.loader = tf.train.Saver(
                self.net_var)  # network parameters only
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

        with tf.name_scope('summary'):
            # TensorBoard scalars
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # Recreate the summary log directory from scratch
            logdir = "./data/log2/"
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.mkdir(logdir)

            # Merged summary op and the writer that records the graph
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)

        print("初始化完成.....")  # "initialization complete" (runtime string kept as-is)
def export() -> None:
    """Export the trained YOLOV3 checkpoint for serving.

    Pipeline: restore the latest checkpoint -> freeze variables into
    constants -> run the graph-transform optimizer -> re-import the
    optimized graph -> write a TF-Serving SavedModel with an
    object-detection PREDICT signature -> convert that SavedModel with
    TensorRT.  All paths / tensor names come from the module-level
    ``args`` namespace; the function only writes artifacts to disk and
    returns nothing.
    """
    # Serving input: a batch of serialized (encoded) image strings.
    serialized_tf_example = tf.placeholder(tf.string,
                                           shape=[None, 1],
                                           name='encoded_image_tensor')
    # Decode/resize each element independently into a float32 image tensor.
    images = tf.map_fn(preprocess_image, serialized_tf_example, tf.float32)

    # Build the inference graph (trainable=False: eval-mode batch norm).
    model = YOLOV3(images, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

    # Restore the latest checkpoint into this graph.
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(args.ckpt_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)

    # Freeze: fold the restored variable values into graph constants and keep
    # only the subgraph needed for the requested output nodes.
    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(args.freeze_graph_dir, "wb") as f:
        f.write(converted_graph_def.SerializeToString())

    # Apply the graph-transform pipeline (pruning/folding) to the frozen graph.
    optimize_graph('',
                   args.freeze_graph_dir,
                   transforms,
                   output_node_names,
                   outname=args.optimizer_graph_dir)

    graph_def = get_graph_def_from_file(args.optimizer_graph_dir)

    # Import the optimized frozen graph into a fresh graph and keep BOTH the
    # post-processing ops below and the session inside that graph context.
    # (The original code imported into a temporary graph but opened the
    # session outside it, on the old default graph, so the SavedModel would
    # have been built from the unfrozen graph and its uninitialized
    # variables.)
    with tf.Graph().as_default():
        tf.import_graph_def(graph_def,
                            input_map=None,
                            return_elements=None,
                            name="")

        with tf.Session() as sess:
            # Export inference model.
            output_path = os.path.join(
                tf.compat.as_bytes(args.output_dir),
                tf.compat.as_bytes(str(args.model_version)))
            print('Exporting trained model to', output_path)
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)

            input_tensor = tf.get_default_graph().get_tensor_by_name(
                args.input_tensor)
            sbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.sbbox_tensor)
            mbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.mbbox_tensor)
            lbbox_tensor = tf.get_default_graph().get_tensor_by_name(
                args.lbbox_tensor)
            class_num = args.class_num

            # Flatten each prediction scale to (batch, n_boxes, 5 + classes)
            # and stack the three scales along the box axis.
            batch_size = tf.shape(input_tensor)[0]
            sbbox_tensor = tf.reshape(sbbox_tensor,
                                      shape=[batch_size, -1, 5 + class_num])
            mbbox_tensor = tf.reshape(mbbox_tensor,
                                      shape=[batch_size, -1, 5 + class_num])
            lbbox_tensor = tf.reshape(lbbox_tensor,
                                      shape=[batch_size, -1, 5 + class_num])
            output_tensor = tf.concat(
                [sbbox_tensor, mbbox_tensor, lbbox_tensor], 1)

            # Fixed "number of detections" output (always 100 per image).
            num_tensor = tf.cast(tf.tile([[100]], multiples=[batch_size, 1]),
                                 dtype=tf.int32)

            classes_tensor = tf.argmax(output_tensor[:, :, 5:], axis=2)
            scores_tensor = output_tensor[:, :, 4]
            # Normalize (cx, cy, w, h) to [0, 1] image coordinates.
            raw_boxs_tensor = output_tensor[:, :, 0:4] / IMAGE_SIZE
            print(raw_boxs_tensor.shape)

            # Convert center/size boxes to corner boxes, ordered
            # (ymin, xmin, ymax, xmax) as TF NMS and the serving API expect.
            boxs_tensor_minx = (raw_boxs_tensor[:, :, 0] -
                                raw_boxs_tensor[:, :, 2] * 0.5)
            boxs_tensor_miny = (raw_boxs_tensor[:, :, 1] -
                                raw_boxs_tensor[:, :, 3] * 0.5)
            boxs_tensor_maxx = (raw_boxs_tensor[:, :, 0] +
                                raw_boxs_tensor[:, :, 2] * 0.5)
            boxs_tensor_maxy = (raw_boxs_tensor[:, :, 1] +
                                raw_boxs_tensor[:, :, 3] * 0.5)
            boxs_tensor_minx = tf.expand_dims(boxs_tensor_minx, 2)
            boxs_tensor_miny = tf.expand_dims(boxs_tensor_miny, 2)
            boxs_tensor_maxx = tf.expand_dims(boxs_tensor_maxx, 2)
            boxs_tensor_maxy = tf.expand_dims(boxs_tensor_maxy, 2)

            boxs_tensor_com = tf.concat([
                boxs_tensor_miny, boxs_tensor_minx, boxs_tensor_maxy,
                boxs_tensor_maxx
            ], 2)

            # Per-image NMS: pack boxes(4) + score(1) + class(1) and map the
            # project-level preprocess_nms over the batch.
            scores_tensor_map = tf.expand_dims(scores_tensor, 2)
            classes_tensor_map = tf.expand_dims(classes_tensor, 2)
            classes_tensor_map = tf.cast(classes_tensor_map, dtype=tf.float32)
            nms_tensor_map = tf.concat(
                [boxs_tensor_com, scores_tensor_map, classes_tensor_map], 2)
            tensor_result = tf.map_fn(preprocess_nms,
                                      nms_tensor_map,
                                      tf.float32,
                                      name="map2")

            # NOTE(review): columns 4 and 5 are read here as class/score in
            # the opposite order from how nms_tensor_map was packed above;
            # presumably preprocess_nms reorders its output columns --
            # confirm against its definition.
            scores_tensor_info = tf.saved_model.utils.build_tensor_info(
                tensor_result[:, :, 5])
            classes_tensor_info = tf.saved_model.utils.build_tensor_info(
                tf.cast(tensor_result[:, :, 4], dtype=tf.int64))
            boxes_tensor_info = tf.saved_model.utils.build_tensor_info(
                tensor_result[:, :, 0:4])
            num_tensor_info = tf.saved_model.utils.build_tensor_info(
                num_tensor)
            inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                input_tensor)

            tensor_info_inputs = {'inputs': inputs_tensor_info}
            print(scores_tensor_info, inputs_tensor_info, classes_tensor_info,
                  boxes_tensor_info, num_tensor_info)

            # Standard object-detection PREDICT signature.
            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs=tensor_info_inputs,
                    outputs={
                        'detection_scores': scores_tensor_info,
                        'detection_classes': classes_tensor_info,
                        'detection_boxes': boxes_tensor_info,
                        'num_detections': num_tensor_info,
                    },
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            builder.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    prediction_signature,
                },
            )

            builder.save()
            print('Successfully exported model to %s' % args.output_dir)

            print("start tensorrt optimization")
            # Convert the SavedModel that was just written above (the
            # original pointed at a hard-coded absolute path in a specific
            # user's home directory).
            converter = trt.TrtGraphConverter(
                input_saved_model_dir=os.path.join(args.output_dir,
                                                   str(args.model_version)),
                max_batch_size=16,
                max_workspace_size_bytes=4000000000,
                is_dynamic_op=True,
                precision_mode="FP16")
            graph_def = converter.convert()
            converter.save(args.output_trt_dir)
Exemple #10
0
#================================================================

import tensorflow as tf
from core.yolov3 import YOLOV3

# Destination for the frozen inference graph and the checkpoint to freeze.
pb_file = "./yolov3_coco.pb"
ckpt_file = "./checkpoint/yolov3_test_loss=12.5364.ckpt-50"
# Nodes to keep in the frozen graph: the input placeholder and the three
# decoded prediction branches (small / medium / large scale).
output_node_names = [
    "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
    "pred_lbbox/concat_2"
]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

# Build the network in inference mode.  Freezing an evaluation graph must use
# trainable=False so batch-norm layers use their moving statistics; the
# original had trainable=True here (with the correct call commented out),
# which would bake training-mode batch norm into the exported .pb.
model = YOLOV3(input_data, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

# Fold the restored variable values into constants, keeping only the subgraph
# needed to compute the requested output nodes.
converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
Exemple #11
0
        # NOTE(review): this scraped snippet is truncated -- this print is the
        # tail of a loop whose header is missing, and `var_name` / `var_shape`
        # are undefined at this point.
        print("=> " + str(var_name).ljust(50), var_shape)
print()
# Discard everything built so far so the IPU model starts from a clean graph.
tf.reset_default_graph()

# Configure the Graphcore IPU system (`ipu` comes from the TF IPU add-on).
cfg = ipu.config.IPUConfig()
cfg.configure_ipu_system()

cur_weights_mess = []
# NOTE(review): calling as_default() without `with` returns an unused context
# manager and does NOT switch the default graph -- presumably this was meant
# to be a `with tf.Graph().as_default():` block; confirm intent.
tf.Graph().as_default()
with tf.name_scope("input"):
    # Fixed-shape input: one 416x416 RGB image per step.
    input_data = tf.placeholder(dtype=tf.float32, shape=(1, 416, 416, 3), name="input_data")

# converge to a ckpt with fp32
# then use another script to convert to fp16 if needed
opts["yolo"]["precision"] = "fp32"
model = YOLOV3(False, opts)

# construct model
# we will put whole network on one ipu
layers = []
# build layer functions for backbone and upsample
layers.extend(model.build_backbone())
# last layer of darknet53 is classification layer, so it have 52 conv layers
assert len(layers) == 52
layers.extend(model.build_upsample())
# there is 25 conv layers if we count upsmaple as a conv layer
assert len(layers) == 52+25
# decoding layer and loss layer is always put on last IPU
layers.append(model.decode_boxes)

# reuse stages_constructor so we don't need to pass params by hand
Exemple #12
0
# These default tensor names can be checked via the
# print(model.pred_sbbox, model.pred_mbbox, model.pred_lbbox) call below,
# which prints:
# Tensor("pred_sbbox/concat_2:0", shape=(?, ?, ?, 3, 85), dtype=float32) \
# Tensor("pred_mbbox/concat_2:0", shape=(?, ?, ?, 3, 85), dtype=float32) \
# Tensor("pred_lbbox/concat_2:0", shape=(?, ?, ?, 3, 85), dtype=float32)
# So make sure you know where the "concat_2" names come from!
output_node_names = [
    "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
    "pred_lbbox/concat_2"
]

# Define the model input.
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

model = YOLOV3(
    input_data,
    trainable=False)  # before restoring, the network structure must be defined once so the variable values can be restored into it; note trainable=False here

print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)
print(model.pred_sbbox, model.pred_mbbox, model.pred_lbbox)

# NOTE(review): `ckpt_file` is defined outside this scraped snippet.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),  # extract the graph definition
    output_node_names=output_node_names)  # names of the nodes that must be kept
# Only with the output nodes defined can freezing keep every node the outputs
# depend on and decide which nodes can be discarded.
Exemple #13
0
    def __init__(self):
        """Build the complete two-stage YOLOv3 training graph.

        Reads all hyper-parameters from the global ``cfg``, loads the train
        and test datasets, constructs the model + losses, a warmup/cosine
        learning-rate schedule, EMA weight averaging, two train ops (frozen
        backbone vs. all variables), saver/loader, and TensorBoard summaries.
        Also creates a timestamped output folder tagged with the current git
        commit and copies the config file into it.
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.chkpnt_to_restore = cfg.TRAIN.RESTORE_CHKPT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # Upper bound on ground-truth boxes kept per scale.
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        # One period = one pass over the training set (steps per epoch).
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.folder_name = cfg.YOLO.ROOT_DIR + cfg.YOLO.EXP_DIR

        with tf.name_scope('output_folder'):
            # Tag the experiment folder with timestamp + current git commit.
            timestr = datetime.datetime.now().strftime('%d%h%y_%H%M')
            # NOTE(review): `git branch` is shelled out repeatedly inside the
            # loop and `gitBranch` is never used afterwards -- presumably
            # leftover; the '*' marker precedes the current branch name.
            for i in range(0,
                           len(sp.getstatusoutput('git branch')[1].split())):
                if sp.getstatusoutput('git branch')[1].split()[i] == '*':
                    gitBranch = sp.getstatusoutput('git branch')[1].split()[i +
                                                                            1]
            gitCommitID = sp.getstatusoutput('git rev-parse --short HEAD')[1]
            # NOTE(review): folder_name is a string, so [0] and [1] are its
            # first two characters -- looks like this meant to join ROOT_DIR
            # and EXP_DIR; confirm against cfg.
            self.output_folder = os.path.join(self.folder_name[0] +
                                              self.folder_name[1] + '_' +
                                              timestr + '_' + gitCommitID)
            if not os.path.exists(self.output_folder):
                os.makedirs(self.output_folder)
            # Snapshot the config used for this run next to its outputs.
            cfg_new_path = os.path.join(self.output_folder, 'configFile.txt')
            shutil.copyfile(
                '/home/tamar/RecceLite_code_packages/yolo3_baseline2/core/config.py',
                cfg_new_path)

        # Placeholders are shape-less to allow multi-scale training.
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            # Captured BEFORE any optimizer variables exist, so the loader
            # restores only the network's forward-pass variables.
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # Incremented once per train step; drives the LR schedule.
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            # Linear warmup to learn_rate_init, then cosine decay down to
            # learn_rate_end over the remaining steps.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # Exponential moving average over all trainable variables.
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            # Stage 1: train only the three detection-head conv layers.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            # Order: BN update ops -> optimizer step + global step -> EMA.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # no_op that merely triggers the chained dependencies.
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            # Stage 2: fine-tune every trainable variable.
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            # loader restores only forward-pass variables; saver checkpoints
            # everything (including optimizer state).
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # Fresh log dir per run; any previous logs are wiped.
            logdir = self.output_folder + "/log/"
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer_train = tf.summary.FileWriter(
                logdir + "/train", graph=self.sess.graph)
            self.summary_writer_test = tf.summary.FileWriter(
                logdir + "/test", graph=self.sess.graph)
Exemple #14
0
    def __init__(self):
        """Build the two-stage YOLOv3 (optionally tiny) training graph.

        Sets up datasets, the model with a weighted combined loss, a
        warmup/cosine learning-rate schedule, EMA weight averaging, the two
        train ops (detection heads only vs. all variables), saver/loader and
        TensorBoard summaries (scalars + input images).
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # Upper bound on ground-truth boxes kept per scale.
        self.max_bbox_per_scale = 100
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        # Steps per epoch.
        self.steps_per_period = len(self.trainset)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # Shape-less placeholders allow multi-scale training inputs.
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            # Extra placeholder used only for the image summary below.
            self.input_image_with_bboxes = tf.placeholder(dtype=tf.float32,
                                                          name='input_image')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            # NOTE(review): trainable is hard-coded True here while the
            # self.trainable placeholder above goes unused -- confirm intent.
            self.model = YOLOV3(self.input_data, trainable=True, tiny=TINY)
            # Captured BEFORE optimizer variables exist, so the loader
            # restores only forward-pass variables.
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + 2 * self.conf_loss + 5 * self.prob_loss  # weighted sum: rebalance the influence of the three loss terms

        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')

            # tf.cond: if pred, return fn1, else fn2.  Warmup mirrors the
            # original darknet burn_in (power=1): the learning rate grows
            # linearly with steps, then follows a cosine-decay curve.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # ExponentialMovingAverage: update parameters via moving average.
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            # Stage 1: train only the last conv layer of each of the three
            # detection branches.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)

            # tf.control_dependencies(): the inner ops only run after the ops
            # listed in the parentheses have run.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            # Stage 2: train all layers, i.e. fine-tuning.
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            # loader restores only forward-pass variables; saver checkpoints
            # everything (including optimizer state).
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('learning_rate'):
            tf.summary.scalar("learn_rate", self.learn_rate)
        with tf.name_scope('loss'):
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)
        with tf.name_scope('images'):
            tf.summary.image("input_data", self.input_data)
            tf.summary.image("input_image", self.input_image_with_bboxes)
        # Fresh log dir per run; any previous logs are wiped.
        logdir = "./data/log/"
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.mkdir(logdir)
        self.write_op = tf.summary.merge_all()

        # Separate merged summary for the test phase.
        loss = tf.summary.scalar("test_loss", self.loss)
        test_img = tf.summary.image("input_data", self.input_data)
        self.write_op_test = tf.summary.merge([loss, test_img])
        self.summary_writer = tf.summary.FileWriter(logdir,
                                                    graph=self.sess.graph)
Exemple #15
0
    def __init__(self):
        # 每一层上Anchor的数目
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # 类别
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # 类别数目
        self.num_classes = len(self.classes)
        # 初始化学习率
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        # 最小学习率
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        # 第一阶段的阶段数
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        # 第二阶段的阶段数
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        # 学习率变化参数
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        # 模型训练持久化恢复对应的文件路径
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        # 当前时间信息
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        # 滑动平均的系数值(用于训练数据对应的滑动平均系数)
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # 每个尺度最多允许的边框数目
        self.max_bbox_per_scale = 150
        # 训练数据所在的文件路径
        self.train_logdir = "./data/log/train"

        self.trainset = Dataset('train')  # 加载训练数据
        self.testset = Dataset('test')  # 加载测试数据
        self.steps_per_period = len(self.trainset)  # 总训练数据

        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        # 定义网络输入
        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')  # 输入的原始图像
            self.label_sbbox = tf.placeholder(
                dtype=tf.float32,
                name='label_sbbox')  # 三个分支上各个anchor box对应的目标属性
            self.label_mbbox = tf.placeholder(
                dtype=tf.float32,
                name='label_mbbox')  # 三个分支上各个anchor box对应的目标属性
            self.label_lbbox = tf.placeholder(
                dtype=tf.float32,
                name='label_lbbox')  # 三个分支上各个anchor box对应的目标属性
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')  # 每个图像的实际边框信息
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')  # 每个图像的实际边框信息
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')  # 每个图像的实际边框信息
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

            # # fixme 这里是为了debug演示后面的尺度,实际训练是多尺度训练(上面的代码),shape不固定的。
            # N = cfg.TRAIN.BATCH_SIZE
            # self.input_data = tf.placeholder(tf.float32, [N, 416, 416, 3], name='input_data')  # 输入的原始图像
            # self.label_sbbox = tf.placeholder(tf.float32, [N, 52, 52, 3, 6], name='label_sbbox')  # 三个分支上各个anchor box对应的目标属性
            # self.label_mbbox = tf.placeholder(tf.float32, [N, 26, 26, 3, 6], name='label_mbbox')  # 三个分支上各个anchor box对应的目标属性
            # self.label_lbbox = tf.placeholder(tf.float32, [N, 13, 13, 3, 6], name='label_lbbox')  # 三个分支上各个anchor box对应的目标属性
            # self.true_sbboxes = tf.placeholder(tf.float32, [N, None, 4], name='sbboxes')  # [N, M, 4]  M代表一个图片上真实边框数量
            # self.true_mbboxes = tf.placeholder(tf.float32, [N, None, 4], name='mbboxes')
            # self.true_lbboxes = tf.placeholder(tf.float32, [N, None, 4], name='lbboxes')
            # self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        # 定义网络及损失函数
        with tf.name_scope("define_loss"):
            print("开始构建网络前向过程....")
            # 构建模型(包括所有的前向过程都完成了)
            self.model = YOLOV3(self.input_data, self.trainable)

            # 获取网络中所有的模型变量
            self.net_var = tf.global_variables()  # 366

            # 构建计算位置、可信度以及概率损失函数
            # NOTE: YOLOv3中代码的损失函数的构建和PPT上有一定的区别
            print("开始构建损失函数.....")
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)

            # 三个损失函数合并成一个损失函数
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # 构建一个全局变量对象(每更新一次模型参数,该值累加1)
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')

            # 学习率变化阈值(默认两个批次之前)《更新次数在这个之前的时候,学习率是增大的,在这个之后的位置,学习率是减小的》
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            # 总训练步骤(总训练批次:第一阶段训练次数<第一阶段是指部分参数微调> + 第二阶段训练次数<全部所有参数调整>)
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')

            # 在两个批次之前,如果训练迭代次数小于阈值,那么设置为初始概率的一部分。
            # 当批次超过阈值之后,做一个学习率转换
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))

            # 定义全局步骤变量更新参数
            global_step_update = tf.assign_add(self.global_step, 1.0)

        # 定义模型参数滑动平均更新(目的:为了让模型参数更加平滑)
        with tf.name_scope("define_weight_decay"):
            # trainable_variables = decay * moving_ave + (1 - decay) * trainable_variables
            moving_ave = tf.train.ExponentialMovingAverage(decay=self.moving_ave_decay) \
                .apply(tf.trainable_variables())

        # 第一阶段的模型训练相关参数设置(只是对最终输出的位置参数进行调整)
        print("第一阶段训练对象构建....")
        with tf.name_scope("define_first_stage_train"):
            # 获取第一阶段模型训练相关参数
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():  # 遍历所有的训练参数
                var_name = var.op.name  # 获取得到这个参数的名称
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            # 第一阶段的模型优化器
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=self.first_stage_trainable_var_list)

            # 加入依赖控制
            # 先做BN的更新操作、再进行模型训练/步骤参数更新、最终更新模型参数值
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # 定义一个训练操作(实际上不做任何操作,仅仅触发所有操作)
                        self.train_op_with_frozen_variables = tf.no_op()

        # 第二阶段的模型训练相关参数设置
        print("第二阶段训练对象构建....")
        with tf.name_scope("define_second_stage_train"):
            # 获取第二阶段的相关训练参数(所有训练参数)
            second_stage_trainable_var_list = tf.trainable_variables()

            # 构建第二阶段的优化器对象
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate) \
                .minimize(self.loss, var_list=second_stage_trainable_var_list)

            # 加入依赖控制
            # 先做BN的更新操作、在进行模型训练/步骤参数更新、最终更新模型参数值
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        # 定义一个训练操作(实际上不做任何操作,仅仅触发所有操作)
                        self.train_op_with_all_variables = tf.no_op()

        print("持久化器对象构建.....")
        with tf.name_scope('loader_and_saver'):
            # 定义模型加载对象以及模型持久化对象
            self.loader = tf.train.Saver(
                self.net_var)  # 仅加载网络参数(也就是前向过程中的所有参数,对于优化器中的参数)
            self.saver = tf.train.Saver(tf.global_variables(),
                                        max_to_keep=3)  # 保存是保存所有参数

        print("可视化器对象构建.....")
        with tf.name_scope('summary'):
            # 可视化相关内容
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            # 可视化日志文件构建
            logdir = "./data/log/"
            if os.path.exists(logdir):
                shutil.rmtree(logdir)
            os.mkdir(logdir)

            # 输出可视化对象
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)

        print("初始化完成.....")
Exemple #16
0
#
#================================================================

import tensorflow as tf
from core.yolov3 import YOLOV3

# Script: freeze a trained YOLOv3 checkpoint into a single self-contained .pb file.
pb_file = "./yolov3_coco.pb"  # output path of the frozen graph
ckpt_file = "./checkpoint/yolov3_test_loss=10.6896.ckpt-50"  # checkpoint to freeze
# Graph nodes to keep in the frozen graph: the input placeholder and the three
# prediction branches. These strings must match the name scopes used when the
# inference graph was originally built.
output_node_names = [
    "input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2",
    "pred_lbbox/concat_2"
]

with tf.name_scope('input'):
    # No static shape: the frozen graph accepts variable-sized input images.
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

# Build the forward graph in inference mode (trainable tensor fixed to False).
model = YOLOV3(input_data, trainable=tf.cast(False, tf.bool))
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)  # load the trained weights into the graph

# Replace every variable with a constant holding its current value, keeping
# only the subgraph needed to compute the listed output nodes.
converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
    def __init__(self):  # pull hyperparameters from the config file
        """Build the full two-stage YOLOv3 training graph.

        Sets up input placeholders, the forward network and its three loss
        terms, a warm-up + cosine-decay learning-rate schedule, exponential
        moving-average (EMA) weight smoothing, two train ops (stage 1:
        detection heads only; stage 2: all variables), a checkpoint
        loader/saver, and TensorBoard summaries.
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150  # cap on ground-truth boxes per scale
        self.train_logdir = "./data/log/train"  # training-log directory
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)  # batches per epoch
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('define_input'): # input placeholders; shapes left dynamic for multi-scale training
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"): # forward pass and the three loss terms
            self.model = YOLOV3(self.input_data, self.trainable)
            # Snapshot of graph variables taken *before* the optimizers below
            # add their own variables, so the loader restores net weights only.
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'): # warm-up then cosine-decay schedule
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                      dtype=tf.float64, name='train_steps')
            # Before warmup_steps: ramp the LR linearly up to learn_rate_init.
            # After: cosine-decay from learn_rate_init down to learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)

        # Translation of the note below: early in training the loss can easily
        # blow up to NaN, so the initial learning rate is kept very low, which
        # slows training; gradually ramping the LR avoids NaN while still
        # letting training speed up once it has stabilised.
        '''
        warmup_steps作用:   
        神经网络在刚开始训练的过程中容易出现loss=NaN的情况,为了尽量避免这个情况,因此初始的学习率设置得很低
        但是这又使得训练速度变慢了。因此,采用逐渐增大的学习率,从而达到既可以尽量避免出现nan,又可以等训练过程稳定了再增大训练速度的目的。
        '''

        with tf.name_scope("define_weight_decay"):  # EMA smoothing for less-oscillating, more robust final weights
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        # Loader restores everything except the three detection heads (their
        # shapes depend on num_classes, so pretrained COCO weights won't fit).
        with tf.name_scope('loader_and_saver'):
            variables_to_restore = [v for v in self.net_var if
                                    v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]
            self.loader = tf.train.Saver(variables_to_restore)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

        with tf.name_scope("define_first_stage_train"): # stage 1: train only the detection-head layers
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                     var_list=self.first_stage_trainable_var_list)
            # Dependency order: BN statistic updates -> optimizer step + step
            # counter -> EMA update; the final no-op just triggers the chain.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):    # stage 2: fine-tune all layers
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                                                      var_list=second_stage_trainable_var_list)

            # Same dependency ordering as stage 1.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"  # summary-log directory (wiped on every run)
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
    def __init__(self):
        """Build a simplified, single-stage YOLOv3 training graph.

        Unlike the two-stage trainer, this variant trains all variables in
        one stage and uses an exponentially decayed learning rate rather
        than warm-up + cosine decay.
        """
        self.anchor_per_scale = 3

        self.path1 = r"./data/classes/antenna.names"  # class-names file (customised for this dataset)

        self.classes = utils.read_class_names(self.path1)
        self.num_classes = len(self.classes)
        self.learn_rate_init = 1e-4     # initial learning rate
        self.learn_rate_end = 1e-6     # final LR (unused by the exponential schedule below)
        self.first_stage_epochs = 20  # was 40; stage-1 training is skipped when pretrained weights can't be loaded
        self.warmup_periods = 2     # warm-up epochs (original comment garbled; nominal default 2)

        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))  # run timestamp
        self.moving_ave_decay = 0.9995    # EMA decay (default 0.9995)
        self.max_bbox_per_scale = 150                           # max ground-truth boxes per image
        self.trainset = Dataset('train')              # training split
        self.testset  = Dataset('test')
        self.steps_per_period = len(self.trainset)            # batches per epoch
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('define_input'):
            # Shapes left dynamic to support multi-scale training.
            self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox  = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable    = tf.placeholder(dtype=tf.bool, name='training')

        # TODO main part: forward pass and loss construction
        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()  # snapshot taken before optimizer variables exist
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                                                    self.label_sbbox,  self.label_mbbox,  self.label_lbbox,
                                                    self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        # Learning-rate schedule (note: plain exponential decay, no warm-up,
        # despite the scope name).
        with tf.name_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float32, trainable=False, name='global_step')
            # Smooth (non-staircase) exponential decay: x0.9 every 20 epochs.
            self.learn_rate = tf.train.exponential_decay(learning_rate=self.learn_rate_init,
                      global_step=self.global_step,
                      decay_steps=self.steps_per_period * 20,
                      decay_rate=0.9,
                      staircase=False,
                      name=None)
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # EMA over all trainable variables for smoother final weights.
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_second_stage_train"):
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=second_stage_trainable_var_list)

            # Dependency order: BN updates -> optimizer step + step counter -> EMA update.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()      # no-op that only triggers the dependency chain above

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)  # restores only the forward-pass weights
            self.saver  = tf.train.Saver(tf.global_variables(), max_to_keep=5)
Exemple #19
0
    def __init__(self, steps_per_period):
        """Build a YOLOv3 training graph for a model whose variables use
        ``od_`` (object-detection) and ``bck_`` (backbone) name prefixes.

        Args:
            steps_per_period: number of batches per epoch; sizes the
                warm-up and cosine-decay learning-rate schedule.
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)

        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.steps_per_period = steps_per_period

        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS

        self.max_bbox_per_scale = 150  # cap on ground-truth boxes per scale

        with tf.name_scope('define_input'):
            # NOTE(review): unlike the sibling trainers, no image-input
            # placeholder is created here — confirm the model supplies its
            # own input via forward() below.
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3()  #(self.input_data, self.trainable)

            # NOTE(review): `backbone` and `trainable` are not defined in this
            # scope — as written this line raises NameError. Confirm the
            # intended arguments (presumably a backbone tensor and a
            # trainable flag from the caller).
            self.model.forward(backbone, trainable)
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):  # warm-up then cosine-decay schedule
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            # Linear LR ramp during warm-up, then cosine decay from
            # learn_rate_init down to learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # EMA over all trainable variables for smoother final weights.
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):  # stage 1: detection heads only
            self.first_stage_trainable_var_list = []

            varlist = tf.trainable_variables()
            od_vars = [var for var in varlist if 'od_' in var.name]  # NOTE(review): unused in this scope
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in [
                        'od_conv_sbbox', 'od_conv_mbbox', 'od_conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            # Dependency order: BN updates -> optimizer step + step counter -> EMA update.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            varlist = tf.trainable_variables()
            od_vars = [var for var in varlist if 'od_' in var.name]
            bck_vars = [var for var in varlist if 'bck_' in var.name]

            # Stage 2 uses two optimizers: detection-head (od_) variables
            # minimise the full loss, while backbone (bck_) variables
            # minimise a 0.1x down-weighted copy of it.
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(self.loss, var_list=od_vars)

            second_stage_optimizerbc = tf.train.AdamOptimizer(
                self.learn_rate).minimize(0.1 * self.loss, var_list=bck_vars)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([
                        second_stage_optimizer, second_stage_optimizerbc,
                        global_step_update
                ]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"  # summary-log directory (wiped on every run)
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            # NOTE(review): write_op is created but no FileWriter is — summaries
            # are presumably written by a caller; confirm.
            self.write_op = tf.summary.merge_all()
Exemple #20
0
    def __init__(self):
        """Build a two-stage YOLOv3 training graph extended with an extra
        small-bbox branch (``sbbox_1``) in both labels and loss.
        """
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150  # cap on ground-truth boxes per scale
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)  # batches per epoch
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(dtype=tf.float32,
                                             name='input_data')
            self.label_sbbox_1 = tf.placeholder(
                dtype=tf.float32, name='label_sbbox_1')  # newly added input for the extra sbbox_1 branch
            self.label_sbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32,
                                              name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32,
                                               name='sbboxes')

            self.true_sbboxes_1 = tf.placeholder(dtype=tf.float32,
                                                 name='sbboxes_1')  # newly added input for the extra sbbox_1 branch
            self.true_mbboxes = tf.placeholder(dtype=tf.float32,
                                               name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32,
                                               name='lbboxes')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable)
            # Snapshot of net weights taken before optimizer variables exist.
            self.net_var = tf.global_variables()
            # Loss computation extended to include the extra sbbox_1 branch.
            self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox_1, self.label_sbbox, self.label_mbbox,
                self.label_lbbox, self.true_sbboxes_1, self.true_sbboxes,
                self.true_mbboxes, self.true_lbboxes)
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):  # warm-up then cosine-decay schedule
            self.global_step = tf.Variable(1.0,
                                           dtype=tf.float64,
                                           trainable=False,
                                           name='global_step')
            warmup_steps = tf.constant(self.warmup_periods *
                                       self.steps_per_period,
                                       dtype=tf.float64,
                                       name='warmup_steps')
            train_steps = tf.constant(
                (self.first_stage_epochs + self.second_stage_epochs) *
                self.steps_per_period,
                dtype=tf.float64,
                name='train_steps')
            # Linear LR ramp during warm-up, then cosine decay from
            # learn_rate_init down to learn_rate_end.
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.
                learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 *
                (self.learn_rate_init - self.learn_rate_end) * (1 + tf.cos(
                    (self.global_step - warmup_steps) /
                    (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # EMA over all trainable variables for smoother final weights.
            moving_ave = tf.train.ExponentialMovingAverage(
                self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):  # stage 1: detection heads only
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                # Head list extended with the extra conv_sbbox_1 branch.
                if var_name_mess[0] in [
                        'conv_sbbox_1', 'conv_sbbox', 'conv_mbbox',
                        'conv_lbbox'
                ]:
                    self.first_stage_trainable_var_list.append(var)

            first_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=self.first_stage_trainable_var_list)
            # Dependency order: BN updates -> optimizer step + step counter -> EMA update.
            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):  # stage 2: fine-tune all layers
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(
                self.learn_rate).minimize(
                    self.loss, var_list=second_stage_trainable_var_list)

            with tf.control_dependencies(
                    tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies(
                    [second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)  # restores forward-pass weights only
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)

            logdir = "./data/log/"  # summary-log directory (wiped on every run)
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir,
                                                        graph=self.sess.graph)
#
#================================================================

import tensorflow as tf
from core.yolov3 import YOLOV3

# Script: freeze a dual-input (RGB + LWIR thermal) YOLOv3 checkpoint into a .pb file.
pb_file = "./yolov3_0910.pb"  # output path of the frozen graph
ckpt_file = "./checkpoint/yolov3_RGB_0910_loss=11.9811.ckpt-122"  # checkpoint to freeze
# Nodes to keep: both input placeholders and the three prediction branches.
# These names must match the scopes used when the graph was built.
output_node_names = [
    "input/input_rgb", "input/input_lwir", "pred_sbbox/concat_2",
    "pred_mbbox/concat_2", "pred_lbbox/concat_2"
]

with tf.name_scope('input'):
    # No static shapes: the frozen graph accepts variable-sized inputs.
    input_rgb = tf.placeholder(dtype=tf.float32, name='input_rgb')
    input_lwir = tf.placeholder(dtype=tf.float32, name='input_lwir')
# Build the forward graph in inference mode (trainable is a plain Python bool here).
model = YOLOV3(input_rgb, input_lwir, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)  # load the trained weights into the graph

# Replace all variables with constants, keeping only the subgraph that
# computes the listed output nodes.
converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
            if (var_name_mess[-1] not in ['weights', 'gamma', 'beta', 'moving_mean', 'moving_variance']) or \
                    (var_name_mess[1] == 'yolo-v3' and (var_name_mess[-2] in preserve_org_names)):
                continue
        org_weights_mess.append([var_name, var_shape])
        print("=> " + str(id), str(var_name).ljust(50), var_shape)
print()
# Rebuild the model in a fresh default graph and collect each variable's
# (name, shape) so it can be matched 1:1 against the checkpoint variables
# gathered above in `org_weights_mess`.
tf.reset_default_graph()

cur_weights_mess = []
# NOTE(review): the original called `tf.Graph().as_default()` here without a
# `with` statement — that creates a throwaway graph and has no effect, so the
# dead call was removed; `tf.reset_default_graph()` already gives a clean graph.
with tf.name_scope('input'):
    # Single 416x416 3-channel image as the network input.
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(1, 416, 416, 3),
                                name='input_data')
    training = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(input_data, training)
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    print(var_name_mess[0])
    # When transferring from a COCO checkpoint, skip the layers listed in
    # `preserve_cur_names` (presumably the class-count-dependent heads that
    # must be retrained — confirm against the surrounding script).
    if flag.train_from_coco:
        if var_name_mess[0] in preserve_cur_names: continue
    cur_weights_mess.append([var_name, var_shape])
    print("=> " + str(var_name).ljust(50), var_shape)

org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
if cur_weights_num != org_weights_num:
    # A mismatch means the rebuilt graph cannot be mapped onto the checkpoint.
    raise RuntimeError(
        "variable count mismatch: checkpoint has %d, current graph has %d"
        % (org_weights_num, cur_weights_num))
Example #23
0
        org_weights_mess.append([var_name, var_shape])
        print("=> " + str(var_name).ljust(50), var_shape)
print()
# Rebuild the two-stream (RGB + LWIR) model in a fresh default graph and
# collect each variable's (name, shape) so it can be matched 1:1 against the
# checkpoint variables gathered above in `org_weights_mess`.
tf.reset_default_graph()

cur_weights_mess = []
# NOTE(review): the original called `tf.Graph().as_default()` here without a
# `with` statement — that creates a throwaway graph and has no effect, so the
# dead call was removed; `tf.reset_default_graph()` already gives a clean graph.
with tf.name_scope('input'):
    # One 416x416 3-channel image per modality (visible + thermal).
    input_rgb = tf.placeholder(dtype=tf.float32,
                               shape=(1, 416, 416, 3),
                               name='input_rgb')
    input_lwir = tf.placeholder(dtype=tf.float32,
                                shape=(1, 416, 416, 3),
                                name='input_lwir')
    training = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(input_rgb, input_lwir, training)
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    print(var_name_mess[0])
    # Unlike the single-stream variant, no layers are skipped here: every
    # variable must be present in the checkpoint.
    cur_weights_mess.append([var_name, var_shape])
    print("=> " + str(var_name).ljust(50), var_shape)

org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
if cur_weights_num != org_weights_num:
    # A mismatch means the rebuilt graph cannot be mapped onto the checkpoint.
    raise RuntimeError(
        "variable count mismatch: checkpoint has %d, current graph has %d"
        % (org_weights_num, cur_weights_num))