Example #1
    def __init__(self, model_path, name, version, export_path='./model'):
        '''
        :param model_path: directory containing the ckpt checkpoint
        :param name: identifier of the converted model
        :param version: model version
        :param export_path: output directory for the converted model
        '''
        self.classes = data_pre.get_classes(cfg.Main.classes)  # class names
        self.anchors = data_pre.get_anchors(cfg.Main.anchors)  # anchors

        self.model_path = tf.train.get_checkpoint_state(model_path).model_checkpoint_path
        self.output_size = 416
        self.batch_size = 1
        self.export_path = export_path
        self.name = name
        self.version = version

        self.ses = tf.Session()
        self.is_training = cfg.Test.is_training
        np.random.seed(21)

        with tf.name_scope('inputs'):
            self.input_images = tf.placeholder(dtype=tf.float32,
                                               shape=[self.batch_size, self.output_size, self.output_size, 3],
                                               name='input_images')

        with tf.name_scope('model_create'):
            self.model = Yolo3(input_value=self.input_images, is_training=self.is_training)

        with tf.name_scope('saver_log'):
            self.saver = tf.train.Saver(tf.global_variables())
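The constructor only builds the graph and a Saver; the conversion step itself is not shown. Below is a minimal sketch of how an export method could use these attributes to write a TensorFlow Serving SavedModel, assuming `os` is imported at module level. The method name `export` and the output tensor names `pre_one`/`pre_two`/`pre_three` (taken from Example #3) are assumptions, not part of the original snippet.

    def export(self):
        # Hypothetical export step: restore the checkpoint, then write a SavedModel
        # under <export_path>/<name>/<version> for TensorFlow Serving.
        self.saver.restore(self.ses, self.model_path)
        export_dir = os.path.join(self.export_path, self.name, str(self.version))
        builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
        signature = tf.saved_model.signature_def_utils.predict_signature_def(
            inputs={'input_images': self.input_images},
            outputs={'pre_one': self.model.pre_one,    # assumed output tensors (see Example #3)
                     'pre_two': self.model.pre_two,
                     'pre_three': self.model.pre_three})
        builder.add_meta_graph_and_variables(self.ses,
                                             [tf.saved_model.tag_constants.SERVING],
                                             signature_def_map={'predict': signature})
        builder.save()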
Example #2
    def __init__(self, images_path, model_path, output_size, is_save=False):
        self.classes = data_pre.get_classes(cfg.Main.classes)  # class names
        self.anchors = data_pre.get_anchors(cfg.Main.anchors)  # anchors
        self.images_path = images_path
        self.batch_size = len(self.images_path)
        self.model_path = tf.train.get_checkpoint_state(model_path).model_checkpoint_path
        # self.model_path = './checkpoint/darknet/yolov3.ckpt'
        self.output_size = output_size
        self.ses = tf.Session()
        self.is_training = cfg.Test.is_training
        self.is_save = is_save
        self.nms_score = cfg.Test.nms_score
        self.bboxes_score = cfg.Test.bboxes_score
        np.random.seed(21)

        with tf.name_scope('inputs'):
            self.input_images = tf.placeholder(dtype=tf.float32,
                                               shape=[self.batch_size, self.output_size, self.output_size, 3],
                                               name='input_images')

        with tf.name_scope('model_create'):
            self.model = Yolo3(input_value=self.input_images,is_training=self.is_training)

        with tf.name_scope('saver_log'):
            # global_variables = tf.global_variables()
            # ckpt_variables = self.get_all_variables_name_from_ckpt(self.model_path)
            # print(ckpt_variables)
            # for ids,value in enumerate(global_variables):
            #     print('global:',value)
            #     print('ckpt:',ckpt_variables[ids])
            #     print('\n')
            #
            # os._exit(0)

            self.saver = tf.train.Saver(tf.global_variables())
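Again only the constructor is shown. A minimal sketch of how inference might proceed from here, assuming a hypothetical `predict` method, that `cv2` is imported at module level, and that the network expects RGB images scaled to [0, 1] (the actual preprocessing is not visible in this snippet):

    def predict(self):
        # Hypothetical inference step: restore weights, preprocess the images,
        # then run the three YOLO heads in a single batch.
        self.saver.restore(self.ses, self.model_path)
        batch = []
        for path in self.images_path:
            image = cv2.imread(path)[..., ::-1]  # BGR -> RGB (assumed)
            image = cv2.resize(image, (self.output_size, self.output_size))
            batch.append(image / 255.0)          # scale to [0, 1] (assumed)
        return self.ses.run(
            [self.model.pre_one, self.model.pre_two, self.model.pre_three],
            feed_dict={self.input_images: np.array(batch, dtype=np.float32)})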
Example #3
    def __init__(self, input_value, is_training):
        self.inputs = input_value
        self.anchors = data_pre.get_anchors(cfg.Main.anchors)
        self.classes = data_pre.get_classes(cfg.Main.classes)
        self.max_iou_ = 0.5
        self.strides = [8, 16, 32]
        self.backbone = cfg.Main.backbone
        self.is_training = is_training
        with tf.variable_scope('yolo3_model'):
            # 52,26,13
            self.conv_one, self.conv_two, self.conv_three = self.__network()
            # 52,26,13
            self.pre_one = self.__yolo_layer(self.conv_one, self.strides[0],
                                             self.anchors[0:3])

            self.pre_two = self.__yolo_layer(self.conv_two, self.strides[1],
                                             self.anchors[3:6])
            # error
            self.pre_three = self.__yolo_layer(self.conv_three,
                                               self.strides[2],
                                               self.anchors[6:9])
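The three heads correspond to strides 8, 16 and 32, so for a 416×416 input the feature maps are 52×52, 26×26 and 13×13 (the `# 52,26,13` comments), and the nine anchors are split into three groups of three, with the smallest anchors assigned to the finest grid. A quick check of the grid sizes:

# Grid size per detection head for a 416x416 input is input_size / stride.
input_size = 416
strides = [8, 16, 32]
print([input_size // s for s in strides])  # [52, 26, 13]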
Example #4
    def __init__(self):
        self.classes = data_pre.get_classes(cfg.Main.classes)
        self.output_size = 416
        self.bboxes_score = cfg.Test.bboxes_score
        self.nms_score = cfg.Test.nms_score
Example #5
    def __init__(self):
        # data
        self.classes = data_pre.get_classes(cfg.Main.classes)  # class names
        self.anchors = data_pre.get_anchors(cfg.Main.anchors)  # anchors
        self.train_set = data_pre.Dataset()
        self.test_set = data_pre.Dataset('test')

        # Train
        self.model_savefile = cfg.Train.darknet_savefile if cfg.Main.backbone == 0 else cfg.Train.mobilenet_savefile
        self.epoch = cfg.Train.epoch
        self.is_twostep = cfg.Train.is_twostep
        self.one_step = cfg.Train.one_step
        self.two_step = cfg.Train.two_step
        self.batch_size = cfg.Train.batch_size
        self.is_training = cfg.Train.is_training
        self.input_size = cfg.Train.input_size
        # learning_rate
        self.first_learn_rate = 1e-4  # initial rate for cosine decay
        self.decay_steps = 200  # decay steps
        self.alpha = 0.3  # alpha
        # other
        self.darknet_savefile = './checkpoint/darknet_yolo3/yolov3.ckpt'
        self.ses = tf.Session(config=gpu_config)
        np.random.seed(21)
        self.train_epoch_loss, self.test_epoch_loss = [], []
        with tf.name_scope('inputs'):
            self.input_images = tf.placeholder(dtype=tf.float32,
                                               shape=[self.batch_size, self.input_size, self.input_size, 3],
                                               name='input_images')
            self.one_bbox = tf.placeholder(dtype=tf.float32, name='one_bbox')
            self.two_bbox = tf.placeholder(dtype=tf.float32, name='two_bbox')
            self.three_bbox = tf.placeholder(dtype=tf.float32, name='three_bbox')

            self.one_recover = tf.placeholder(dtype=tf.float32, name='one_recover')
            self.two_recover = tf.placeholder(dtype=tf.float32, name='two_recover')
            self.three_recover = tf.placeholder(dtype=tf.float32, name='three_recover')
        with tf.name_scope('model_create'):
            self.model = Yolo3(input_value=self.input_images, is_training=self.is_training)
            self.load_variables = tf.global_variables()
        with tf.name_scope('loss'):
            self.giou_loss, self.conf_loss, self.pro_loss = self.model.loss_total(self.one_bbox, self.two_bbox,
                                                                                  self.three_bbox, self.one_recover,
                                                                                  self.two_recover, self.three_recover)

            self.total_loss = self.giou_loss + self.conf_loss + self.pro_loss
        with tf.variable_scope('learn_rate'):
            self.global_step = tf.Variable(0.0, dtype=tf.float32, name='global_step', trainable=False)
            self.save_step = tf.Variable(0.0, dtype=tf.float32, name='save_step', trainable=False)
            self.learn_rate = tf.train.exponential_decay(learning_rate=1e-4, global_step=self.global_step,
                                                         decay_steps=2000, decay_rate=0.9, staircase=True,
                                                         name='learning_rate')
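            # With staircase=True the rate is multiplied by 0.9 every 2000 steps:
            # lr = 1e-4 * 0.9 ** floor(global_step / 2000).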
            # self.learn_rate = 1e-4
            self.global_step = tf.assign_add(self.global_step, 1.0)
            self.save_step = tf.assign_add(self.save_step, 1.0)
        # with tf.name_scope("define_weight_decay"):
        #     moving_ave = tf.train.ExponentialMovingAverage(0.9995).apply(tf.trainable_variables())

        with tf.name_scope('optimizer'):
            # Part 1: mainly train the classification and regression (detection head) layers
            self.first_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')

                if var_name_mess[1] in ['conv_three', 'conv_two', 'conv_one']:
                    self.first_trainable_var_list.append(var)
            self.first_op = tf.train.AdamOptimizer(self.learn_rate).minimize(self.total_loss,
                                                                             var_list=self.first_trainable_var_list)
            # Part 2: train the whole network
            self.second_trainable_var_list = tf.trainable_variables()
            self.second_op = tf.train.GradientDescentOptimizer(self.learn_rate).minimize(self.total_loss,
                                                                                         var_list=self.second_trainable_var_list)
            # tf.keras.callbacks.ReduceLROnPlateau()

            # Gradient clipping (alternative, commented out)
            # optimizer = tf.train.AdamOptimizer(self.learn_rate,beta1=0.5)
            # grads, variable = zip(*optimizer.compute_gradients(self.total_loss))
            # grads, global_norm = tf.clip_by_global_norm(grads,5)
            # self.second_op = optimizer.apply_gradients(zip(grads,variable))
            # grad_op = tf.ConditionalAccumulator(dtype=tf.float32)
            # grad_op.set_global_step(self.global_step)
            # grad_op.apply_grad()

        with tf.name_scope('saver_log'):
            self.load = tf.train.Saver(self.load_variables)
            self.saver = tf.train.Saver(tf.global_variables())

            giou_loss = tf.summary.scalar("giou_loss", self.giou_loss)
            conf_loss = tf.summary.scalar("conf_loss", self.conf_loss)
            prob_loss = tf.summary.scalar("prob_loss", self.pro_loss)
            total_loss = tf.summary.scalar("total_loss", self.total_loss)
            learn_rate = tf.summary.scalar("learn_rate", self.learn_rate)

            # The epoch losses are Python lists, so feed their means in as scalars when
            # the epoch summary is evaluated instead of baking constants into the graph.
            self.train_epoch_loss_ph = tf.placeholder(dtype=tf.float32, name='train_epoch_loss')
            self.test_epoch_loss_ph = tf.placeholder(dtype=tf.float32, name='test_epoch_loss')
            train_epoch_loss = tf.summary.scalar('train_epoch_loss', self.train_epoch_loss_ph)
            test_epoch_loss = tf.summary.scalar('test_epoch_loss', self.test_epoch_loss_ph)

            logdir = './log/'
            # if os.path.exists(logdir): shutil.rmtree(logdir)
            # os.mkdir(logdir)

            self.batch_summary_op = tf.summary.merge([giou_loss, conf_loss, prob_loss, total_loss, learn_rate])
            self.epoch_summary_op = tf.summary.merge([train_epoch_loss, test_epoch_loss])

            self.batch_summary_writer = tf.summary.FileWriter(logdir + '/batch/', graph=self.ses.graph)
            self.epoch_summary_writer = tf.summary.FileWriter(logdir + '/epoch/', graph=self.ses.graph)

            self.batch_summary_writer.add_graph(self.ses.graph)
            self.epoch_summary_writer.add_graph(self.ses.graph)
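Only the graph construction is shown. A minimal sketch of the two-step schedule this constructor implies (head-only training with `first_op`, then full fine-tuning with `second_op`), assuming a hypothetical `train` method, that `data_pre.Dataset` yields batches in the order fed below, and that `one_step` is the number of head-only epochs; none of this is confirmed by the snippet:

    def train(self):
        # Hypothetical training loop for the placeholder-fed graph above.
        self.ses.run(tf.global_variables_initializer())
        self.load.restore(self.ses, self.darknet_savefile)  # warm-start (assumed compatible checkpoint)
        for epoch in range(self.epoch):
            # Phase 1 trains only the head variables, phase 2 fine-tunes everything.
            use_first = self.is_twostep and epoch < self.one_step
            train_op = self.first_op if use_first else self.second_op
            for batch in self.train_set:  # assumed iteration protocol and batch layout
                _, loss, summary, step = self.ses.run(
                    [train_op, self.total_loss, self.batch_summary_op, self.global_step],
                    feed_dict={self.input_images: batch[0],
                               self.one_bbox: batch[1], self.two_bbox: batch[2], self.three_bbox: batch[3],
                               self.one_recover: batch[4], self.two_recover: batch[5],
                               self.three_recover: batch[6]})
                self.train_epoch_loss.append(loss)
                self.batch_summary_writer.add_summary(summary, int(step))
            self.saver.save(self.ses, self.model_savefile, global_step=epoch)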
Example #6
    def __init__(self):
        # data
        self.classes = data_pre.get_classes(cfg.Main.classes)  # class names
        self.anchors = data_pre.get_anchors(cfg.Main.anchors)  # anchors
        # TFRecord datasets
        self.train_set = dataset_TFRecord.Dataset('train')
        self.test_set = dataset_TFRecord.Dataset('test')
        self.train_itera = self.train_set.bulid_data().make_one_shot_iterator()
        self.test_itera = self.test_set.bulid_data().make_one_shot_iterator()
        # Train
        self.model_savefile = cfg.Train.darknet_savefile if cfg.Main.backbone == 0 else cfg.Train.mobilenet_savefile
        self.epoch = cfg.Train.epoch
        self.one_step = cfg.Train.one_step
        self.two_step = cfg.Train.two_step
        self.batch_size = cfg.Train.batch_size
        self.is_training = cfg.Train.is_training
        self.input_size = cfg.Train.input_size
        # learning_rate
        self.first_learn_rate = 1e-4  # initial rate for cosine decay
        self.decay_steps = 200  # decay steps
        self.alpha = 0.3  # alpha
        # other
        self.darknet_savefile = './checkpoint/darknet/yolov3.ckpt'
        self.ses = tf.Session()
        self.is_train_flag = True
        np.random.seed(21)
        with tf.name_scope('inputs'):
            if self.is_train_flag:  # switch between the train and test iterators
                inputs = self.train_itera.get_next()
            else:
                inputs = self.test_itera.get_next()

            # reshape to fixed shapes
            self.input_images = tf.reshape(inputs[3], [self.batch_size, 416, 416, 3])
            self.one_bbox = tf.reshape(inputs[0], [self.batch_size, 52, 52, 3, 65])
            self.two_bbox = tf.reshape(inputs[1], [self.batch_size, 26, 26, 3, 65])
            self.three_bbox = tf.reshape(inputs[2], [self.batch_size, 13, 13, 3, 65])

            self.one_recover = tf.reshape(inputs[4], [self.batch_size, 150, 4])
            self.two_recover = tf.reshape(inputs[5], [self.batch_size, 150, 4])
            self.three_recover = tf.reshape(inputs[6], [self.batch_size, 150, 4])
        with tf.name_scope('model_create'):
            self.model = Yolo3(input_value=self.input_images,is_training=self.is_training)
            self.load_variables = tf.global_variables()
        with tf.name_scope('loss'):
            self.giou_loss, self.conf_loss, self.pro_loss = self.model.loss_total(self.one_bbox, self.two_bbox,
                                                                                  self.three_bbox, self.one_recover,
                                                                                  self.two_recover, self.three_recover)

            self.total_loss = self.giou_loss + self.conf_loss + self.pro_loss
        with tf.variable_scope('learn_rate'):
            self.global_step = tf.Variable(1.0, dtype=tf.float32, name='global_step', trainable=False)
            # self.learn_rate = tf.train.cosine_decay(learning_rate=self.first_learn_rate, global_step=self.global_step,
            #                                         decay_steps=self.decay_steps, alpha=self.alpha)
            self.learn_rate = tf.train.exponential_decay(learning_rate=1e-4, global_step=self.global_step,
                                                         decay_steps=300, decay_rate=0.9, name='learning_rate')
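            # Without staircase the decay is continuous: lr = 1e-4 * 0.9 ** (global_step / 300).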
            self.global_step = tf.assign_add(self.global_step, 1.0)
        # with tf.name_scope("define_weight_decay"):
        #     moving_ave = tf.train.ExponentialMovingAverage(0.9995).apply(tf.trainable_variables())
        with tf.name_scope('optimizer'):
            # Part 1: mainly train the classification and regression (detection head) layers
            self.first_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[1] in ['conv_three', 'conv_two', 'conv_one']:
                    self.first_trainable_var_list.append(var)
            self.first_op = tf.train.AdamOptimizer(self.learn_rate).minimize(self.total_loss,
                                                                              var_list=self.first_trainable_var_list)

            # Part 2: train the whole network
            second_trainable_var_list = tf.trainable_variables()
            self.second_op = tf.train.AdamOptimizer(self.learn_rate).minimize(self.total_loss,
                                                                              var_list=second_trainable_var_list)

            # Gradient clipping (alternative, commented out)
            # optimizer = tf.train.AdamOptimizer(self.learn_rate,beta1=0.5)
            # grads, variable = zip(*optimizer.compute_gradients(self.total_loss))
            # grads, global_norm = tf.clip_by_global_norm(grads,5)
            # self.second_op = optimizer.apply_gradients(zip(grads,variable))
        with tf.name_scope('saver_log'):
            self.load = tf.train.Saver(self.load_variables)
            self.saver = tf.train.Saver(tf.global_variables())


            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.pro_loss)
            tf.summary.scalar("total_loss", self.total_loss)
            tf.summary.scalar("learn_rate", self.learn_rate)

            logdir = './log/'
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.summary_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.ses.graph)
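Because the one-shot iterators feed the graph directly, a training step needs no feed_dict. A minimal sketch of a run loop, assuming a hypothetical `train` method and `steps_per_epoch` value, that `one_step` is the number of head-only epochs, and that the darknet checkpoint is compatible with `load_variables`:

    def train(self):
        # Hypothetical run loop; each session run pulls the next TFRecord batch automatically.
        self.ses.run(tf.global_variables_initializer())
        self.load.restore(self.ses, self.darknet_savefile)  # warm-start (assumed compatible checkpoint)
        steps_per_epoch = 100                               # hypothetical; derive from the dataset size
        for epoch in range(self.epoch):
            train_op = self.first_op if epoch < self.one_step else self.second_op
            for _ in range(steps_per_epoch):
                _, loss, summary, step = self.ses.run(
                    [train_op, self.total_loss, self.summary_op, self.global_step])
                self.summary_writer.add_summary(summary, int(step))
            self.saver.save(self.ses, self.model_savefile, global_step=epoch)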