def freeze_graph(pb_file, ckpt_file, output_node_names):
    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    model = YOLOV4(input_data, trainable=False)
    print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(converted_graph_def.SerializeToString())
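For context, the snippet below sketches how freeze_graph() might be invoked. The checkpoint and .pb paths are placeholders, not files shipped with the project; only the output node names (the three prediction heads, also used by the export script further down) come from the code itself.

# Hypothetical invocation of freeze_graph(); the file paths are assumptions.
output_node_names = ["input/input_data",
                     "pred_sbbox/concat_2",
                     "pred_mbbox/concat_2",
                     "pred_lbbox/concat_2"]
freeze_graph(pb_file="./yolov4.pb",
             ckpt_file="./checkpoint/yolov4.ckpt",
             output_node_names=output_node_names)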
def __init__(self, net_type):
    self.net_type            = net_type
    self.anchor_per_scale    = cfg.YOLO.ANCHOR_PER_SCALE
    self.classes             = utils.read_class_names(cfg.YOLO.CLASSES)
    self.num_classes         = len(self.classes)
    self.learn_rate_init     = cfg.TRAIN.LEARN_RATE_INIT
    self.learn_rate_end      = cfg.TRAIN.LEARN_RATE_END
    self.first_stage_epochs  = cfg.TRAIN.FISRT_STAGE_EPOCHS
    self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    self.warmup_periods      = cfg.TRAIN.WARMUP_EPOCHS
    self.initial_weight      = cfg.TRAIN.INITIAL_WEIGHT
    self.ckpt_path           = cfg.TRAIN.CKPT_PATH
    if not os.path.exists(self.ckpt_path):
        os.makedirs(self.ckpt_path)
    self.time                = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    self.moving_ave_decay    = cfg.YOLO.MOVING_AVE_DECAY
    self.max_bbox_per_scale  = 150
    self.log_path            = ('log/%s' % net_type)
    if os.path.exists(self.log_path):
        shutil.rmtree(self.log_path)  # os.removedirs(self.log_path)
    os.makedirs(self.log_path)
    self.trainset            = Dataset('train', self.net_type)
    self.testset             = Dataset('test', self.net_type)
    self.steps_per_period    = len(self.trainset)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)

    with tf.name_scope('input'):
        if net_type == 'tiny':
            self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable    = tf.placeholder(dtype=tf.bool, name='training')
        else:
            self.input_data   = tf.placeholder(dtype=tf.float32, name='input_data')
            self.label_sbbox  = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox  = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox  = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            self.trainable    = tf.placeholder(dtype=tf.bool, name='training')

    with tf.name_scope('define_loss'):
        if self.net_type == 'tiny':
            self.model = YOLOV3Tiny(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_mbbox, self.label_lbbox, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.iou_loss + self.conf_loss + self.prob_loss
        elif self.net_type == 'yolov3':
            self.model = YOLOV3(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            self.loss = self.iou_loss + self.conf_loss + self.prob_loss
        elif self.net_type == 'yolov4' or self.net_type == 'yolov5':
            iou_use = 1           # (0, 1, 2) ==> (giou_loss, diou_loss, ciou_loss)
            focal_use = False     # (False, True) ==> (normal, focal_loss)
            label_smoothing = 0
            if self.net_type == 'yolov4':
                self.model = YOLOV4(self.input_data, self.trainable)
            else:
                self.model = YOLOV5(self.input_data, self.trainable)
            self.net_var = tf.global_variables()
            self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes,
                iou_use, focal_use, label_smoothing)
            self.loss = self.iou_loss + self.conf_loss + self.prob_loss
            # self.loss = tf.Print(self.loss, [self.iou_loss, self.conf_loss, self.prob_loss], message='loss: ')
        else:
            print('self.net_type=%s error' % self.net_type)

    with tf.name_scope('learn_rate'):
        # linear warmup for the first warmup_steps, then cosine annealing down to learn_rate_end
        self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
        warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                   dtype=tf.float64, name='warmup_steps')
        train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                  dtype=tf.float64, name='train_steps')
        self.learn_rate = tf.cond(
            pred=self.global_step < warmup_steps,
            true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
            false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                             (1 + tf.cos((self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)))
        global_step_update = tf.assign_add(self.global_step, 1.0)

    with tf.name_scope('define_weight_decay'):
        # exponential moving average over all trainable variables
        moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

    with tf.name_scope('define_first_stage_train'):
        # stage 1: train only the detection heads (conv_*bbox); the backbone stays frozen
        self.first_stage_trainable_var_list = []
        for var in tf.trainable_variables():
            var_name = var.op.name
            var_name_mess = str(var_name).split('/')
            if net_type == 'tiny':
                bboxes = ['conv_mbbox', 'conv_lbbox']
            else:
                bboxes = ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']
            if var_name_mess[0] in bboxes:
                self.first_stage_trainable_var_list.append(var)

        first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
            self.loss, var_list=self.first_stage_trainable_var_list)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                with tf.control_dependencies([moving_ave]):
                    self.train_op_with_frozen_variables = tf.no_op()

    with tf.name_scope('define_second_stage_train'):
        # stage 2: fine-tune all trainable variables
        second_stage_trainable_var_list = tf.trainable_variables()
        second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
            self.loss, var_list=second_stage_trainable_var_list)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                with tf.control_dependencies([moving_ave]):
                    self.train_op_with_all_variables = tf.no_op()

    with tf.name_scope('loader_and_saver'):
        self.loader = tf.train.Saver(self.net_var)
        self.saver  = tf.train.Saver(tf.global_variables(), max_to_keep=1000)

    with tf.name_scope('summary'):
        tf.summary.scalar('learn_rate', self.learn_rate)
        tf.summary.scalar('iou_loss', self.iou_loss)
        tf.summary.scalar('conf_loss', self.conf_loss)
        tf.summary.scalar('prob_loss', self.prob_loss)
        tf.summary.scalar('total_loss', self.loss)
        self.write_op = tf.summary.merge_all()
        self.summary_writer = tf.summary.FileWriter(self.log_path, graph=self.sess.graph)
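The 'learn_rate' scope above builds a linear warmup followed by cosine annealing. The same schedule can be written in plain NumPy to inspect or plot it outside the graph; this is a sketch only, and the concrete step counts and rates in the usage comment are illustrative, not values taken from cfg.TRAIN.

# Plain-NumPy sketch of the warmup + cosine-annealing schedule used above.
import numpy as np

def cosine_warmup_lr(step, warmup_steps, train_steps, lr_init, lr_end):
    if step < warmup_steps:
        # linear warmup from 0 up to lr_init
        return step / warmup_steps * lr_init
    # cosine annealing from lr_init down to lr_end
    progress = (step - warmup_steps) / (train_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + np.cos(progress * np.pi))

# e.g. lr_init=1e-4, lr_end=1e-6, 2 warmup epochs and 30 total epochs of 1000 steps each:
# lrs = [cosine_warmup_lr(s, 2 * 1000, 30 * 1000, 1e-4, 1e-6) for s in range(1, 30 * 1000 + 1)]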
def create_model(config, max_box_per_image, warmup_batches, multi_gpu, saved_weights_name, lr):
    if config["model"]["model_name"] == "yolov3":
        print('[INFO] YOLOV3 Model Creating...')
        if multi_gpu > 1:
            with tf.device('/cpu:0'):
                yolo_model = YOLOV3(
                    config=config,
                    max_box_per_image=max_box_per_image,
                    batch_size=config["train"]["batch_size"] // multi_gpu,
                    warmup_batches=warmup_batches)
                template_model, infer_model = yolo_model.model()
        else:
            yolo_model = YOLOV3(
                config=config,
                max_box_per_image=max_box_per_image,
                batch_size=config["train"]["batch_size"],
                warmup_batches=warmup_batches)
            template_model, infer_model = yolo_model.model()
    elif config["model"]["model_name"] == "yolov4":
        print('[INFO] YOLOV4 Model Creating...')
        if multi_gpu > 1:
            with tf.device('/cpu:0'):
                yolo_model = YOLOV4(
                    config=config,
                    max_box_per_image=max_box_per_image,
                    batch_size=config["train"]["batch_size"] // multi_gpu,
                    warmup_batches=warmup_batches)
                template_model, infer_model = yolo_model.model()
        else:
            yolo_model = YOLOV4(
                config=config,
                max_box_per_image=max_box_per_image,
                batch_size=config["train"]["batch_size"],
                warmup_batches=warmup_batches)
            template_model, infer_model = yolo_model.model()
    else:
        # any other model_name is silently ignored here
        pass

    # load the pretrained weights if they exist, otherwise load the backend weights only
    if os.path.exists(saved_weights_name):
        print("[INFO] Find pretrained weights...")
        print("\n[INFO] Loading pretrained weights...\n")
        template_model.load_weights(saved_weights_name)
    # else:
    #     template_model.load_weights("backend.h5", by_name=True)

    if multi_gpu > 1:
        train_model = multi_gpu_model(template_model, gpus=multi_gpu)
    else:
        train_model = template_model

    optimizer = Adam(lr=lr, clipnorm=0.001)
    train_model.compile(loss=dummy_loss, optimizer=optimizer)

    return train_model, infer_model
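A hedged usage sketch of create_model(): the config keys mirror the ones the function reads above, but every concrete value, the weights file name, and the learning rate are assumptions for illustration, and a real config would typically carry additional keys consumed by the YOLOV3/YOLOV4 constructors.

# Illustrative call only -- batch size, box limit, warmup length, weights file
# and learning rate are assumed values, not settings defined by the project.
config = {
    "model": {"model_name": "yolov4"},
    "train": {"batch_size": 8},
}
train_model, infer_model = create_model(
    config=config,
    max_box_per_image=30,
    warmup_batches=3 * 1000,          # e.g. 3 warmup epochs of 1000 batches each
    multi_gpu=1,                      # >1 additionally wraps the template model with multi_gpu_model()
    saved_weights_name="yolov4_voc.h5",
    lr=1e-4,
)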
    if not os.path.exists(ckpt_file):
        print('freeze_ckpt_to_pb ckpt_file=', ckpt_file, ' not exist')
        sys.exit()
    pb_file = argv[4]
    print('freeze_ckpt_to_pb gpu_id=%s, net_type=%s, ckpt_file=%s, pb_file=%s' % (gpu_id, net_type, ckpt_file, pb_file))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

    output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]
    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    if net_type == 'yolov3':
        model = YOLOV3(input_data, trainable=False)
    elif net_type == 'yolov4':
        model = YOLOV4(input_data, trainable=False)
    elif net_type == 'yolov5':
        model = YOLOV5(input_data, trainable=False)
    else:
        print('net_type=', net_type, ' error')

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(converted_graph_def.SerializeToString())
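Once the .pb has been written, it can be loaded back and executed with the standard TF1 frozen-graph pattern. The sketch below reuses the output node names from the script above; the 416x416 input size and the zero-filled dummy image are assumptions, and real use would feed a preprocessed image instead.

# Sketch: load the frozen graph written above and run the three prediction heads.
# The 1x416x416x3 dummy input is an assumption used only to exercise the graph.
import numpy as np
import tensorflow as tf

graph = tf.Graph()
with tf.gfile.GFile(pb_file, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as infer_sess:
    input_tensor = graph.get_tensor_by_name('input/input_data:0')
    pred_tensors = [graph.get_tensor_by_name(name + ':0')
                    for name in ('pred_sbbox/concat_2', 'pred_mbbox/concat_2', 'pred_lbbox/concat_2')]
    dummy_image = np.zeros((1, 416, 416, 3), dtype=np.float32)
    pred_sbbox, pred_mbbox, pred_lbbox = infer_sess.run(pred_tensors,
                                                        feed_dict={input_tensor: dummy_image})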