def freeze_graph(pb_file, ckpt_file):
    # Nodes to keep in the frozen graph: the input placeholder plus the three YOLO prediction heads.
    output_node_names = ["input/input_data",
                         "pred_sbbox/concat_2",
                         "pred_mbbox/concat_2",
                         "pred_lbbox/concat_2"]

    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    # Build the inference graph (trainable=False keeps batch-norm in inference mode).
    YOLOV4(input_data, trainable=False)

    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)

    # Replace variables with constants so the whole graph can be serialized into one .pb file.
    converted_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=output_node_names)

    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(converted_graph_def.SerializeToString())
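Calling the function is a one-liner once you have a trained checkpoint. The paths below are placeholders, not files produced by this article:

# Placeholder paths; point these at your own checkpoint and the .pb file you want to write.
ckpt_file = './checkpoint/yolov4.ckpt'
pb_file = './yolov4_from_ckpt.pb'

tf.reset_default_graph()          # start from a clean graph before YOLOV4 is rebuilt
freeze_graph(pb_file, ckpt_file)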
def weight2pb(darknet_weights, pb_file):
    output_node_names = ["input/input_data",
                         "pred_sbbox/concat_2",
                         "pred_mbbox/concat_2",
                         "pred_lbbox/concat_2"]

    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')

    YOLOV4(input_data, trainable=False)
    load_ops = load_weights(tf.global_variables(), darknet_weights)

    with tf.Session() as sess:
        sess.run(load_ops)
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            sess.graph.as_graph_def(),
            output_node_names=output_node_names)
        with tf.gfile.GFile(pb_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("{} ops written to {}.".format(len(output_graph_def.node), pb_file))
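With the original darknet weight file downloaded locally, the conversion is a single call (the paths here are placeholders only):

# Placeholder paths; use your own yolov4.weights location and desired output file.
weight2pb('./model_data/yolov4.weights', './yolov4.pb')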
def __init__(self):
    self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
    self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
    self.num_classes = len(self.classes)
    self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
    self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
    self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
    self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
    self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
    self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
    self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
    self.max_bbox_per_scale = 150
    self.train_logdir = "./data/log/train"
    self.trainset = Dataset('train')
    self.valset = Dataset('test')
    self.steps_per_period = len(self.trainset)
    self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Placeholders for images, per-scale labels, ground-truth boxes and the training flag.
    with tf.name_scope('input'):
        self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
        self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
        self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
        self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
        self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
        self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
        self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
        self.trainable = tf.placeholder(dtype=tf.bool, name='training')

    with tf.name_scope("define_loss"):
        iou_use = 1          # (0, 1, 2) ==> (giou_loss, diou_loss, ciou_loss)
        focal_use = False    # (False, True) ==> (normal, focal_loss)
        label_smoothing = 0  # e.g. 0.001
        self.model = YOLOV4(self.input_data, self.trainable)
        self.net_var = tf.global_variables()
        self.iou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
            self.label_sbbox, self.label_mbbox, self.label_lbbox,
            self.true_sbboxes, self.true_mbboxes, self.true_lbboxes,
            iou_use, focal_use, label_smoothing)
        self.loss = self.iou_loss + self.conf_loss + self.prob_loss
        # self.loss = tf.Print(self.loss, [self.iou_loss, self.conf_loss, self.prob_loss], message='loss: ')

    # Cosine learning-rate schedule with a linear warmup phase.
    with tf.name_scope('learn_rate'):
        self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
        warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                   dtype=tf.float64, name='warmup_steps')
        train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
                                  dtype=tf.float64, name='train_steps')
        self.learn_rate = tf.cond(
            pred=self.global_step < warmup_steps,
            true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
            false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                             (1 + tf.cos((self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi)))
        global_step_update = tf.assign_add(self.global_step, 1.0)

    with tf.name_scope("define_weight_decay"):
        moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

    # Stage 1: only the three detection heads (conv_sbbox / conv_mbbox / conv_lbbox) are trained.
    with tf.name_scope("define_first_stage_train"):
        self.first_stage_trainable_var_list = []
        for var in tf.trainable_variables():
            var_name = var.op.name
            var_name_mess = str(var_name).split('/')
            if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                self.first_stage_trainable_var_list.append(var)
        first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
            self.loss, var_list=self.first_stage_trainable_var_list)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                with tf.control_dependencies([moving_ave]):
                    self.train_op_with_frozen_variables = tf.no_op()

    # Stage 2: all trainable variables are updated.
    with tf.name_scope("define_second_stage_train"):
        second_stage_trainable_var_list = tf.trainable_variables()
        second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(
            self.loss, var_list=second_stage_trainable_var_list)
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                with tf.control_dependencies([moving_ave]):
                    self.train_op_with_all_variables = tf.no_op()

    with tf.name_scope('loader_and_saver'):
        self.loader = tf.train.Saver(self.net_var)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)
        # self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=10)

    with tf.name_scope('summary'):
        tf.summary.scalar("learn_rate", self.learn_rate)
        tf.summary.scalar("iou_loss", self.iou_loss)
        tf.summary.scalar("conf_loss", self.conf_loss)
        tf.summary.scalar("prob_loss", self.prob_loss)
        tf.summary.scalar("total_loss", self.loss)
        logdir = "./data/log/"
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.mkdir(logdir)
        self.write_op = tf.summary.merge_all()
        self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
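The __init__ above only builds the graph; it does not run anything. A minimal training loop that feeds the placeholders and switches from the frozen-variable op to the all-variable op after the first stage might look like the sketch below. The exact tuple layout yielded by Dataset and the checkpoint path are assumptions for illustration, not part of the original code:

def train(self):
    self.sess.run(tf.global_variables_initializer())
    try:
        # Warm-start from a pretrained checkpoint if one is configured.
        self.loader.restore(self.sess, self.initial_weight)
    except Exception:
        print("=> Training from scratch, could not restore {}".format(self.initial_weight))

    for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
        # Stage 1 trains only the detection heads; stage 2 trains everything.
        train_op = (self.train_op_with_frozen_variables
                    if epoch <= self.first_stage_epochs
                    else self.train_op_with_all_variables)

        # Assumption: each batch is (image, label_sbbox, label_mbbox, label_lbbox,
        #                            sbboxes, mbboxes, lbboxes), matching the placeholders.
        for train_data in self.trainset:
            _, summary, loss, step = self.sess.run(
                [train_op, self.write_op, self.loss, self.global_step],
                feed_dict={self.input_data: train_data[0],
                           self.label_sbbox: train_data[1],
                           self.label_mbbox: train_data[2],
                           self.label_lbbox: train_data[3],
                           self.true_sbboxes: train_data[4],
                           self.true_mbboxes: train_data[5],
                           self.true_lbboxes: train_data[6],
                           self.trainable: True})
            self.summary_writer.add_summary(summary, step)

        # Hypothetical checkpoint naming; adjust to your own convention.
        self.saver.save(self.sess, "./checkpoint/yolov4_epoch_%d.ckpt" % epoch)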
import tensorflow as tf
from yolov4.model import YOLOV4
from from_darknet_weights_to_ckpt import load_weights

# You need to reset the default graph first.
tf.reset_default_graph()

input_size = 416
darknet_weights = '/media/devin/F/Ebdatasets/keras_yolo4/model_data/yolov4.weights'
pb_file = './yolov4.pb'
output_node_names = ["input/input_data",
                     "pred_sbbox/concat_2",
                     "pred_mbbox/concat_2",
                     "pred_lbbox/concat_2"]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

model = YOLOV4(input_data, trainable=False)
load_ops = load_weights(tf.global_variables(), darknet_weights)

with tf.Session() as sess:
    sess.run(load_ops)
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        output_node_names=output_node_names)
    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(output_graph_def.SerializeToString())
    print("{} ops written to {}.".format(len(output_graph_def.node), pb_file))
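Once yolov4.pb exists, the frozen graph can be loaded back and queried through the node names listed in output_node_names. The snippet below is only a sanity-check sketch: the ":0" tensor suffixes and the 1x416x416x3 dummy input are assumptions about the exported graph, and real images would need the model's preprocessing first.

import numpy as np
import tensorflow as tf

pb_file = './yolov4.pb'
return_elements = ["input/input_data:0",
                   "pred_sbbox/concat_2:0",
                   "pred_mbbox/concat_2:0",
                   "pred_lbbox/concat_2:0"]

# Read the serialized GraphDef back from disk.
with tf.gfile.GFile(pb_file, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# Import it into a fresh graph and grab the input/output tensors by name.
graph = tf.Graph()
with graph.as_default():
    input_tensor, sbbox, mbbox, lbbox = tf.import_graph_def(
        graph_def, return_elements=return_elements)

with tf.Session(graph=graph) as sess:
    # Dummy batch just to confirm the frozen graph runs end to end.
    dummy = np.random.random((1, 416, 416, 3)).astype(np.float32)
    pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
        [sbbox, mbbox, lbbox], feed_dict={input_tensor: dummy})
    print(pred_sbbox.shape, pred_mbbox.shape, pred_lbbox.shape)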