def __init__(self):
    """Build the YOLO v3 inference graph and restore EMA weights for testing.

    Reads every hyper-parameter from the global ``cfg`` module, creates the
    image input placeholder, builds the network with training disabled, and
    restores the exponential-moving-average shadow variables from the
    configured checkpoint file.
    """
    # --- configuration snapshot (all values come from cfg) ---
    self.__test_input_size = cfg.TEST_INPUT_SIZE
    self.__anchor_per_scale = cfg.ANCHOR_PER_SCALE
    self.__classes = cfg.CLASSES
    self.__num_classes = len(self.__classes)
    # class name -> integer index, in cfg.CLASSES order
    self.__class_to_ind = dict(
        zip(self.__classes, range(self.__num_classes)))
    self.__anchors = np.array(cfg.ANCHORS)
    self.__score_threshold = cfg.SCORE_THRESHOLD
    self.__iou_threshold = cfg.IOU_THRESHOLD
    self.__log_dir = os.path.join(cfg.LOG_DIR, 'test')
    self.__annot_dir_path = cfg.ANNOT_DIR_PATH
    self.__moving_ave_decay = cfg.MOVING_AVE_DECAY
    self.__dataset_path = cfg.DATASET_PATH
    self.__valid_scales = cfg.VALID_SCALES
    # Inference mode: training is a plain Python bool here, not a placeholder.
    self.__training = False
    with tf.name_scope('input'):
        self.__input_data = tf.placeholder(dtype=tf.float32,
                                           name='input_data')
        # self.__training = tf.placeholder(dtype=tf.bool, name='training')
    # Only the three decoded per-scale prediction tensors are kept; the raw
    # conv outputs (first three return values) are not needed at test time.
    _, _, _, self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox = \
        YOLO_V3(self.__training).build_nework(self.__input_data)
    with tf.name_scope('summary'):
        # Write the graph once so it can be inspected in TensorBoard.
        tf.summary.FileWriter(self.__log_dir).add_graph(
            tf.get_default_graph())
    with tf.name_scope('ema'):
        ema_obj = tf.train.ExponentialMovingAverage(
            self.__moving_ave_decay)
    # NOTE(review): `config` is not defined in this method — presumably a
    # module-level tf.ConfigProto; confirm it exists at file scope.
    self.__sess = tf.Session(config=config)
    # Restore the EMA shadow values in place of the raw variables.
    self.__saver = tf.train.Saver(ema_obj.variables_to_restore())
    self.__saver.restore(self.__sess,
                         os.path.join(cfg.WEIGHTS_DIR, cfg.WEIGHTS_FILE))
def freeze_graph(input_checkpoint, output_graph):
    """Export a trained YOLO v3 checkpoint as a single frozen GraphDef (.pb).

    Rebuilds the inference graph, restores the EMA (shadow) weights from
    ``input_checkpoint``, rewrites variable-assignment ops so the graph stays
    valid once variables become constants, folds all variables into constants
    and writes the serialized GraphDef to ``output_graph``.

    :param input_checkpoint: path prefix of the TF checkpoint to restore
    :param output_graph: path of the frozen binary .pb file to write
    """
    # Output node names must exist in the rebuilt graph: the input
    # placeholder and the three per-scale decoded prediction tensors.
    output_node_names = [
        "input/input_data", "pred_lbbox/pred_bbox", "pred_sbbox/pred_bbox",
        "pred_mbbox/pred_bbox"
    ]
    import config as cfg
    from model.yolo_v3 import YOLO_V3
    training = False  # inference graph: batch-norm in eval mode
    with tf.name_scope('input'):
        input_data = tf.placeholder(dtype=tf.float32, name='input_data')
        # training = tf.placeholder(dtype=tf.bool, name='training')
    _, _, _, pred_sbbox, pred_mbbox, pred_lbbox = YOLO_V3(
        training).build_nework(input_data)
    moving_ave_decay = cfg.MOVING_AVE_DECAY
    with tf.name_scope('ema'):
        ema_obj = tf.train.ExponentialMovingAverage(moving_ave_decay)
    # Map each variable to its EMA shadow so restore() loads averaged weights.
    saver = tf.train.Saver(ema_obj.variables_to_restore())
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()  # serialized copy of the graph
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, input_checkpoint)  # load checkpoint weights
        print("loading model ...")
        # Standard freeze work-around: turn ref-typed assignment ops into
        # their value-typed equivalents so batch-norm moving averages do not
        # break once every variable has been folded into a constant.
        for node in input_graph_def.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
                for index in range(len(node.input)):
                    if 'moving_' in node.input[index]:
                        node.input[index] = node.input[index] + '/read'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
            elif node.op == 'AssignAdd':
                node.op = 'Add'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
            elif node.op == 'Assign':
                node.op = 'Identity'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
                if 'validate_shape' in node.attr:
                    del node.attr['validate_shape']
                if len(node.input) == 2:
                    # input0: ref: Should be from a Variable node. May be uninitialized.
                    # input1: value: The value to be assigned to the variable.
                    node.input[0] = node.input[1]
                    del node.input[1]
        # BUG FIX: pass the *patched* input_graph_def. The original code fed
        # a fresh sess.graph.as_graph_def() here, which silently discarded
        # every op rewrite performed in the loop above.
        output_graph_def = graph_util.convert_variables_to_constants(
            sess=sess,
            input_graph_def=input_graph_def,
            output_node_names=output_node_names)
        with tf.gfile.GFile(output_graph, "wb") as f:  # save the model
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))
# NOTE(review): this chunk begins mid-function — the first five statements
# continue a loop (not visible here) that walks the *original* checkpoint's
# variables and collects [name, shape] pairs into org_weights_mess.
if (var_name_mess[-1] not in ['kernel', 'gamma', 'beta', 'moving_mean',
                              'moving_variance']) or \
        (var_name_mess[0] in preserve_org_names):
    # Skip non-weight tensors and any variable whose top scope is preserved.
    continue
org_weights_mess.append([var_name, var_shape])
print str(var_name).ljust(50), var_shape
print
# Rebuild the graph from scratch to enumerate the *current* model's weights.
tf.reset_default_graph()
cur_weights_mess = []
# NOTE(review): `tf.Graph().as_default()` without a `with` block is a no-op —
# the returned context manager is discarded; reset_default_graph() above
# already provides the fresh graph, so this line has no effect.
tf.Graph().as_default()
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(1, 416, 416, 3),
                                name='input_data')
    training = tf.placeholder(dtype=tf.bool, name='training')
YOLO_V3(training).build_nework(input_data)
# Collect [name, shape] for every current variable not explicitly preserved.
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    if var_name_mess[0] in preserve_cur_names:
        continue
    cur_weights_mess.append([var_name, var_shape])
    print str(var_name).ljust(50), var_shape
org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
# The rename is positional, so both lists must pair up one-to-one.
if cur_weights_num != org_weights_num:
    raise RuntimeError
print 'Number of weights that will rename:\t%d' % cur_weights_num
def __init__(self):
    """Build the YOLO v3 training graph (fixed learn rate variant).

    Loads hyper-parameters from ``cfg``, creates all input placeholders,
    builds the network and its loss, and prepares two alternative train ops:
    one updating only the three detection heads (darknet frozen) and one
    updating all trainable variables. Both also advance the EMA shadows.
    """
    # --- configuration snapshot ---
    self.__anchor_per_scale = cfg.ANCHOR_PER_SCALE
    self.__classes = cfg.CLASSES
    self.__num_classes = len(self.__classes)
    self.__learn_rate_init = cfg.LEARN_RATE_INIT
    self.__max_periods = cfg.MAX_PERIODS
    self.__max_wave_time = cfg.MAX_WAVE_TIME
    self.__max_learn_rate_decay_time = cfg.MAX_LEARN_RATE_DECAY_TIME
    self.__weights_dir = cfg.WEIGHTS_DIR
    self.__weights_file = cfg.WEIGHTS_FILE
    self.__log_dir = os.path.join(cfg.LOG_DIR, 'train')
    self.__moving_ave_decay = cfg.MOVING_AVE_DECAY
    self.__save_iter = cfg.SAVE_ITER
    self.__max_bbox_per_scale = cfg.MAX_BBOX_PER_SCALE
    # Data pipelines for both splits (project-local Data class).
    self.__train_data = Data('train')
    self.__test_data = Data('test')
    with tf.name_scope('input'):
        # Shapes are left dynamic; fed per batch at session run time.
        self.__input_data = tf.placeholder(dtype=tf.float32,
                                           name='input_data')
        self.__label_sbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_sbbox')
        self.__label_mbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_mbbox')
        self.__label_lbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_lbbox')
        self.__sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
        self.__mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
        self.__lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
        # Training flag is a placeholder so the same graph serves train/eval.
        self.__training = tf.placeholder(dtype=tf.bool, name='training')
    self.__yolo = YOLO_V3(self.__training)
    # Raw conv outputs (for the loss) and decoded predictions, per scale.
    self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox, \
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox = \
        self.__yolo.build_nework(self.__input_data)
    # Snapshot of all variables created so far — used by the restore Saver.
    self.__net_var = tf.global_variables()
    print 'Load weights:'
    for var in self.__net_var:
        print var.op.name
    self.__loss = self.__yolo.loss(
        self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox,
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox,
        self.__label_sbbox, self.__label_mbbox, self.__label_lbbox,
        self.__sbboxes, self.__mbboxes, self.__lbboxes)
    with tf.name_scope('learn'):
        self.__learn_rate = tf.Variable(self.__learn_rate_init,
                                        trainable=False,
                                        name='learn_rate_init')
        # EMA update op over every trainable variable.
        moving_ave = tf.train.ExponentialMovingAverage(
            self.__moving_ave_decay).apply(tf.trainable_variables())
        # Detection-head variables only — trained while darknet is frozen.
        self.__trainable_var_list = []
        for var in tf.trainable_variables():
            var_name = var.op.name
            var_name_mess = str(var_name).split('/')
            if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox',
                                    'conv_lbbox']:
                self.__trainable_var_list.append(var)
        optimize0 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=self.__trainable_var_list)
        # Chain: run optimizer, then EMA apply, exposed as a single no_op.
        with tf.control_dependencies([optimize0]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_frozen_variables = tf.no_op()
        optimize1 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=tf.trainable_variables())
        with tf.control_dependencies([optimize1]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_all_variables = tf.no_op()
        # Start with the darknet backbone frozen; callers may switch later.
        self.__train_op = self.__train_op_with_frozen_variables
        print 'Default trian step0 is freeze the weight of darknet'
        for var in self.__trainable_var_list:
            print '\t' + str(var.op.name).ljust(50) + str(var.shape)
    with tf.name_scope('load_save'):
        # __load restores only pre-built net vars; __save checkpoints all.
        self.__load = tf.train.Saver(self.__net_var)
        self.__save = tf.train.Saver(tf.global_variables(), max_to_keep=50)
    with tf.name_scope('summary'):
        tf.summary.scalar('loss', self.__loss)
        self.__summary_op = tf.summary.merge_all()
        self.__summary_writer = tf.summary.FileWriter(self.__log_dir)
        self.__summary_writer.add_graph(tf.get_default_graph())
    self.__sess = tf.Session(
        config=tf.ConfigProto(allow_soft_placement=True))
# NOTE(review): this chunk begins mid-function — the first statements
# continue a loop (not visible here) over the *original* checkpoint's
# variables, collecting [name, shape] pairs into org_weights_mess.
if (var_name_mess[-1] not in ['weights', 'gamma', 'beta', 'moving_mean',
                              'moving_variance']) or \
        (var_name_mess[1] == 'yolo-v3' and
         (var_name_mess[-2] in preserve_org_names)):
    # Skip non-weight tensors and preserved yolo-v3 sub-scopes.
    continue
org_weights_mess.append([var_name, var_shape])
print str(var_name).ljust(50), var_shape
print
# Rebuild the graph from scratch to enumerate the *current* model's weights.
tf.reset_default_graph()
cur_weights_mess = []
# NOTE(review): `tf.Graph().as_default()` without a `with` block is a no-op —
# the returned context manager is discarded.
tf.Graph().as_default()
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32,
                                shape=(1, 416, 416, 3),
                                name='input_data')
    training = tf.placeholder(dtype=tf.bool, name='training')
# NOTE(review): training placeholder is created but `True` is passed instead;
# presumably intentional for this conversion script — confirm.
YOLO_V3(True).build_nework(input_data)
# Collect [name, shape] for every current variable not explicitly preserved.
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    if var_name_mess[0] in preserve_cur_names:
        continue
    cur_weights_mess.append([var_name, var_shape])
    print str(var_name).ljust(50), var_shape
org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
# The rename is positional, so both lists must pair up one-to-one.
if cur_weights_num != org_weights_num:
    raise RuntimeError
print 'Number of weights that will rename:\t%d' % cur_weights_num
def __init__(self):
    """Build the YOLO v3 training graph (warmup + cosine learn-rate variant).

    Loads hyper-parameters from ``cfg``, creates all input placeholders,
    builds the network and its loss, sets up a learn-rate schedule (linear
    warmup followed by cosine decay to LEARN_RATE_END), and prepares two
    train ops: detection heads only (darknet frozen) and all variables.
    Both advance the global step and the EMA shadows.
    """
    # --- configuration snapshot ---
    self.__anchor_per_scale = cfg.ANCHOR_PER_SCALE
    self.__classes = cfg.CLASSES
    self.__num_classes = len(self.__classes)
    self.__learn_rate_init = cfg.LEARN_RATE_INIT
    self.__learn_rate_end = cfg.LEARN_RATE_END
    self.__max_periods = cfg.MAX_PERIODS
    self.__periods_for_step0 = cfg.PERIODS_FOR_STEP0
    self.__warmup_periods = cfg.WARMUP_PERIODS
    self.__weights_dir = cfg.WEIGHTS_DIR
    self.__weights_file = cfg.WEIGHTS_FILE
    # Timestamped log dir so each run gets its own TensorBoard trace.
    self.__time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                time.localtime(time.time()))
    self.__log_dir = os.path.join(cfg.LOG_DIR, 'train', self.__time)
    self.__moving_ave_decay = cfg.MOVING_AVE_DECAY
    self.__max_bbox_per_scale = cfg.MAX_BBOX_PER_SCALE
    # Data pipelines for both splits (project-local Data class).
    self.__train_data = Data('train')
    self.__test_data = Data('test')
    # Batches per epoch; used to convert periods to step counts below.
    self.__steps_per_period = len(self.__train_data)
    with tf.name_scope('input'):
        # Shapes are left dynamic; fed per batch at session run time.
        self.__input_data = tf.placeholder(dtype=tf.float32,
                                           name='input_data')
        self.__label_sbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_sbbox')
        self.__label_mbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_mbbox')
        self.__label_lbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_lbbox')
        self.__sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
        self.__mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
        self.__lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
        # Training flag is a placeholder so the same graph serves train/eval.
        self.__training = tf.placeholder(dtype=tf.bool, name='training')
    self.__yolo = YOLO_V3(self.__training)
    # Raw conv outputs (for the loss) and decoded predictions, per scale.
    self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox, \
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox = \
        self.__yolo.build_nework(self.__input_data)
    # Snapshot of all variables created so far — used by the restore Saver.
    self.__net_var = tf.global_variables()
    logging.info('Load weights:')
    for var in self.__net_var:
        logging.info(var.op.name)
    self.__loss = self.__yolo.loss(
        self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox,
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox,
        self.__label_sbbox, self.__label_mbbox, self.__label_lbbox,
        self.__sbboxes, self.__mbboxes, self.__lbboxes)
    with tf.name_scope('optimize'):
        with tf.name_scope('learn_rate'):
            self.__global_step = tf.Variable(1.0, dtype=tf.float64,
                                             trainable=False,
                                             name='global_step')
            warmup_steps = tf.constant(
                self.__warmup_periods * self.__steps_per_period,
                dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant(
                self.__max_periods * self.__steps_per_period,
                dtype=tf.float64, name='train_steps')
            # Linear warmup to LEARN_RATE_INIT, then cosine anneal down to
            # LEARN_RATE_END over the remaining steps.
            self.__learn_rate = tf.cond(
                pred=self.__global_step < warmup_steps,
                true_fn=lambda: self.__global_step / warmup_steps *
                self.__learn_rate_init,
                false_fn=lambda: self.__learn_rate_end + 0.5 *
                (self.__learn_rate_init - self.__learn_rate_end) *
                (1 + tf.cos((self.__global_step - warmup_steps) /
                            (train_steps - warmup_steps) * np.pi)))
            global_step_update = tf.assign_add(self.__global_step, 1.0)
        # EMA update op over every trainable variable.
        moving_ave = tf.train.ExponentialMovingAverage(
            self.__moving_ave_decay).apply(tf.trainable_variables())
        # Detection-head variables only — trained while darknet is frozen.
        self.__trainable_var_list = []
        for var in tf.trainable_variables():
            var_name = var.op.name
            var_name_mess = str(var_name).split('/')
            if var_name_mess[0] in [
                    'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
            ]:
                self.__trainable_var_list.append(var)
        optimize0 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=self.__trainable_var_list)
        # Chain: optimizer + step increment, then EMA apply, as one no_op.
        with tf.control_dependencies([optimize0, global_step_update]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_frozen_variables = tf.no_op()
        optimize1 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=tf.trainable_variables())
        with tf.control_dependencies([optimize1, global_step_update]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_all_variables = tf.no_op()
        # Start with the darknet backbone frozen; callers may switch later.
        self.__train_op = self.__train_op_with_frozen_variables
        logging.info('Default trian step0 is freeze the weight of darknet')
        for var in self.__trainable_var_list:
            logging.info('\t' + str(var.op.name).ljust(50) +
                         str(var.shape))
    with tf.name_scope('load_save'):
        # __load restores only pre-built net vars; __save checkpoints all.
        self.__load = tf.train.Saver(self.__net_var)
        self.__save = tf.train.Saver(tf.global_variables(), max_to_keep=50)
    with tf.name_scope('summary'):
        # Loss is averaged outside the graph and fed into this variable.
        self.__loss_ave = tf.Variable(0, dtype=tf.float32, trainable=False)
        tf.summary.scalar('loss_ave', self.__loss_ave)
        tf.summary.scalar('learn_rate', self.__learn_rate)
        self.__summary_op = tf.summary.merge_all()
        self.__summary_writer = tf.summary.FileWriter(self.__log_dir)
        self.__summary_writer.add_graph(tf.get_default_graph())
    self.__sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True))
def __init__(self):
    """Build the YOLO v3 training graph (cfg.FROZEN-switched variant).

    Loads hyper-parameters from ``cfg``, creates all input placeholders,
    builds the network and its loss, and selects — via ``cfg.FROZEN`` — one
    of two train ops: detection heads only (darknet frozen) or all trainable
    variables. Both also advance the EMA shadows.
    """
    # --- configuration snapshot ---
    self.__frozen = cfg.FROZEN
    self.__classes = cfg.CLASSES
    self.__learn_rate_init = cfg.LEARN_RATE_INIT
    self.__max_epochs = cfg.MAX_PERIODS
    self.__lr_decay_epoch = cfg.lr_decay_epoch
    self.__weights_dir = cfg.WEIGHTS_DIR
    self.__weights_file = cfg.WEIGHTS_FILE
    # Timestamped log dir so each run gets its own TensorBoard trace.
    self.__time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                time.localtime(time.time()))
    self.__log_dir = os.path.join(cfg.LOG_DIR, 'train', self.__time)
    self.__moving_ave_decay = cfg.MOVING_AVE_DECAY
    self.__save_iter = cfg.SAVE_ITER
    # Data pipelines for both splits (project-local Data class).
    self.__train_data = Data('train')
    self.__test_data = Data('test')
    with tf.name_scope('input'):
        # Shapes are left dynamic; fed per batch at session run time.
        self.__input_data = tf.placeholder(dtype=tf.float32,
                                           name='input_data')
        self.__label_sbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_sbbox')
        self.__label_mbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_mbbox')
        self.__label_lbbox = tf.placeholder(dtype=tf.float32,
                                            name='label_lbbox')
        self.__sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
        self.__mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
        self.__lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
    # Training mode is baked in as a Python bool here (no placeholder).
    self.__training = True
    self.__yolo = YOLO_V3(self.__training)
    # Raw conv outputs (for the loss) and decoded predictions, per scale.
    self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox, \
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox = \
        self.__yolo.build_nework(self.__input_data)
    # Snapshot of all variables created so far — used by the restore Saver.
    self.__net_var = tf.global_variables()
    logging.info('Load weights:')
    for var in self.__net_var:
        logging.info(var.op.name)
    self.__loss = self.__yolo.loss(
        self.__conv_sbbox, self.__conv_mbbox, self.__conv_lbbox,
        self.__pred_sbbox, self.__pred_mbbox, self.__pred_lbbox,
        self.__label_sbbox, self.__label_mbbox, self.__label_lbbox,
        self.__sbboxes, self.__mbboxes, self.__lbboxes)
    with tf.name_scope('learn'):
        self.__learn_rate = tf.Variable(self.__learn_rate_init,
                                        trainable=False,
                                        name='learn_rate_init')
        # EMA update op over every trainable variable.
        moving_ave = tf.train.ExponentialMovingAverage(
            self.__moving_ave_decay).apply(tf.trainable_variables())
        # Detection-head variables only — trained while darknet is frozen.
        self.__trainable_var_list = []
        for var in tf.trainable_variables():
            var_name = var.op.name
            var_name_mess = str(var_name).split('/')
            if var_name_mess[0] in [
                    'conv_sbbox', 'conv_mbbox', 'conv_lbbox'
            ]:
                self.__trainable_var_list.append(var)
        optimize0 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=self.__trainable_var_list)
        # Chain: run optimizer, then EMA apply, exposed as a single no_op.
        with tf.control_dependencies([optimize0]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_frozen_variables = tf.no_op()
        optimize1 = tf.train.AdamOptimizer(self.__learn_rate).\
            minimize(self.__loss, var_list=tf.trainable_variables())
        with tf.control_dependencies([optimize1]):
            with tf.control_dependencies([moving_ave]):
                self.__train_op_with_all_variables = tf.no_op()
        # cfg.FROZEN selects which train op this run will use.
        if self.__frozen:
            self.__train_op = self.__train_op_with_frozen_variables
            logging.info('freeze the weight of darknet')
            print('freeze the weight of darknet')
        else:
            self.__train_op = self.__train_op_with_all_variables
            logging.info("train all variables")
            print("train all variables")
        for var in self.__trainable_var_list:
            # NOTE(review): this banner is logged once per variable — likely
            # intended to appear once before the loop.
            logging.info("++++++ trainable variables list: ++++++" + "\n")
            logging.info('\t' + str(var.op.name).ljust(50) +
                         str(var.shape))
    with tf.name_scope('load_save'):
        # __load restores only pre-built net vars; __save checkpoints all.
        self.__load = tf.train.Saver(self.__net_var)
        self.__save = tf.train.Saver(tf.global_variables(), max_to_keep=50)
    with tf.name_scope('summary'):
        tf.summary.scalar('loss', self.__loss)
        self.__summary_op = tf.summary.merge_all()
        self.__summary_writer = tf.summary.FileWriter(self.__log_dir)
        self.__summary_writer.add_graph(tf.get_default_graph())
    self.__sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True))