Example #1
import os

import numpy as np
import tensorflow as tf

# input_dataset and forward_prop are project-local modules assumed by these
# snippets; event_log_path, max_iter_num and checkpoint_path are settings
# defined elsewhere in the project.


def train():
    with tf.Graph().as_default():
        global_step = tf.Variable(initial_value=0, trainable=False)

        img_batch, label_batch = input_dataset.preprocess_input_data()
        logits = forward_prop.network(img_batch)
        total_loss = forward_prop.loss(logits, label_batch)
        one_step_gradient_update = forward_prop.one_step_train(
            total_loss, global_step)

        saver = tf.train.Saver(var_list=tf.all_variables())
        all_summary_obj = tf.merge_all_summaries()
        initiate_variables = tf.initialize_all_variables()

        with tf.Session(config=tf.ConfigProto(
                log_device_placement=False)) as sess:
            sess.run(initiate_variables)
            tf.train.start_queue_runners(sess=sess)
            event_writer = tf.train.SummaryWriter(logdir=event_log_path,
                                                  graph=sess.graph)
            for step in range(max_iter_num):
                _, loss_value = sess.run(
                    fetches=[one_step_gradient_update, total_loss])
                assert not np.isnan(loss_value)
                if step % 10 == 0:
                    print('step %d, the loss_value is %.2f' %
                          (step, loss_value))
                if step % 100 == 0:
                    all_summaries = sess.run(all_summary_obj)
                    event_writer.add_summary(summary=all_summaries,
                                             global_step=step)
                if step % 1000 == 0 or (step + 1) == max_iter_num:
                    variables_save_path = os.path.join(checkpoint_path,
                                                       'model-parameter.bin')
                    saver.save(sess, variables_save_path, global_step=step)
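forward_prop.one_step_train is project code that is not shown in these snippets. A minimal sketch of what such a helper typically looks like in this API generation, assuming a plain gradient-descent optimizer plus the moving-average bookkeeping that the evaluate() examples below restore (the learning rate and decay values are hypothetical):

import tensorflow as tf

def one_step_train(total_loss, global_step):
    # Hypothetical reconstruction: take one gradient step, then refresh the
    # exponential-moving-average (shadow) copies of the trainable variables.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    gradient_update = optimizer.minimize(total_loss, global_step=global_step)
    ema = tf.train.ExponentialMovingAverage(0.9999, num_updates=global_step)
    with tf.control_dependencies([gradient_update]):
        # Running the returned op performs the gradient step first and then
        # updates the shadow variables that evaluate() later restores.
        one_step_op = ema.apply(tf.trainable_variables())
    return one_step_op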
Example #2
def evaluate():
    with tf.Graph().as_default() as g:
        img_batch, labels = input_dataset.input_data(eval_flag=True)
        logits = forward_prop.network(img_batch)

        predict_true_or_false = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)

        moving_average_op = tf.train.ExponentialMovingAverage(
            decay=forward_prop.moving_average_decay)
        variables_to_restore = moving_average_op.variables_to_restore()
        saver = tf.train.Saver(var_list=variables_to_restore)

        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(logdir='./event-log-test', graph=g)
        eval_once(summary_op, summary_writer, saver, predict_true_or_false)
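eval_once is likewise project code that is never shown. A minimal sketch of such a helper, restoring the latest checkpoint and measuring precision@1 over the test set; checkpoint_path, num_examples and batch_size are assumed to be defined in the surrounding module (all three names are hypothetical here):

import math

import numpy as np
import tensorflow as tf

def eval_once(summary_op, summary_writer, saver, predict_true_or_false):
    with tf.Session() as sess:
        # restore the moving-average weights saved by train()
        ckpt = tf.train.get_checkpoint_state(checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        num_batches = int(math.ceil(num_examples / float(batch_size)))
        true_count = 0  # number of correctly classified test examples
        for _ in range(num_batches):
            true_count += np.sum(sess.run(predict_true_or_false))
        print('precision @ 1 = %.3f' %
              (true_count / float(num_batches * batch_size)))
        summary_writer.add_summary(sess.run(summary_op))
        coord.request_stop()
        coord.join(threads)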
Example #3
def evaluate():
    with tf.Graph().as_default() as g:
        img_batch, labels = input_dataset.input_data(eval_flag=True)  # read in the test dataset
        # compute the model outputs using the parameters as trained, before the
        # moving-average swap
        logits = forward_prop.network(img_batch)
        # check whether each target is among the top k predictions; with k=1 this
        # is equivalent to the usual accuracy computation, and
        # sess.run(predict_true_or_false) executes the symbolic computation
        predict_true_or_false = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
        # restore the model parameters produced by the moving-average operation
        moving_average_op = tf.train.ExponentialMovingAverage(
            decay=forward_prop.moving_average_decay)
        # returns a map from restore names to Variables: if a variable has a
        # moving average, the moving-average name is used as the restore name;
        # otherwise the variable's own name is used
        variables_to_restore = moving_average_op.variables_to_restore()
        saver = tf.train.Saver(var_list=variables_to_restore)

        summary_op = tf.merge_all_summaries()  # the merged, serialized summary object
        # create an event file under logdir, into which summary objects are written later
        summary_writer = tf.train.SummaryWriter(logdir='./event-log-test', graph=g)
        eval_once(summary_op, summary_writer, saver, predict_true_or_false)
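The comment above notes that in_top_k with k=1 reduces to the ordinary accuracy check. A self-contained illustration of that behavior:

import tensorflow as tf

# Two examples over three classes: the first prediction is correct (argmax is
# class 2), the second is wrong (argmax is class 0 but the label is 1).
logits = tf.constant([[0.1, 0.2, 0.7],
                      [0.8, 0.1, 0.1]])
labels = tf.constant([2, 1])
correct = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
with tf.Session() as sess:
    print(sess.run(correct))  # [ True False]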
Example #4
def train():
    with tf.Graph().as_default():  # make this graph the default graph
        # trainable=False keeps global_step out of the moving-average updates
        # performed during training
        global_step = tf.Variable(initial_value=0, trainable=False)
        # preprocess the input images: brightness, contrast, flipping, etc.
        img_batch, label_batch = input_dataset.preprocess_input_data()
        # img_batch, label_batch = input_dataset.input_data(eval_flag=False)
        logits = forward_prop.network(img_batch)  # forward pass of the image signal
        total_loss = forward_prop.loss(logits, label_batch)  # compute the loss
        # returns a one-step gradient-update operation
        one_step_gradient_update = forward_prop.one_step_train(total_loss, global_step)
        # create a saver object for writing parameters to file;
        # tf.all_variables returns a list of `Variable` objects
        saver = tf.train.Saver(var_list=tf.all_variables())
        # a string tensor holding all summary objects, merged and then serialized
        all_summary_obj = tf.merge_all_summaries()
        initiate_variables = tf.initialize_all_variables()
        # log_device_placement would record the device used by every operation;
        # there are many operations here, so logging is disabled
        with tf.Session(config=tf.ConfigProto(
                log_device_placement=False)) as sess:
            sess.run(initiate_variables)  # initialize the variables
            tf.train.start_queue_runners(sess=sess)  # start all queue runners
            event_writer = tf.train.SummaryWriter(logdir=event_log_path,
                                                  graph=sess.graph)
            for step in range(max_iter_num):
                _, loss_value = sess.run(
                    fetches=[one_step_gradient_update, total_loss])
                assert not np.isnan(loss_value)  # sanity-check this iteration's loss
                if step % 10 == 0:
                    print('step %d, the loss_value is %.2f' %
                          (step, loss_value))
                if step % 100 == 0:
                    # add a `Summary` protocol buffer to the event file; the plain
                    # tensor total_loss cannot itself be written to the event file
                    all_summaries = sess.run(all_summary_obj)
                    event_writer.add_summary(summary=all_summaries,
                                             global_step=step)
                if step % 1000 == 0 or (step + 1) == max_iter_num:
                    # os.path.join merges the path components into one string
                    variables_save_path = os.path.join(checkpoint_path,
                                                       'model-parameters.bin')
                    # save all variables (including the pre- and post-moving-average
                    # model parameters) under variables_save_path
                    saver.save(sess, variables_save_path, global_step=step)
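The checkpoint written here contains both the raw variables and their shadow (moving-average) copies; the evaluate() examples rely on variables_to_restore() to map the shadow names back onto the model variables. A toy illustration of that restore map (the variable name and decay value are arbitrary):

import tensorflow as tf

v = tf.Variable(0.0, name='weights')
ema = tf.train.ExponentialMovingAverage(decay=0.999)
maintain_op = ema.apply([v])  # creates the shadow variable for v
# A variable with an average is keyed by its shadow name, so a Saver built
# from this map loads the averaged value in place of the raw trained value.
print(ema.variables_to_restore())
# roughly: {'weights/ExponentialMovingAverage': <tf.Variable 'weights:0'>}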